Dataset columns (dtype and min–max length or value):

  repo_name            stringlengths    7 – 71
  file_path            stringlengths    5 – 118
  context              list
  import_statement     stringlengths    45 – 12.5k
  token_num            int64            641 – 99.4k
  cropped_code         stringlengths    44 – 17k
  all_code             stringlengths    43 – 754k
  next_line            stringlengths    2 – 330
  gold_snippet_index   int64            0 – 68
  created_at           stringlengths    25 – 25
  level                stringclasses    9 values
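Taken together, these columns suggest a repository-level code-completion corpus: each row carries an in-file code prefix (cropped_code), the full file (all_code), a list of cross-file context snippets, and the next_line to be predicted, with gold_snippet_index marking the context entry relevant to that line. The sketch below shows how such a row could be loaded and inspected with the Hugging Face datasets library; the dataset ID is a placeholder, and only the field names come from the schema above.

# Minimal sketch, assuming the dump originates from a Hugging Face dataset
# with the columns listed above; "user/repo-code-completion" is a placeholder ID.
from datasets import load_dataset

ds = load_dataset("user/repo-code-completion", split="train")

row = ds[0]
print(row["repo_name"], row["file_path"], row["level"])

# "context" is a list of {"identifier", "path", "snippet"} records;
# gold_snippet_index points at the entry relevant to next_line.
gold = row["context"][row["gold_snippet_index"]]
print("gold snippet:", gold["identifier"], "from", gold["path"])
print("target next line:", row["next_line"])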
Example row 1

repo_name: timapage/pyqt6-yolov8
file_path: main.py
context:
[ { "identifier": "CameraCaptureThread", "path": "src/qt/stream/video_capture.py", "snippet": "class CameraCaptureThread(QThread):\n send_video_info = pyqtSignal(dict)\n send_frame = pyqtSignal(list)\n def __init__(self):\n super(CameraCaptureThread, self).__init__()\n self.thread_name = \"CameraCaptureThread\"\n self.threadFlag = False\n \n def set_start_config(self, video_source):\n self.threadFlag = True\n self.get_video_source(video_source)\n \n def get_video_source(self, video_source):\n self.video_source = video_source\n \n def get_video_info(self, video_cap):\n video_info = {}\n video_info[\"FPS\"] = video_cap.get(cv.CAP_PROP_FPS)\n video_info[\"length\"] = int(video_cap.get(cv.CAP_PROP_FRAME_COUNT))\n video_info[\"size\"] = (int(video_cap.get(cv.CAP_PROP_FRAME_WIDTH)),int(video_cap.get(cv.CAP_PROP_FRAME_HEIGHT)))\n return video_info\n \n def stop_capture(self):\n self.threadFlag = False\n\n def run(self): \n cap = cv.VideoCapture(self.video_source)\n if not cap.isOpened():\n raise IOError(\"Couldn't open webcam or video\")\n video_info = self.get_video_info(cap)\n self.send_video_info.emit(video_info)\n\n idx_frame = 0\n while self.threadFlag:\n ret, frame = cap.read()\n if ret is False or self.threadFlag is False:\n break\n self.send_frame.emit(list([idx_frame,frame]))\n idx_frame += 1\n self.send_frame.emit(list([None,None]))\n cap.release()" }, { "identifier": "VideoVisualizationThread", "path": "src/qt/stream/visualize.py", "snippet": "class VideoVisualizationThread(QThread):\n send_thread_start_stop_flag = pyqtSignal(str)\n send_displayable_frame = pyqtSignal(QImage)\n send_ai_output = pyqtSignal(list)\n def __init__(self):\n super(VideoVisualizationThread, self).__init__()\n self.thread_name = \"VideoVisualizationThread\"\n self.threadFlag = False\n \n def set_start_config(self, screen_size):\n self.threadFlag = True\n self.frame_buffer = FrameBuffer(10)\n self.ai_output = []\n self.get_screen_size(screen_size)\n \n def get_fresh_frame(self, frame_list):\n self.frame_buffer.put(frame=copy.deepcopy(frame_list[1]), frame_id=frame_list[0], realtime=True)\n\n def get_ai_output(self, ai_output):\n self.ai_output = copy.deepcopy(ai_output)\n \n def get_screen_size(self, screen_size):\n self.iw, self.ih = screen_size\n \n def stop_display(self):\n self.threadFlag = False\n\n def run(self):\n self.send_thread_start_stop_flag.emit(\"processing_on_camera\")\n while self.threadFlag:\n frame_id, frame = self.frame_buffer.get()\n if frame_id is not None:\n frame = draw_results(frame, self.ai_output)\n show_image = self.convert_cv_qt(frame, self.ih, self.iw)\n self.send_displayable_frame.emit(show_image)\n self.send_ai_output.emit(self.ai_output)\n else:\n break\n blank_image = np.zeros((self.ih, self.iw, 3))\n blank_image = cv.cvtColor(blank_image.astype('uint8'), cv.COLOR_BGR2RGBA)\n show_image = QImage(blank_image.data, blank_image.shape[1], blank_image.shape[0], QImage.Format.Format_RGBA8888)\n self.send_displayable_frame.emit(show_image)\n self.send_ai_output.emit([])\n self.send_thread_start_stop_flag.emit(\"waiting_for_setting\")\n\n\n def convert_cv_qt(self, image, screen_height, screen_width):\n h, w, _ = image.shape\n scale = min(screen_width / w, screen_height / h)\n nw, nh = int(scale * w), int(scale * h)\n image_resized = cv.resize(image, (nw, nh))\n image_paded = np.full(shape=[screen_height, screen_width, 3], fill_value=0)\n dw, dh = (screen_width - nw) // 2, (screen_height - nh) // 2\n image_paded[dh:nh + dh, dw:nw + dw, :] = image_resized\n image_paded = 
cv.cvtColor(image_paded.astype('uint8'), cv.COLOR_BGR2RGBA)\n return QImage(image_paded.data, image_paded.shape[1], image_paded.shape[0], QImage.Format.Format_RGBA8888)" }, { "identifier": "AiWorkerThread", "path": "src/qt/stream/ai_worker.py", "snippet": "class AiWorkerThread(QThread):\n send_ai_output = pyqtSignal(list)\n def __init__(self):\n super(AiWorkerThread, self).__init__()\n self.thread_name = \"AiWorkerThread\"\n self.threadFlag = False\n \n def set_start_config(self, ai_task, model_name=\"yolov8n\", confidence_threshold=0.35, iou_threshold=0.45):\n self.threadFlag = True\n self.ai_task = ai_task\n self.latest_frame = LatestFrame()\n self.confi_thr = confidence_threshold\n self.iou_thr = iou_threshold\n self.model_name = model_name\n self._init_yolo()\n self._init_tracker()\n\n def set_iou_threshold(self, iou_threshold):\n self.iou_thr = iou_threshold\n \n def set_confidence_threshold(self, confidence_threshold):\n self.confi_thr = confidence_threshold\n \n def set_model_name(self, model_name):\n self.model_name = model_name\n\n def _init_yolo(self):\n if self.ai_task == \"object_detection\":\n self.detector = YoloDetector()\n self.detector.init(\n model_path=os.path.join(ROOT, f\"weights/detection/{self.model_name}.onnx\"),\n class_txt_path=os.path.join(ROOT, \"weights/classes.txt\"),\n confidence_threshold=self.confi_thr,\n iou_threshold=self.iou_thr)\n elif self.ai_task == \"pose_detection\":\n self.pose_detector = PoseDetector()\n self.pose_detector.init(\n model_path=os.path.join(ROOT, f\"weights/pose/{self.model_name}-pose.onnx\"),\n confidence_threshold=self.confi_thr,\n iou_threshold=self.iou_thr)\n elif self.ai_task == \"segmentation\":\n self.seg_detector = YOLOSeg()\n self.seg_detector.init(\n model_path=os.path.join(ROOT, f\"weights/segmentation/{self.model_name}-seg.onnx\"),\n class_txt_path=os.path.join(ROOT, \"weights/classes.txt\"),\n confidence_threshold=self.confi_thr,\n iou_threshold=self.iou_thr)\n\n def _init_tracker(self):\n self.tracker = DeepSort(\n model_path=os.path.join(ROOT, f\"src/models/tracking/deep_sort/deep/checkpoint/ckpt.t7\"))\n \n def get_frame(self, frame_list):\n self.latest_frame.put(frame=frame_list[1], frame_id=frame_list[0], realtime=True)\n \n def stop_process(self):\n self.threadFlag = False\n \n def run(self):\n while self.threadFlag:\n frame_id, frame = self.latest_frame.get()\n if frame_id is None:\n break\n model_output = []\n if self.ai_task == \"object_detection\":\n model_output = self.detector.inference(frame, self.confi_thr, self.iou_thr)\n elif self.ai_task == \"pose_detection\":\n model_output = self.pose_detector.inference(frame, self.confi_thr, self.iou_thr)\n elif self.ai_task == \"segmentation\":\n model_output = self.seg_detector.inference(frame, self.confi_thr, self.iou_thr)\n\n model_output = self.tracker.update(\n detection_results=model_output,\n ori_img=frame)\n \n self.model_output = add_image_id(model_output, frame_id)\n self.send_ai_output.emit(model_output)" }, { "identifier": "Ui_MainWindow", "path": "src/ui/main_window.py", "snippet": "class Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(878, 617)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Preferred, QtWidgets.QSizePolicy.Policy.Minimum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())\n MainWindow.setSizePolicy(sizePolicy)\n icon = QtGui.QIcon()\n 
icon.addPixmap(QtGui.QPixmap(\":/images/icons/icon.png\"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.Off)\n MainWindow.setWindowIcon(icon)\n MainWindow.setStyleSheet(\"background-color: rgb(119, 118, 123);\\n\"\n\"border-color: rgb(119, 118, 123);\")\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setStyleSheet(\"background-color: rgb(119, 118, 123);\\n\"\n\"border-color: rgb(119, 118, 123);\")\n self.centralwidget.setObjectName(\"centralwidget\")\n self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.centralwidget)\n self.horizontalLayout_4.setObjectName(\"horizontalLayout_4\")\n self.verticalLayout = QtWidgets.QVBoxLayout()\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.verticalLayout_2 = QtWidgets.QVBoxLayout()\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.groupBox = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setFamily(\"Ubuntu\")\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.groupBox.setFont(font)\n self.groupBox.setStyleSheet(\"\")\n self.groupBox.setObjectName(\"groupBox\")\n self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.groupBox)\n self.verticalLayout_4.setObjectName(\"verticalLayout_4\")\n self.radioButton_det = QtWidgets.QRadioButton(self.groupBox)\n font = QtGui.QFont()\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.radioButton_det.setFont(font)\n self.radioButton_det.setStyleSheet(\"QRadioButton\\n\"\n\"{font-size: 16px;\\n\"\n\" font-weight: bold;\\n\"\n\" border-radius:9px;\\n\"\n\" background:rgba(66, 195, 255, 0);\\n\"\n\"color: rgb(218, 218, 218);;}\\n\"\n\"QRadioButton::indicator {\\n\"\n\" width: 20px;\\n\"\n\" height: 20px;\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::indicator:unchecked {\\n\"\n\" image: url(:/images/icons/button-off.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::indicator:checked {\\n\"\n\" \\n\"\n\" image: url(:/images/icons/button-on.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::disabled{\\n\"\n\" color: rgb(0, 0, 0);\\n\"\n\"}\\n\"\n\"\")\n self.radioButton_det.setLocale(QtCore.QLocale(QtCore.QLocale.Language.English, QtCore.QLocale.Country.Zimbabwe))\n self.radioButton_det.setChecked(True)\n self.radioButton_det.setObjectName(\"radioButton_det\")\n self.verticalLayout_4.addWidget(self.radioButton_det)\n self.radioButton_seg = QtWidgets.QRadioButton(self.groupBox)\n self.radioButton_seg.setStyleSheet(\"QRadioButton\\n\"\n\"{font-size: 16px;\\n\"\n\" font-weight: bold;\\n\"\n\" border-radius:9px;\\n\"\n\" background:rgba(66, 195, 255, 0);\\n\"\n\"color: rgb(218, 218, 218);;}\\n\"\n\"\\n\"\n\"QRadioButton::indicator {\\n\"\n\" width: 20px;\\n\"\n\" height: 20px;\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::indicator:unchecked {\\n\"\n\" image: url(:/images/icons/button-off.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::indicator:checked {\\n\"\n\" \\n\"\n\" image: url(:/images/icons/button-on.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::disabled{\\n\"\n\" color: rgb(0, 0, 0);\\n\"\n\"}\")\n self.radioButton_seg.setObjectName(\"radioButton_seg\")\n self.verticalLayout_4.addWidget(self.radioButton_seg)\n self.radioButton_pose = QtWidgets.QRadioButton(self.groupBox)\n self.radioButton_pose.setStyleSheet(\"QRadioButton\\n\"\n\"{font-size: 16px;\\n\"\n\" font-weight: bold;\\n\"\n\" border-radius:9px;\\n\"\n\" background:rgba(66, 195, 255, 0);\\n\"\n\"color: rgb(218, 218, 
218);;}\\n\"\n\"\\n\"\n\"QRadioButton::indicator {\\n\"\n\" width: 20px;\\n\"\n\" height: 20px;\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::indicator:unchecked {\\n\"\n\" image: url(:/images/icons/button-off.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::indicator:checked {\\n\"\n\" \\n\"\n\" image: url(:/images/icons/button-on.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::disabled{\\n\"\n\"color: rgb(0, 0, 0);\\n\"\n\"}\")\n self.radioButton_pose.setObjectName(\"radioButton_pose\")\n self.verticalLayout_4.addWidget(self.radioButton_pose)\n self.verticalLayout_2.addWidget(self.groupBox)\n self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.groupBox_2.setFont(font)\n self.groupBox_2.setObjectName(\"groupBox_2\")\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.groupBox_2)\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n self.comboBox_model = QtWidgets.QComboBox(self.groupBox_2)\n self.comboBox_model.setAutoFillBackground(False)\n self.comboBox_model.setStyleSheet(\"QComboBox QAbstractItemView {\\n\"\n\"font-size: 16px;\\n\"\n\"outline:none;\\n\"\n\"border:none;}\\n\"\n\"\\n\"\n\"QComboBox{\\n\"\n\"font-size: 16px;\\n\"\n\"\\n\"\n\"color: rgb(218, 218, 218);\\n\"\n\"border-width:0px;\\n\"\n\"border-color:white;\\n\"\n\"border-style:solid;\\n\"\n\"background-color: rgba(200, 200, 200,50);}\\n\"\n\"\\n\"\n\"QComboBox::drop-down {\\n\"\n\"margin-top:1;\\n\"\n\"height:20;\\n\"\n\"color: rgb(218, 218, 218);\\n\"\n\"background-color: rgba(200, 200, 200,50);\\n\"\n\"border-image: url(:/images/icons/roll_down.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QComboBox::disabled{\\n\"\n\"color: rgb(0, 0, 0);\\n\"\n\"}\\n\"\n\"\")\n self.comboBox_model.setCurrentText(\"YOLOv8n\")\n self.comboBox_model.setObjectName(\"comboBox_model\")\n self.comboBox_model.addItem(\"\")\n self.comboBox_model.addItem(\"\")\n self.comboBox_model.addItem(\"\")\n self.comboBox_model.addItem(\"\")\n self.comboBox_model.addItem(\"\")\n self.horizontalLayout_2.addWidget(self.comboBox_model)\n self.verticalLayout_2.addWidget(self.groupBox_2)\n self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.groupBox_3.setFont(font)\n self.groupBox_3.setObjectName(\"groupBox_3\")\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.groupBox_3)\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\n self.pushButton_file = QtWidgets.QPushButton(self.groupBox_3)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton_file.sizePolicy().hasHeightForWidth())\n self.pushButton_file.setSizePolicy(sizePolicy)\n self.pushButton_file.setStyleSheet(\"QPushButton{\\n\"\n\" image: url(:/images/icons/video.png);\\n\"\n\"font-size: 14px;\\n\"\n\"font-weight: bold;\\n\"\n\"color:white;\\n\"\n\"text-align: center center;\\n\"\n\"padding-left: 5px;\\n\"\n\"padding-right: 5px;\\n\"\n\"padding-top: 4px;\\n\"\n\"padding-bottom: 4px;\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-color: rgba(255, 255, 255, 255);\\n\"\n\"border-radius: 3px;\\n\"\n\"background-color: rgba(200, 200, 200,0);}\\n\"\n\"\\n\"\n\"QPushButton:focus{outline: none;}\\n\"\n\"\\n\"\n\"QPushButton::pressed{\\n\"\n\" font-size: 14px;\\n\"\n\" font-weight: bold;\\n\"\n\" 
color:rgb(200,200,200);\\n\"\n\" text-align: center center;\\n\"\n\" padding-left: 5px;\\n\"\n\" padding-right: 5px;\\n\"\n\" padding-top: 4px;\\n\"\n\" padding-bottom: 4px;\\n\"\n\" border-style: solid;\\n\"\n\" border-width: 0px;\\n\"\n\" border-color: rgba(255, 255, 255, 255);\\n\"\n\" border-radius: 3px;\\n\"\n\" background-color: #bf513b;}\\n\"\n\"\\n\"\n\"QPushButton::disabled{\\n\"\n\" image: url(:/images/icons/video_off.png);\\n\"\n\" font-size: 14px;\\n\"\n\" font-weight: bold;\\n\"\n\" color:rgb(200,200,200);\\n\"\n\" text-align: center center;\\n\"\n\" padding-left: 5px;\\n\"\n\" padding-right: 5px;\\n\"\n\" padding-top: 4px;\\n\"\n\" padding-bottom: 4px;\\n\"\n\" border-style: solid;\\n\"\n\" border-width: 0px;\\n\"\n\" border-color: rgba(255, 255, 255, 255);\\n\"\n\" border-radius: 3px;}\\n\"\n\"QPushButton::hover {\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-radius: 0px;\\n\"\n\"background-color: rgba(48,148,243,80);}\")\n self.pushButton_file.setText(\"\")\n self.pushButton_file.setObjectName(\"pushButton_file\")\n self.horizontalLayout_3.addWidget(self.pushButton_file)\n self.pushButton_cam = QtWidgets.QPushButton(self.groupBox_3)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton_cam.sizePolicy().hasHeightForWidth())\n self.pushButton_cam.setSizePolicy(sizePolicy)\n self.pushButton_cam.setStyleSheet(\"QPushButton{\\n\"\n\" image: url(:/images/icons/camera_on.png);\\n\"\n\"font-size: 14px;\\n\"\n\"font-weight: bold;\\n\"\n\"color:white;\\n\"\n\"text-align: center center;\\n\"\n\"padding-left: 5px;\\n\"\n\"padding-right: 5px;\\n\"\n\"padding-top: 4px;\\n\"\n\"padding-bottom: 4px;\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-color: rgba(255, 255, 255, 255);\\n\"\n\"border-radius: 3px;\\n\"\n\"background-color: rgba(200, 200, 200,0);}\\n\"\n\"\\n\"\n\"QPushButton:focus{outline: none;}\\n\"\n\"\\n\"\n\"QPushButton::pressed{\\n\"\n\" font-size: 14px;\\n\"\n\" font-weight: bold;\\n\"\n\" color:rgb(200,200,200);\\n\"\n\" text-align: center center;\\n\"\n\" padding-left: 5px;\\n\"\n\" padding-right: 5px;\\n\"\n\" padding-top: 4px;\\n\"\n\" padding-bottom: 4px;\\n\"\n\" border-style: solid;\\n\"\n\" border-width: 0px;\\n\"\n\" border-color: rgba(255, 255, 255, 255);\\n\"\n\" border-radius: 3px;\\n\"\n\" background-color: #bf513b;}\\n\"\n\"\\n\"\n\"QPushButton::disabled{\\n\"\n\" image: url(:/images/icons/camera_off.png);\\n\"\n\" font-size: 14px;\\n\"\n\" font-weight: bold;\\n\"\n\" color:rgb(200,200,200);\\n\"\n\" text-align: center center;\\n\"\n\" padding-left: 5px;\\n\"\n\" padding-right: 5px;\\n\"\n\" padding-top: 4px;\\n\"\n\" padding-bottom: 4px;\\n\"\n\" border-style: solid;\\n\"\n\" border-width: 0px;\\n\"\n\" border-color: rgba(255, 255, 255, 255);\\n\"\n\" border-radius: 3px;}\\n\"\n\"QPushButton::hover {\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-radius: 0px;\\n\"\n\"background-color: rgba(48,148,243,80);}url(:/images/icons/camera_on.png)\")\n self.pushButton_cam.setText(\"\")\n self.pushButton_cam.setObjectName(\"pushButton_cam\")\n self.horizontalLayout_3.addWidget(self.pushButton_cam)\n self.verticalLayout_2.addWidget(self.groupBox_3)\n self.groupBox_4 = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n 
self.groupBox_4.setFont(font)\n self.groupBox_4.setObjectName(\"groupBox_4\")\n self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.groupBox_4)\n self.horizontalLayout_5.setObjectName(\"horizontalLayout_5\")\n self.doubleSpinBox_conf = QtWidgets.QDoubleSpinBox(self.groupBox_4)\n self.doubleSpinBox_conf.setStyleSheet(\"QDoubleSpinBox{\\n\"\n\"background:rgba(200, 200, 200,50);\\n\"\n\"color:white;\\n\"\n\"font-size: 14px;\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 1px;\\n\"\n\"border-color: rgba(200, 200, 200,100);\\n\"\n\"border-radius: 3px;}\\n\"\n\"\\n\"\n\"QDoubleSpinBox::down-button{\\n\"\n\"background:rgba(200, 200, 200,0);\\n\"\n\"border-image: url(:/images/icons/botton_down.png);}\\n\"\n\"QDoubleSpinBox::down-button::hover{\\n\"\n\"background:rgba(200, 200, 200,100);\\n\"\n\"border-image: url(:/images/icons/botton_down.png);}\\n\"\n\"\\n\"\n\"QDoubleSpinBox::up-button{\\n\"\n\"background:rgba(200, 200, 200,0);\\n\"\n\"border-image: url(:/images/icons/botton_up.png);}\\n\"\n\"QDoubleSpinBox::up-button::hover{\\n\"\n\"background:rgba(200, 200, 200,100);\\n\"\n\"border-image: url(:/images/icons/botton_up.png);}\")\n self.doubleSpinBox_conf.setMaximum(1.0)\n self.doubleSpinBox_conf.setSingleStep(0.01)\n self.doubleSpinBox_conf.setStepType(QtWidgets.QAbstractSpinBox.StepType.AdaptiveDecimalStepType)\n self.doubleSpinBox_conf.setProperty(\"value\", 0.3)\n self.doubleSpinBox_conf.setObjectName(\"doubleSpinBox_conf\")\n self.horizontalLayout_5.addWidget(self.doubleSpinBox_conf)\n self.horizontalSlider_conf = QtWidgets.QSlider(self.groupBox_4)\n self.horizontalSlider_conf.setStyleSheet(\"QSlider{\\n\"\n\"border-color: #bcbcbc;\\n\"\n\"color:#d9d9d9;\\n\"\n\"}\\n\"\n\"QSlider::groove:horizontal { \\n\"\n\" border: 1px solid #999999; \\n\"\n\" height: 3px; \\n\"\n\" margin: 0px 0; \\n\"\n\" left: 5px; right: 5px; \\n\"\n\" }\\n\"\n\"QSlider::handle:horizontal { \\n\"\n\" border: 0px ; \\n\"\n\" border-image: url(:/images/icons/point.png);\\n\"\n\" width:15px;\\n\"\n\" margin: -7px -7px -7px -7px; \\n\"\n\"} \\n\"\n\"QSlider::add-page:horizontal{\\n\"\n\"background: #d9d9d9; \\n\"\n\"\\n\"\n\"}\\n\"\n\"QSlider::sub-page:horizontal{ \\n\"\n\" background: #373737; \\n\"\n\"}\")\n self.horizontalSlider_conf.setMaximum(99)\n self.horizontalSlider_conf.setSingleStep(1)\n self.horizontalSlider_conf.setPageStep(99)\n self.horizontalSlider_conf.setProperty(\"value\", 30)\n self.horizontalSlider_conf.setOrientation(QtCore.Qt.Orientation.Horizontal)\n self.horizontalSlider_conf.setObjectName(\"horizontalSlider_conf\")\n self.horizontalLayout_5.addWidget(self.horizontalSlider_conf)\n self.verticalLayout_2.addWidget(self.groupBox_4)\n self.groupBox_5 = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.groupBox_5.setFont(font)\n self.groupBox_5.setObjectName(\"groupBox_5\")\n self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.groupBox_5)\n self.horizontalLayout_6.setObjectName(\"horizontalLayout_6\")\n self.doubleSpinBox_iou = QtWidgets.QDoubleSpinBox(self.groupBox_5)\n self.doubleSpinBox_iou.setStyleSheet(\"QDoubleSpinBox{\\n\"\n\"background:rgba(200, 200, 200,50);\\n\"\n\"color:white;\\n\"\n\"font-size: 14px;\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 1px;\\n\"\n\"border-color: rgba(200, 200, 200,100);\\n\"\n\"border-radius: 3px;}\\n\"\n\"\\n\"\n\"QDoubleSpinBox::down-button{\\n\"\n\"background:rgba(200, 200, 200,0);\\n\"\n\"border-image: 
url(:/images/icons/botton_down.png);}\\n\"\n\"QDoubleSpinBox::down-button::hover{\\n\"\n\"background:rgba(200, 200, 200,100);\\n\"\n\"border-image: url(:/images/icons/botton_down.png);}\\n\"\n\"\\n\"\n\"QDoubleSpinBox::up-button{\\n\"\n\"background:rgba(200, 200, 200,0);\\n\"\n\"border-image: url(:/images/icons/botton_up.png);}\\n\"\n\"QDoubleSpinBox::up-button::hover{\\n\"\n\"background:rgba(200, 200, 200,100);\\n\"\n\"border-image: url(:/images/icons/botton_up.png);}\")\n self.doubleSpinBox_iou.setMaximum(1.0)\n self.doubleSpinBox_iou.setSingleStep(0.01)\n self.doubleSpinBox_iou.setStepType(QtWidgets.QAbstractSpinBox.StepType.AdaptiveDecimalStepType)\n self.doubleSpinBox_iou.setProperty(\"value\", 0.45)\n self.doubleSpinBox_iou.setObjectName(\"doubleSpinBox_iou\")\n self.horizontalLayout_6.addWidget(self.doubleSpinBox_iou)\n self.horizontalSlider_iou = QtWidgets.QSlider(self.groupBox_5)\n self.horizontalSlider_iou.setStyleSheet(\"QSlider{\\n\"\n\"border-color: #bcbcbc;\\n\"\n\"color:#d9d9d9;\\n\"\n\"}\\n\"\n\"QSlider::groove:horizontal { \\n\"\n\" border: 1px solid #999999; \\n\"\n\" height: 3px; \\n\"\n\" margin: 0px 0; \\n\"\n\" left: 5px; right: 5px; \\n\"\n\" }\\n\"\n\"QSlider::handle:horizontal { \\n\"\n\" border: 0px ; \\n\"\n\" border-image: url(:/images/icons/point.png);\\n\"\n\" width:15px;\\n\"\n\" margin: -7px -7px -7px -7px; \\n\"\n\"} \\n\"\n\"QSlider::add-page:horizontal{\\n\"\n\"background: #d9d9d9; \\n\"\n\"\\n\"\n\"}\\n\"\n\"QSlider::sub-page:horizontal{ \\n\"\n\" background: #373737; \\n\"\n\"}\")\n self.horizontalSlider_iou.setProperty(\"value\", 45)\n self.horizontalSlider_iou.setOrientation(QtCore.Qt.Orientation.Horizontal)\n self.horizontalSlider_iou.setObjectName(\"horizontalSlider_iou\")\n self.horizontalLayout_6.addWidget(self.horizontalSlider_iou)\n self.verticalLayout_2.addWidget(self.groupBox_5)\n self.groupBox_6 = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.groupBox_6.setFont(font)\n self.groupBox_6.setObjectName(\"groupBox_6\")\n self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.groupBox_6)\n self.horizontalLayout_7.setObjectName(\"horizontalLayout_7\")\n self.doubleSpinBox_interval = QtWidgets.QDoubleSpinBox(self.groupBox_6)\n self.doubleSpinBox_interval.setStyleSheet(\"QDoubleSpinBox{\\n\"\n\"background:rgba(200, 200, 200,50);\\n\"\n\"color:white;\\n\"\n\"font-size: 14px;\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 1px;\\n\"\n\"border-color: rgba(200, 200, 200,100);\\n\"\n\"border-radius: 3px;}\\n\"\n\"\\n\"\n\"QDoubleSpinBox::down-button{\\n\"\n\"background:rgba(200, 200, 200,0);\\n\"\n\"border-image: url(:/images/icons/botton_down.png);}\\n\"\n\"QDoubleSpinBox::down-button::hover{\\n\"\n\"background:rgba(200, 200, 200,100);\\n\"\n\"border-image: url(:/images/icons/botton_down.png);}\\n\"\n\"\\n\"\n\"QDoubleSpinBox::up-button{\\n\"\n\"background:rgba(200, 200, 200,0);\\n\"\n\"border-image: url(:/images/icons/botton_up.png);}\\n\"\n\"QDoubleSpinBox::up-button::hover{\\n\"\n\"background:rgba(200, 200, 200,100);\\n\"\n\"border-image: url(:/images/icons/botton_up.png);}\")\n self.doubleSpinBox_interval.setDecimals(0)\n self.doubleSpinBox_interval.setMaximum(10.0)\n self.doubleSpinBox_interval.setObjectName(\"doubleSpinBox_interval\")\n self.horizontalLayout_7.addWidget(self.doubleSpinBox_interval)\n self.horizontalSlider_interval = QtWidgets.QSlider(self.groupBox_6)\n self.horizontalSlider_interval.setStyleSheet(\"QSlider{\\n\"\n\"border-color: 
#bcbcbc;\\n\"\n\"color:#d9d9d9;\\n\"\n\"}\\n\"\n\"QSlider::groove:horizontal { \\n\"\n\" border: 1px solid #999999; \\n\"\n\" height: 3px; \\n\"\n\" margin: 0px 0; \\n\"\n\" left: 5px; right: 5px; \\n\"\n\" }\\n\"\n\"QSlider::handle:horizontal { \\n\"\n\" border: 0px ; \\n\"\n\" border-image: url(:/images/icons/point.png);\\n\"\n\" width:15px;\\n\"\n\" margin: -7px -7px -7px -7px; \\n\"\n\"} \\n\"\n\"QSlider::add-page:horizontal{\\n\"\n\"background: #d9d9d9; \\n\"\n\"\\n\"\n\"}\\n\"\n\"QSlider::sub-page:horizontal{ \\n\"\n\" background: #373737; \\n\"\n\"}\")\n self.horizontalSlider_interval.setMaximum(10)\n self.horizontalSlider_interval.setPageStep(1)\n self.horizontalSlider_interval.setOrientation(QtCore.Qt.Orientation.Horizontal)\n self.horizontalSlider_interval.setObjectName(\"horizontalSlider_interval\")\n self.horizontalLayout_7.addWidget(self.horizontalSlider_interval)\n self.verticalLayout_2.addWidget(self.groupBox_6)\n self.verticalLayout_2.setStretch(0, 3)\n self.verticalLayout_2.setStretch(1, 1)\n self.verticalLayout_2.setStretch(2, 2)\n self.verticalLayout_2.setStretch(3, 1)\n self.verticalLayout_2.setStretch(4, 1)\n self.verticalLayout_2.setStretch(5, 1)\n self.horizontalLayout.addLayout(self.verticalLayout_2)\n self.verticalLayout_3 = QtWidgets.QVBoxLayout()\n self.verticalLayout_3.setObjectName(\"verticalLayout_3\")\n self.label_display = QtWidgets.QLabel(self.centralwidget)\n self.label_display.setStyleSheet(\"background-color: rgb(0, 0, 0);\")\n self.label_display.setText(\"\")\n self.label_display.setObjectName(\"label_display\")\n self.verticalLayout_3.addWidget(self.label_display)\n self.horizontalLayout_8 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_8.setObjectName(\"horizontalLayout_8\")\n self.pushButton_play = QtWidgets.QPushButton(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton_play.sizePolicy().hasHeightForWidth())\n self.pushButton_play.setSizePolicy(sizePolicy)\n self.pushButton_play.setMinimumSize(QtCore.QSize(40, 40))\n self.pushButton_play.setStyleSheet(\"QPushButton {\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-radius: 0px;\\n\"\n\"background-color: rgba(223, 223, 223, 0);\\n\"\n\"}\\n\"\n\"QPushButton::focus{outline: none;}\\n\"\n\"QPushButton::hover {\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-radius: 0px;\\n\"\n\"background-color: rgba(223, 223, 223, 150);\\n\"\n\"}\")\n self.pushButton_play.setText(\"\")\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/pause.png\"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.Off)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/run.png\"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.On)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/pause.png\"), QtGui.QIcon.Mode.Disabled, QtGui.QIcon.State.Off)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/run.png\"), QtGui.QIcon.Mode.Disabled, QtGui.QIcon.State.On)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/pause.png\"), QtGui.QIcon.Mode.Active, QtGui.QIcon.State.Off)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/run.png\"), QtGui.QIcon.Mode.Active, QtGui.QIcon.State.On)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/pause.png\"), QtGui.QIcon.Mode.Selected, QtGui.QIcon.State.Off)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/run.png\"), QtGui.QIcon.Mode.Selected, QtGui.QIcon.State.On)\n 
self.pushButton_play.setIcon(icon1)\n self.pushButton_play.setIconSize(QtCore.QSize(30, 30))\n self.pushButton_play.setCheckable(True)\n self.pushButton_play.setObjectName(\"pushButton_play\")\n self.horizontalLayout_8.addWidget(self.pushButton_play)\n self.progressBar_play = QtWidgets.QProgressBar(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.progressBar_play.sizePolicy().hasHeightForWidth())\n self.progressBar_play.setSizePolicy(sizePolicy)\n self.progressBar_play.setMinimumSize(QtCore.QSize(0, 0))\n self.progressBar_play.setStyleSheet(\"QProgressBar{ \\n\"\n\"color: rgb(255, 255, 255); \\n\"\n\"font:12pt;\\n\"\n\" border-radius:2px; \\n\"\n\"text-align:center; \\n\"\n\"border:none; \\n\"\n\"background-color: rgba(215, 215, 215,100);} \\n\"\n\"\\n\"\n\"QProgressBar:chunk{ \\n\"\n\"border-radius:0px; \\n\"\n\"background: rgba(55, 55, 55, 200);}\")\n self.progressBar_play.setMaximum(1000)\n self.progressBar_play.setProperty(\"value\", 0)\n self.progressBar_play.setFormat(\"\")\n self.progressBar_play.setObjectName(\"progressBar_play\")\n self.horizontalLayout_8.addWidget(self.progressBar_play)\n self.pushButton_stop = QtWidgets.QPushButton(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton_stop.sizePolicy().hasHeightForWidth())\n self.pushButton_stop.setSizePolicy(sizePolicy)\n self.pushButton_stop.setMinimumSize(QtCore.QSize(40, 40))\n self.pushButton_stop.setStyleSheet(\"QPushButton {\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-radius: 0px;\\n\"\n\"background-color: rgba(223, 223, 223, 0);\\n\"\n\"}\\n\"\n\"QPushButton::focus{outline: none;}\\n\"\n\"QPushButton::hover {\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-radius: 0px;\\n\"\n\"background-color: rgba(223, 223, 223, 150);}\")\n self.pushButton_stop.setText(\"\")\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(\":/images/icons/stop.png\"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.Off)\n self.pushButton_stop.setIcon(icon2)\n self.pushButton_stop.setIconSize(QtCore.QSize(30, 30))\n self.pushButton_stop.setObjectName(\"pushButton_stop\")\n self.horizontalLayout_8.addWidget(self.pushButton_stop)\n self.horizontalLayout_8.setStretch(0, 1)\n self.horizontalLayout_8.setStretch(1, 12)\n self.horizontalLayout_8.setStretch(2, 1)\n self.verticalLayout_3.addLayout(self.horizontalLayout_8)\n self.tableWidget_results = QtWidgets.QTableWidget(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.tableWidget_results.sizePolicy().hasHeightForWidth())\n self.tableWidget_results.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setFamily(\"Ubuntu\")\n font.setPointSize(11)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.tableWidget_results.setFont(font)\n self.tableWidget_results.setAutoFillBackground(True)\n self.tableWidget_results.setStyleSheet(\"\")\n self.tableWidget_results.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAsNeeded)\n 
self.tableWidget_results.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.SizeAdjustPolicy.AdjustToContents)\n self.tableWidget_results.setObjectName(\"tableWidget_results\")\n self.tableWidget_results.setColumnCount(4)\n self.tableWidget_results.setRowCount(0)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget_results.setHorizontalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget_results.setHorizontalHeaderItem(1, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget_results.setHorizontalHeaderItem(2, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget_results.setHorizontalHeaderItem(3, item)\n self.tableWidget_results.horizontalHeader().setCascadingSectionResizes(True)\n self.tableWidget_results.horizontalHeader().setSortIndicatorShown(False)\n self.tableWidget_results.horizontalHeader().setStretchLastSection(True)\n self.verticalLayout_3.addWidget(self.tableWidget_results)\n self.verticalLayout_3.setStretch(0, 15)\n self.verticalLayout_3.setStretch(1, 1)\n self.verticalLayout_3.setStretch(2, 4)\n self.horizontalLayout.addLayout(self.verticalLayout_3)\n self.horizontalLayout.setStretch(0, 2)\n self.horizontalLayout.setStretch(1, 12)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.label_status = QtWidgets.QLabel(self.centralwidget)\n self.label_status.setStyleSheet(\"QLabel\\n\"\n\"{\\n\"\n\" font-size: 16px;\\n\"\n\" font-weight: light;\\n\"\n\" border-radius:9px;\\n\"\n\" background:rgba(66, 195, 255, 0);\\n\"\n\"color: rgb(218, 218, 218);\\n\"\n\"}\\n\"\n\"\")\n self.label_status.setText(\"\")\n self.label_status.setObjectName(\"label_status\")\n self.verticalLayout.addWidget(self.label_status)\n self.verticalLayout.setStretch(0, 9)\n self.horizontalLayout_4.addLayout(self.verticalLayout)\n MainWindow.setCentralWidget(self.centralwidget)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"YOLOv8 GUI\"))\n self.groupBox.setTitle(_translate(\"MainWindow\", \"Tasks\"))\n self.radioButton_det.setText(_translate(\"MainWindow\", \"Detection\"))\n self.radioButton_seg.setText(_translate(\"MainWindow\", \"Segmentation\"))\n self.radioButton_pose.setText(_translate(\"MainWindow\", \"Pose Estimation\"))\n self.groupBox_2.setTitle(_translate(\"MainWindow\", \"Models\"))\n self.comboBox_model.setItemText(0, _translate(\"MainWindow\", \"YOLOv8n\"))\n self.comboBox_model.setItemText(1, _translate(\"MainWindow\", \"YOLOv8s\"))\n self.comboBox_model.setItemText(2, _translate(\"MainWindow\", \"YOLOv8m\"))\n self.comboBox_model.setItemText(3, _translate(\"MainWindow\", \"YOLOv8l\"))\n self.comboBox_model.setItemText(4, _translate(\"MainWindow\", \"YOLOv8x\"))\n self.groupBox_3.setTitle(_translate(\"MainWindow\", \"Inputs\"))\n self.groupBox_4.setTitle(_translate(\"MainWindow\", \"Confidence\"))\n self.groupBox_5.setTitle(_translate(\"MainWindow\", \"IoU\"))\n self.groupBox_6.setTitle(_translate(\"MainWindow\", \"Frame 
Interval\"))\n item = self.tableWidget_results.horizontalHeaderItem(0)\n item.setText(_translate(\"MainWindow\", \"ID\"))\n item = self.tableWidget_results.horizontalHeaderItem(1)\n item.setText(_translate(\"MainWindow\", \"Class\"))\n item = self.tableWidget_results.horizontalHeaderItem(2)\n item.setText(_translate(\"MainWindow\", \"Confidence\"))\n item = self.tableWidget_results.horizontalHeaderItem(3)\n item.setText(_translate(\"MainWindow\", \"BBox\"))" }, { "identifier": "FileProcessThread", "path": "src/qt/video/video_worker.py", "snippet": "class FileProcessThread(QThread):\n send_thread_start_finish_flag = pyqtSignal(str)\n send_video_info = pyqtSignal(dict)\n send_ai_output = pyqtSignal(list)\n send_display_frame = pyqtSignal(QImage)\n send_play_progress = pyqtSignal(int)\n def __init__(self):\n super(FileProcessThread, self).__init__()\n self.thread_name = \"FileProcessThread\"\n self.threadFlag = False\n \n def set_start_config(self, video_path, ai_task, screen_size, model_name=\"yolov8n\", confidence_threshold=0.35, iou_threshold=0.45, frame_interval=0):\n self.threadFlag = True\n self.video_path = video_path\n self.ai_task = ai_task\n self.pause_process = False\n self.confi_thr = confidence_threshold\n self.iou_thr = iou_threshold\n self.model_name = model_name\n self.frame_interval = frame_interval\n self.get_screen_size(screen_size)\n self._init_yolo()\n self._init_tracker()\n\n def set_iou_threshold(self, iou_threshold):\n self.iou_thr = iou_threshold\n \n def set_confidence_threshold(self, confidence_threshold):\n self.confi_thr = confidence_threshold\n \n def set_model_name(self, model_name):\n self.model_name = model_name\n \n def set_frame_interval(self, frame_interval):\n self.frame_interval = frame_interval\n \n def get_screen_size(self, screen_size):\n self.iw, self.ih = screen_size\n\n def _init_yolo(self):\n if self.ai_task == \"object_detection\":\n self.detector = YoloDetector()\n self.detector.init(\n model_path=os.path.join(ROOT, f\"weights/detection/{self.model_name}.onnx\"),\n class_txt_path=os.path.join(ROOT, \"weights/classes.txt\"),\n confidence_threshold=self.confi_thr,\n iou_threshold=self.iou_thr)\n elif self.ai_task == \"pose_detection\":\n self.pose_detector = PoseDetector()\n self.pose_detector.init(\n model_path=os.path.join(ROOT, f\"weights/pose/{self.model_name}-pose.onnx\"),\n confidence_threshold=self.confi_thr,\n iou_threshold=self.iou_thr)\n elif self.ai_task == \"segmentation\":\n self.seg_detector = YOLOSeg()\n self.seg_detector.init(\n model_path=os.path.join(ROOT, f\"weights/segmentation/{self.model_name}-seg.onnx\"),\n class_txt_path=os.path.join(ROOT, \"weights/classes.txt\"),\n confidence_threshold=self.confi_thr,\n iou_threshold=self.iou_thr)\n\n def _init_tracker(self):\n self.tracker = DeepSort(\n model_path=os.path.join(ROOT, f\"src/models/tracking/deep_sort/deep/checkpoint/ckpt.t7\"))\n \n def stop_process(self):\n self.threadFlag = False\n \n def toggle_play_pause(self):\n self.pause_process = not self.pause_process\n \n def run(self):\n self.send_thread_start_finish_flag.emit(\"processing_on_file\")\n media_fmt = self.check_image_or_video(self.video_path)\n cap = cv.VideoCapture(self.video_path)\n if not cap.isOpened():\n raise IOError(\"Couldn't open webcam or video\")\n video_info = self.get_video_info(cap)\n self.send_video_info.emit(video_info)\n\n model_output = []\n frame_id = 1\n while self.threadFlag:\n if self.pause_process:\n continue\n ret, frame = cap.read()\n if ret is False:\n break\n\n if frame_id % 
int(self.frame_interval+1) == 0:\n model_output = []\n if self.ai_task == \"object_detection\":\n model_output = self.detector.inference(frame, self.confi_thr, self.iou_thr)\n elif self.ai_task == \"pose_detection\":\n model_output = self.pose_detector.inference(frame, self.confi_thr, self.iou_thr)\n elif self.ai_task == \"segmentation\":\n model_output = self.seg_detector.inference(frame, self.confi_thr, self.iou_thr)\n \n if media_fmt == \"video\":\n model_output = self.tracker.update(\n detection_results=model_output,\n ori_img=frame)\n model_output = add_image_id(model_output, frame_id)\n frame = draw_results(frame, model_output)\n display_frame = self.convert_cv_qt(frame, self.ih, self.iw)\n\n self.send_display_frame.emit(display_frame)\n self.send_play_progress.emit(int(frame_id/video_info[\"length\"]*1000))\n self.send_ai_output.emit(model_output)\n frame_id += 1\n cap.release()\n if media_fmt == \"video\":\n blank_image = np.zeros((self.ih, self.iw, 3))\n blank_image = cv.cvtColor(blank_image.astype('uint8'), cv.COLOR_BGR2RGBA)\n show_image = QImage(blank_image.data, blank_image.shape[1], blank_image.shape[0], QImage.Format.Format_RGBA8888)\n self.send_display_frame.emit(show_image)\n self.send_ai_output.emit([])\n self.send_thread_start_finish_flag.emit(\"waiting_for_setting\")\n \n def get_video_info(self, video_cap):\n video_info = {}\n video_info[\"FPS\"] = video_cap.get(cv.CAP_PROP_FPS)\n video_info[\"length\"] = int(video_cap.get(cv.CAP_PROP_FRAME_COUNT))\n video_info[\"size\"] = (int(video_cap.get(cv.CAP_PROP_FRAME_WIDTH)),int(video_cap.get(cv.CAP_PROP_FRAME_HEIGHT)))\n return video_info\n\n def check_image_or_video(self, media_path):\n img_fm = (\".tif\", \".tiff\", \".jpg\", \".jpeg\", \".gif\", \".png\", \".eps\", \".raw\", \".cr2\", \".nef\", \".orf\", \".sr2\", \".bmp\", \".ppm\", \".heif\")\n vid_fm = (\".flv\", \".avi\", \".mp4\", \".3gp\", \".mov\", \".webm\", \".ogg\", \".qt\", \".avchd\")\n media_fms = {\"image\": img_fm, \"video\": vid_fm}\n if any(media_path.lower().endswith(media_fms[\"image\"]) for ext in media_fms[\"image\"]):\n return \"image\"\n elif any(media_path.lower().endswith(media_fms[\"video\"]) for ext in media_fms[\"video\"]):\n return \"video\"\n else:\n raise TypeError(\"Please select an image or video\")\n \n def convert_cv_qt(self, image, screen_height, screen_width):\n h, w, _ = image.shape\n scale = min(screen_width / w, screen_height / h)\n nw, nh = int(scale * w), int(scale * h)\n image_resized = cv.resize(image, (nw, nh))\n image_paded = np.full(shape=[screen_height, screen_width, 3], fill_value=0)\n dw, dh = (screen_width - nw) // 2, (screen_height - nh) // 2\n image_paded[dh:nh + dh, dw:nw + dw, :] = image_resized\n image_paded = cv.cvtColor(image_paded.astype('uint8'), cv.COLOR_BGR2RGBA)\n return QImage(image_paded.data, image_paded.shape[1], image_paded.shape[0], QImage.Format.Format_RGBA8888)" } ]
import_statement:
from src.qt.stream.video_capture import CameraCaptureThread
from src.qt.stream.visualize import VideoVisualizationThread
from src.qt.stream.ai_worker import AiWorkerThread
from src.ui.main_window import Ui_MainWindow
from src.qt.video.video_worker import FileProcessThread
from PyQt6 import QtGui, QtWidgets
from PyQt6.QtCore import Qt
import sys
import numpy as np
token_num: 12,247
cropped_code:
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)
        self.ai_thread = AiWorkerThread()
        self.camera_thread = CameraCaptureThread()
        self.display_thread = VideoVisualizationThread()

all_code (preview truncated):
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)
        self.ai_thread = AiWorkerThread()
        self.camera_thread = CameraCaptureThread()
        self.display_thread = VideoVisualizationThread()
next_line: self.file_process_thread = FileProcessThread()
gold_snippet_index: 4
created_at: 2023-10-18 09:21:01+00:00
level: 16k
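Assuming rows like the one above are consumed as next-line-prediction samples, the sketch below shows one plausible way to assemble a prompt from the context snippets plus the in-file prefix and to score a prediction against next_line. The prompt layout and exact-match scoring are illustrative assumptions, not something stated in the dump.

def build_prompt(row: dict) -> str:
    # Prepend the cross-file context snippets, then the in-file code prefix.
    context_block = "\n\n".join(
        f"# {c['path']}\n{c['snippet']}" for c in row["context"]
    )
    return context_block + "\n\n" + row["cropped_code"]

def exact_match(prediction: str, row: dict) -> bool:
    # Compare the first generated line against the reference next_line.
    first_line = prediction.strip().splitlines()[0] if prediction.strip() else ""
    return first_line == row["next_line"].strip()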
Example row 2

repo_name: S-LoRA/S-LoRA
file_path: slora/models/llama/model.py
context:
[ { "identifier": "LlamaPreLayerInfer", "path": "slora/models/llama/layer_infer/pre_layer_infer.py", "snippet": "class LlamaPreLayerInfer(PreLayerInferTpl):\n \"\"\"\n \"\"\"\n\n def __init__(self, tp_rank, world_size, network_config, mode):\n super().__init__(tp_rank, world_size, network_config, mode)\n tp_vocab_size_ = network_config[\"vocab_size\"] // self.world_size_\n self.vob_start_id_ = tp_vocab_size_ * self.tp_rank_\n self.vob_end_id_ = tp_vocab_size_ * (self.tp_rank_ + 1)\n return\n\n @mark_cost_time(\"pre context forward\")\n def context_forward(self, input_ids, infer_state: LlamaInferStateInfo, layer_weight: LlamaPreAndPostLayerWeight):\n total_token_num = infer_state.total_token_num\n input_ids = input_ids[0:total_token_num]\n\n input_mask = torch.logical_or(self.vob_start_id_ > input_ids, input_ids >= self.vob_end_id_)\n tmp_input_ids = (input_ids - self.vob_start_id_)\n tmp_input_ids[input_mask] = 0\n input_embdings = torch.embedding(layer_weight.wte_weight_, tmp_input_ids, padding_idx=-1)\n input_embdings[input_mask] = 0.0\n if self.world_size_ > 1:\n dist.all_reduce(input_embdings, op=dist.ReduceOp.SUM, async_op=False)\n return input_embdings\n\n def token_forward(self, input_ids, infer_state: LlamaInferStateInfo, layer_weight: LlamaPreAndPostLayerWeight):\n input_mask = torch.logical_or(self.vob_start_id_ > input_ids, input_ids >= self.vob_end_id_)\n tmp_input_ids = (input_ids - self.vob_start_id_)\n tmp_input_ids[input_mask] = 0\n input_embdings = torch.embedding(layer_weight.wte_weight_, tmp_input_ids, padding_idx=-1)\n input_embdings[input_mask] = 0.0\n if self.world_size_ > 1:\n dist.all_reduce(input_embdings, op=dist.ReduceOp.SUM, async_op=False)\n return input_embdings" }, { "identifier": "LlamaPostLayerInfer", "path": "slora/models/llama/layer_infer/post_layer_infer.py", "snippet": "class LlamaPostLayerInfer(PostLayerInferTpl):\n \"\"\"\n \"\"\"\n\n def __init__(self, tp_rank, world_size, network_config, mode):\n super().__init__(tp_rank, world_size, network_config, mode)\n self.eps_ = network_config[\"rms_norm_eps\"]\n self.vocab_size_ = network_config[\"vocab_size\"]\n self.embed_dim_ = network_config[\"n_embed\"]\n return\n \n def _norm(self, input, infer_state, layer_weight:LlamaPreAndPostLayerWeight) -> torch.Tensor:\n return rmsnorm_forward(input, layer_weight.final_norm_weight_, eps=self.eps_)\n\n def soft_max(self, data):\n return torch.softmax(data.permute(1, 0).float(), dim=-1)\n\n def token_forward(self, input_embdings, infer_state: LlamaInferStateInfo, layer_weight: LlamaPreAndPostLayerWeight, return_logics=False):\n batch_size = infer_state.batch_size\n last_input = torch.empty((batch_size, self.embed_dim_), device=input_embdings.device, dtype=torch.float16)\n if infer_state.is_prefill:\n last_index = torch.cumsum(infer_state.b_seq_len, dim=0, dtype=torch.long) - 1\n last_input[:, :] = input_embdings[last_index, :]\n else:\n last_input[:, :] = input_embdings[-batch_size:, :]\n input_embdings = None\n last_input = self._norm(last_input, infer_state, layer_weight)\n last_input = rearrange(last_input, \"batch embed_dim -> embed_dim batch\").contiguous().reshape(-1, batch_size)\n logic_batch = torch.mm(layer_weight.lm_head_weight_, last_input)\n last_input = None\n if self.world_size_ == 1:\n gather_data = logic_batch\n else:\n gather_data = torch.empty((self.vocab_size_, batch_size), device=logic_batch.device, dtype=torch.float16)\n split_size = self.vocab_size_ // self.world_size_\n dist.all_gather([gather_data[i * split_size: (i + 1) * split_size, :]\n 
for i in range(self.world_size_)], logic_batch, group=None, async_op=False)\n logic_batch = None\n\n if not return_logics:\n prob_out = self.soft_max(gather_data)\n gather_data = None\n return prob_out\n else:\n ans_logics = gather_data.permute(1, 0).float()\n gather_data = None\n return ans_logics" }, { "identifier": "LlamaTransformerLayerInfer", "path": "slora/models/llama/layer_infer/transformer_layer_infer.py", "snippet": "class LlamaTransformerLayerInfer(TransformerLayerInferTpl):\n \"\"\"\n \"\"\"\n\n def __init__(self, layer_num, tp_rank, world_size, network_config, mode=[]):\n super().__init__(layer_num, tp_rank, world_size, network_config, mode)\n self.eps_ = network_config[\"rms_norm_eps\"]\n self.tp_q_head_num_ = network_config[\"num_attention_heads\"] // self.world_size_\n self.tp_k_head_num_ = self.tp_q_head_num_\n self.tp_v_head_num_ = self.tp_q_head_num_\n self.tp_o_head_num_ = self.tp_q_head_num_\n self.head_dim_ = network_config[\"hidden_size\"] // network_config[\"num_attention_heads\"]\n self.embed_dim_ = network_config[\"hidden_size\"]\n return\n\n \n def _att_norm(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:\n return rmsnorm_forward(input, weight=layer_weight.att_norm_weight_, eps=self.eps_)\n \n def _ffn_norm(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:\n return rmsnorm_forward(input, weight=layer_weight.ffn_norm_weight_, eps=self.eps_)\n\n def _get_qkv(self, input, cache_k, cache_v, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:\n q = torch.mm(input.view(-1, self.embed_dim_), layer_weight.q_weight_)\n rotary_emb_fwd(q.view(-1, self.tp_q_head_num_, self.head_dim_), infer_state.position_cos, infer_state.position_sin)\n torch.mm(input.view(-1, self.embed_dim_), layer_weight.k_weight_,\n out=cache_k.view(-1, self.tp_k_head_num_ * self.head_dim_))\n rotary_emb_fwd(cache_k, infer_state.position_cos, infer_state.position_sin)\n torch.mm(input.view(-1, self.embed_dim_), layer_weight.v_weight_,\n out=cache_v.view(-1, self.tp_v_head_num_ * self.head_dim_))\n return q\n \n def _post_cache_kv(self, cache_k, cache_v, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight):\n mem_manager = infer_state.mem_manager\n if infer_state.is_prefill:\n self._copy_kv_to_mem_cache(cache_k, cache_v, infer_state.prefill_mem_index, mem_manager)\n return\n else:\n if not infer_state.decode_is_contiguous:\n self._copy_kv_to_mem_cache(cache_k, cache_v, infer_state.decode_mem_index, mem_manager)\n return\n return\n \n def _context_attention_kernel(self, q, k, v, infer_state:LlamaInferStateInfo, layer_weight)->torch.Tensor:\n o_tensor = torch.empty_like(q)\n context_attention_fwd(q.view(-1, self.tp_q_head_num_, self.head_dim_),\n k.view(-1, self.tp_k_head_num_, self.head_dim_),\n v.view(-1, self.tp_v_head_num_, self.head_dim_),\n o_tensor.view(-1, self.tp_q_head_num_, self.head_dim_),\n infer_state.b_start_loc,\n infer_state.b_seq_len,\n infer_state.max_len_in_batch)\n return o_tensor\n \n def _token_attention_kernel(self, q, infer_state:LlamaInferStateInfo, layer_weight)->torch.Tensor:\n return self._token_decode_attention_mode(q, infer_state)\n\n def _get_o(self, input, infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:\n o_tensor = torch.mm(input.view(-1, self.tp_o_head_num_ * self.head_dim_), layer_weight.o_weight_)\n return o_tensor\n\n def _ffn(self, input, 
infer_state:LlamaInferStateInfo, layer_weight:LlamaTransformerLayerWeight)->torch.Tensor:\n gate_out = torch.mm(input.view(-1, self.embed_dim_), layer_weight.gate_proj)\n torch.nn.functional.silu(gate_out, inplace=True)\n up_out = torch.mm(input.view(-1, self.embed_dim_), layer_weight.up_proj)\n input = None\n ffn1_out = gate_out * up_out\n gate_out, up_out = None, None\n ffn2_out = torch.mm(ffn1_out, layer_weight.down_proj)\n ffn1_out = None\n return ffn2_out\n \n def _copy_kv_to_mem_cache(self, key_buffer, value_buffer, mem_index, mem_manager):\n if \"int8kv\" in self.mode:\n destindex_copy_quantize_kv(key_buffer,\n mem_index,\n mem_manager.key_buffer[self.layer_num_],\n mem_manager.key_scale_buffer[self.layer_num_])\n destindex_copy_quantize_kv(value_buffer,\n mem_index,\n mem_manager.value_buffer[self.layer_num_],\n mem_manager.value_scale_buffer[self.layer_num_])\n else:\n destindex_copy_kv(key_buffer, mem_index, mem_manager.key_buffer[self.layer_num_])\n destindex_copy_kv(value_buffer, mem_index, mem_manager.value_buffer[self.layer_num_])\n \n def _token_decode_attention_normal(self, q, infer_state: LlamaInferStateInfo):\n total_token_num = infer_state.total_token_num\n batch_size = infer_state.batch_size\n calcu_shape1 = (batch_size, self.tp_q_head_num_, self.head_dim_)\n att_m_tensor = torch.empty((self.tp_q_head_num_, total_token_num), dtype=q.dtype, device=\"cuda\")\n\n token_att_fwd(q.view(calcu_shape1),\n infer_state.mem_manager.key_buffer[self.layer_num_],\n att_m_tensor,\n infer_state.b_loc,\n infer_state.b_start_loc,\n infer_state.b_seq_len,\n infer_state.max_len_in_batch)\n \n if triton.__version__ == \"2.0.0\":\n prob = torch.empty_like(att_m_tensor)\n token_softmax_fwd(att_m_tensor, infer_state.b_start_loc, infer_state.b_seq_len, prob, infer_state.max_len_in_batch)\n att_m_tensor = None\n\n o_tensor = torch.empty_like(q)\n\n token_att_fwd2(prob,\n infer_state.mem_manager.value_buffer[self.layer_num_],\n o_tensor.view(calcu_shape1),\n infer_state.b_loc,\n infer_state.b_start_loc,\n infer_state.b_seq_len,\n infer_state.max_len_in_batch)\n prob = None\n return o_tensor\n elif triton.__version__ >= \"2.1.0\":\n o_tensor = torch.empty_like(q)\n from slora.models.llama.triton_kernel.token_attention_softmax_and_reducev import token_softmax_reducev_fwd\n token_softmax_reducev_fwd(att_m_tensor, \n infer_state.mem_manager.value_buffer[self.layer_num_],\n o_tensor.view(calcu_shape1),\n infer_state.b_loc,\n infer_state.b_start_loc,\n infer_state.b_seq_len,\n infer_state.max_len_in_batch,\n infer_state.other_kv_index)\n return o_tensor\n else:\n raise Exception(\"not support triton version\")\n\n def _token_decode_attention_int8kv(self, q, infer_state: LlamaInferStateInfo):\n total_token_num = infer_state.total_token_num\n batch_size = infer_state.batch_size\n calcu_shape1 = (batch_size, self.tp_q_head_num_, self.head_dim_)\n att_m_tensor = torch.empty((self.tp_q_head_num_, total_token_num), dtype=q.dtype, device=\"cuda\")\n token_att_fwd_int8k(q.view(calcu_shape1),\n infer_state.mem_manager.key_buffer[self.layer_num_],\n infer_state.mem_manager.key_scale_buffer[self.layer_num_],\n att_m_tensor,\n infer_state.b_loc,\n infer_state.b_start_loc,\n infer_state.b_seq_len,\n infer_state.max_len_in_batch)\n\n prob = torch.empty_like(att_m_tensor)\n token_softmax_fwd(att_m_tensor, infer_state.b_start_loc, infer_state.b_seq_len, prob, infer_state.max_len_in_batch)\n att_m_tensor = None\n\n o_tensor = torch.empty_like(q)\n token_att_fwd2_int8v(prob,\n 
infer_state.mem_manager.value_buffer[self.layer_num_],\n infer_state.mem_manager.value_scale_buffer[self.layer_num_],\n o_tensor.view(calcu_shape1),\n infer_state.b_loc,\n infer_state.b_start_loc,\n infer_state.b_seq_len,\n infer_state.max_len_in_batch)\n prob = None\n return o_tensor\n \n def _token_decode_attention_mode(self, q, infer_state: LlamaInferStateInfo):\n if \"int8kv\" in self.mode:\n return self._token_decode_attention_int8kv(q, infer_state)\n else:\n return self._token_decode_attention_normal(q, infer_state)" }, { "identifier": "LlamaPreAndPostLayerWeight", "path": "slora/models/llama/layer_weights/pre_and_post_layer_weight.py", "snippet": "class LlamaPreAndPostLayerWeight(PreAndPostLayerWeight):\n def __init__(self, tp_rank, world_size, data_type, network_config, mode):\n super().__init__(tp_rank, world_size, data_type, network_config, mode)\n return\n\n\n def load_dummy_weights(self):\n vob_size = self.network_config_[\"vocab_size\"]\n split_vob_size = vob_size // self.world_size_\n n_embed = self.network_config_[\"hidden_size\"]\n self.wte_weight_ = (torch.rand((split_vob_size, n_embed), \n dtype=self.data_type_, device=\"cuda\").contiguous() * 2 - 1) * 1e-3\n self.lm_head_weight_ = (torch.rand((split_vob_size, n_embed), \n dtype=self.data_type_, device=\"cuda\").contiguous() * 2 - 1) * 1e-3\n self.final_norm_weight_ = (torch.rand((n_embed), \n dtype=self.data_type_, device=\"cuda\") * 2 - 1) * 1e-3\n \n\n def load_hf_weights(self, weights, dummy=False):\n if dummy:\n self.load_dummy_weights()\n return\n\n vob_size = self.network_config_[\"vocab_size\"]\n split_vob_size = vob_size // self.world_size_\n n_embed = self.network_config_[\"hidden_size\"]\n if \"model.embed_tokens.weight\" in weights:\n # print(weights['model.embed_tokens.weight'].shape)\n self.wte_weight_ = self._cuda(weights['model.embed_tokens.weight'][split_vob_size *\n self.tp_rank_: split_vob_size * (self.tp_rank_ + 1), :])\n if 'lm_head.weight' in weights:\n # print(weights['lm_head.weight'].shape)\n self.lm_head_weight_ = self._cuda(weights['lm_head.weight'][split_vob_size * self.tp_rank_: split_vob_size *\n (self.tp_rank_ + 1), :])\n if 'model.norm.weight' in weights:\n self.final_norm_weight_ = self._cuda(weights['model.norm.weight'])\n\n return\n \n def verify_load(self):\n errors = \"weights load not ok\"\n weights = [self.wte_weight_, \n self.lm_head_weight_, \n self.final_norm_weight_]\n for i in range(len(weights)):\n assert weights[i] is not None, \"index:\" + str(i) + \" \" + errors\n return " }, { "identifier": "LlamaTransformerLayerWeight", "path": "slora/models/llama/layer_weights/transformer_layer_weight.py", "snippet": "class LlamaTransformerLayerWeight(TransformerLayerWeight):\n def __init__(self, layer_num, tp_rank, world_size, data_type, network_config, mode=[]):\n super().__init__(layer_num, tp_rank, world_size, data_type, network_config, mode)\n\n\n def load_hf_weights(self, weights, dummy=False):\n if dummy:\n self._load_qkvo_dummy_weights()\n self._load_ffn_dummy_weights()\n else:\n self._load_qkvo_weights(weights)\n self._load_ffn_weights(weights)\n\n \n def verify_load(self):\n errors = \"weights load not ok\"\n weights = [self.att_norm_weight_,\n self.q_weight_,\n self.k_weight_,\n self.v_weight_,\n self.o_weight_,\n self.ffn_norm_weight_,\n self.up_proj,\n self.gate_proj,\n self.down_proj\n ]\n for i in range(len(weights)):\n assert weights[i] is not None, \"index:\" + str(i) + \" \" + errors\n\n\n def _load_qkvo_dummy_weights(self):\n n_embed = 
self.network_config_[\"hidden_size\"]\n split_n_embed = n_embed // self.world_size_\n # input layernorm params\n self.att_norm_weight_ = (torch.rand((n_embed), dtype=self.data_type_, device=\"cuda\") * 2 - 1) * 1e-3\n # attention params\n self.q_weight_ = (torch.rand((split_n_embed, n_embed), \n dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2 - 1) * 1e-3\n self.k_weight_ = (torch.rand((split_n_embed, n_embed), \n dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2 - 1) * 1e-3\n self.v_weight_ = (torch.rand((split_n_embed, n_embed), \n dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2 - 1) * 1e-3\n # attention output dense params\n self.o_weight_ = (torch.rand((n_embed, split_n_embed),\n dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2 - 1) * 1e-3\n \n\n def _load_ffn_dummy_weights(self):\n n_embed = self.network_config_[\"hidden_size\"]\n inter_size = self.network_config_['intermediate_size']\n split_inter_size = inter_size // self.world_size_\n\n self.ffn_norm_weight_ = (torch.rand((n_embed), dtype=self.data_type_, device=\"cuda\") * 2 - 1) * 1e-3\n\n self.up_proj = (torch.rand((split_inter_size, n_embed),\n dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2 - 1) * 1e-3\n self.gate_proj = (torch.rand((split_inter_size, n_embed),\n dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2 - 1) * 1e-3\n self.down_proj = (torch.rand((n_embed, split_inter_size),\n dtype=self.data_type_, device=\"cuda\").transpose(0, 1).contiguous() * 2 - 1) * 1e-3\n\n\n def _load_qkvo_weights(self, weights):\n # input layernorm params\n if f\"model.layers.{self.layer_num_}.input_layernorm.weight\" in weights:\n self.att_norm_weight_ = self._cuda(weights[f\"model.layers.{self.layer_num_}.input_layernorm.weight\"])\n\n n_embed = self.network_config_[\"hidden_size\"]\n split_n_embed = n_embed // self.world_size_\n # q k v weights for llama\n if f\"model.layers.{self.layer_num_}.self_attn.q_proj.weight\" in weights:\n self.q_weight_ = weights[f\"model.layers.{self.layer_num_}.self_attn.q_proj.weight\"][split_n_embed *\n self.tp_rank_: split_n_embed * (self.tp_rank_ + 1), :]\n self.q_weight_ = self._cuda(self.q_weight_.transpose(0, 1))\n if f\"model.layers.{self.layer_num_}.self_attn.k_proj.weight\" in weights:\n self.k_weight_ = weights[f\"model.layers.{self.layer_num_}.self_attn.k_proj.weight\"][split_n_embed *\n self.tp_rank_: split_n_embed * (self.tp_rank_ + 1), :]\n self.k_weight_ = self._cuda(self.k_weight_.transpose(0, 1))\n\n if f\"model.layers.{self.layer_num_}.self_attn.v_proj.weight\" in weights:\n self.v_weight_ = weights[f\"model.layers.{self.layer_num_}.self_attn.v_proj.weight\"][split_n_embed *\n self.tp_rank_: split_n_embed * (self.tp_rank_ + 1), :]\n self.v_weight_ = self._cuda(self.v_weight_.transpose(0, 1))\n \n # attention output dense params\n if f\"model.layers.{self.layer_num_}.self_attn.o_proj.weight\" in weights:\n self.o_weight_ = weights[f\"model.layers.{self.layer_num_}.self_attn.o_proj.weight\"][:,\n split_n_embed * self.tp_rank_: split_n_embed * (self.tp_rank_ + 1)]\n self.o_weight_ = self._cuda(self.o_weight_.transpose(0, 1))\n \n\n def _load_ffn_weights(self, weights):\n if f\"model.layers.{self.layer_num_}.post_attention_layernorm.weight\" in weights:\n self.ffn_norm_weight_ = self._cuda(weights[f\"model.layers.{self.layer_num_}.post_attention_layernorm.weight\"])\n \n inter_size = self.network_config_['intermediate_size']\n split_inter_size = inter_size 
// self.world_size_\n\n if f\"model.layers.{self.layer_num_}.mlp.up_proj.weight\" in weights:\n self.up_proj = weights[f\"model.layers.{self.layer_num_}.mlp.up_proj.weight\"][split_inter_size *\n self.tp_rank_: split_inter_size * (self.tp_rank_ + 1), :]\n self.up_proj = self._cuda(self.up_proj.transpose(0, 1))\n\n if f\"model.layers.{self.layer_num_}.mlp.gate_proj.weight\" in weights:\n self.gate_proj = weights[f\"model.layers.{self.layer_num_}.mlp.gate_proj.weight\"][split_inter_size *\n self.tp_rank_: split_inter_size * (self.tp_rank_ + 1), :]\n self.gate_proj = self._cuda(self.gate_proj.transpose(0, 1))\n\n if f\"model.layers.{self.layer_num_}.mlp.down_proj.weight\" in weights:\n self.down_proj = weights[f\"model.layers.{self.layer_num_}.mlp.down_proj.weight\"][:,\n split_inter_size * self.tp_rank_: split_inter_size * (self.tp_rank_ + 1)]\n self.down_proj = self._cuda(self.down_proj.transpose(0, 1))" }, { "identifier": "LlamaInferStateInfo", "path": "slora/models/llama/infer_struct.py", "snippet": "class LlamaInferStateInfo(InferStateInfo):\n def __init__(self):\n super().__init__()\n self.position_cos = None\n self.position_sin = None\n self.other_kv_index = None\n \n def init_some_extra_state(self, \n model, \n batch_size, \n total_token_num,\n max_len_in_batch,\n input_ids : torch.Tensor,\n b_loc : torch.Tensor,\n b_start_loc : torch.Tensor,\n b_seq_len : torch.Tensor,\n is_prefill):\n if is_prefill:\n b_seq_len_numpy = b_seq_len.cpu().numpy()\n position_ids = torch.from_numpy(np.concatenate([np.arange(0, b_seq_len_numpy[i])\n for i in range(len(b_seq_len_numpy))], axis=0)).cuda()\n self.position_cos = torch.index_select(model._cos_cached, 0, position_ids).view(position_ids.shape[0], -1)\n self.position_sin = torch.index_select(model._sin_cached, 0, position_ids).view(position_ids.shape[0], -1)\n position_ids = None\n else:\n self.position_cos = torch.index_select(model._cos_cached, 0, b_seq_len - 1).view(b_seq_len.shape[0], -1)\n self.position_sin = torch.index_select(model._sin_cached, 0, b_seq_len - 1).view(b_seq_len.shape[0], -1)\n self.other_kv_index = b_loc[0, max_len_in_batch - 1].item()\n return" }, { "identifier": "MemoryAllocator", "path": "slora/common/mem_allocator.py", "snippet": "class MemoryAllocator:\n def __init__(self, tot_size, cache_size, dtype, head_num, head_dim, layer_num):\n assert tot_size >= cache_size\n self.dtype = dtype\n self.head_num = head_num\n self.head_dim = head_dim\n self.layer_num = layer_num\n self.cell_size = head_num * head_dim\n\n self.tot_size = tot_size\n self.cache_size = cache_size\n\n self.reset_all_pool()\n\n\n def get_memory_size(self):\n dsize = 2 if self.dtype == torch.float16 else None\n return 2 * self.layer_num * self.tot_size * self.cell_size * dsize\n \n\n def alloc(self, need_size):\n if need_size > self.can_use_mem_size:\n raise Exception(f'warn no enough pool space: need_size {need_size} left_size {self.can_use_mem_size}')\n \n torch.cumsum(self.mem_state, dim=0, dtype=torch.int32, out=self._mem_cum_sum)\n select_index = torch.logical_and(self._mem_cum_sum <= need_size, self.mem_state == 1)\n select_index = self.indexes[select_index]\n self.mem_state[select_index] = 0\n self.can_use_mem_size -= len(select_index)\n return select_index\n\n\n def alloc_contiguous(self, need_size):\n if need_size > self.can_use_mem_size:\n raise Exception(f'warn no enough pool space: need_size {need_size} left_size {self.can_use_mem_size}')\n \n torch.cumsum(self.mem_state, dim=0, dtype=torch.int32, out=self._mem_cum_sum)\n loc_sums = 
self._mem_cum_sum[need_size - 1:self.tot_size] - self._mem_cum_sum[0:self.tot_size - need_size + 1] + self.mem_state[0:self.tot_size - need_size + 1]\n can_used_loc = self.indexes[0:self.tot_size - need_size + 1][loc_sums == need_size]\n if can_used_loc.shape[0] == 0:\n # print(f'warn no enough pool space: to contiguous need_size {need_size} left_size {self.can_use_mem_size}')\n return None\n start_loc = can_used_loc[0]\n select_index = self.indexes[start_loc : start_loc + need_size]\n \n self.mem_state[select_index] = 0\n self.can_use_mem_size -= need_size\n start = start_loc.item()\n end = start + need_size\n return select_index, start, end\n\n\n def alloc_strip(self, need_block, block_size):\n torch.cumsum(self.mem_state, dim=0, dtype=torch.int32, out=self._mem_cum_sum)\n loc_sums = self._mem_cum_sum[block_size - 1:self.tot_size] - self._mem_cum_sum[0:self.tot_size - block_size + 1] + self.mem_state[0:self.tot_size - block_size + 1]\n loc_use = (loc_sums == block_size)\n torch.cumsum(loc_use, dim=0, dtype=torch.int32, out=loc_sums)\n\n block_start = torch.empty((loc_use.shape[0]), dtype=torch.int32, device=\"cuda\")\n block_start[0] = loc_use[0]\n block_start[1:] = (loc_use[:-1] == 0) & (loc_use[1:] == 1)\n\n cum_max, _ = torch.cummax(block_start, dim=0)\n # (diff % block_size == 0) & loc_use\n mask = block_size - 1\n loc_use = (((loc_sums - cum_max) & mask) == 0) & loc_use\n can_use_loc = self.indexes[0:self.tot_size - block_size + 1][loc_use == 1]\n if can_use_loc.shape[0] < need_block:\n raise Exception(f\"no enough pool space for alloc_strip, \"\n f\"need {need_block} blocks, {can_use_loc.shape[0]} left\")\n can_use_loc = can_use_loc[:need_block]\n select_index = torch.empty((block_size, need_block), dtype=torch.int32, device=\"cuda\")\n for i in range(block_size):\n select_index[i] = can_use_loc + i\n select_index = select_index.T.reshape(-1)\n\n self.mem_state[select_index] = 0\n self.can_use_mem_size -= select_index.shape[0]\n return select_index\n\n\n def alloc_grid(self, need_grid, grid_size):\n torch.cumsum(self.mem_state, dim=0, dtype=torch.int32, out=self._mem_cum_sum)\n loc_sums = self._mem_cum_sum[grid_size - 1:self.tot_size] - self._mem_cum_sum[0:self.tot_size - grid_size + 1] + self.mem_state[0:self.tot_size - grid_size + 1]\n loc_use = (loc_sums == grid_size)\n\n mask = grid_size - 1\n loc_use = ((self.indexes[:self.tot_size - grid_size + 1] & mask) == 0) & loc_use\n can_use_loc = self.indexes[0:self.tot_size - grid_size + 1][loc_use == 1]\n if can_use_loc.shape[0] < need_grid:\n raise Exception(f\"no enough pool space for alloc_strip, \"\n f\"need {need_grid} grids, {can_use_loc.shape[0]} left\")\n can_use_loc = can_use_loc[:need_grid]\n select_index = torch.empty((grid_size, need_grid), dtype=torch.int32, device=\"cuda\")\n for i in range(grid_size):\n select_index[i] = can_use_loc + i\n select_index = select_index.T.reshape(-1)\n\n self.mem_state[select_index] = 0\n self.can_use_mem_size -= select_index.shape[0]\n return select_index\n\n\n def alloc_prefix(self, need_size):\n assert False\n if need_size > self.can_use_mem_size_prefix:\n raise Exception(f'warn no enough pool space: need_size {need_size} left_size {self.can_use_mem_size_prefix}')\n \n torch.cumsum(self.mem_state, dim=0, dtype=torch.int32, out=self._mem_cum_sum)\n select_index = torch.logical_and(self._mem_cum_sum <= need_size, self.mem_state == 1)\n select_index = self.indexes[select_index]\n self.mem_state[select_index] = 0\n self.can_use_mem_size_prefix -= len(select_index)\n return select_index\n 
\n\n def alloc_contiguous_prefix(self, need_size):\n assert False\n if need_size > self.can_use_mem_size_prefix:\n raise Exception(f'warn no enough pool space: need_size {need_size} left_size {self.can_use_mem_size_prefix}')\n \n torch.cumsum(self.mem_state, dim=0, dtype=torch.int32, out=self._mem_cum_sum)\n loc_sums = self._mem_cum_sum[need_size - 1:self.cache_size] - self._mem_cum_sum[0:self.cache_size - need_size + 1] + self.mem_state[0:self.cache_size - need_size + 1]\n can_used_loc = self.indexes[0:self.cache_size - need_size + 1][loc_sums == need_size]\n if can_used_loc.shape[0] == 0:\n # print(f'warn no enough pool space: to contiguous need_size {need_size} left_size {self.can_use_mem_size_prefix}')\n return None\n start_loc = can_used_loc[0]\n select_index = self.indexes[start_loc : start_loc + need_size]\n \n self.mem_state[select_index] = 0\n self.can_use_mem_size_prefix -= need_size\n start = start_loc.item()\n end = start + need_size\n return select_index, start, end\n\n\n def alloc_suffix(self, need_size):\n assert False\n if need_size > self.can_use_mem_size_suffix:\n raise Exception(f'warn no enough pool space: need_size {need_size} left_size {self.can_use_mem_size_suffix}')\n return None\n \n self._mem_cum_sum = suffix_cumsum(self.mem_state, dim=0, dtype=torch.int32)\n select_index = torch.logical_and(self._mem_cum_sum <= need_size, self.mem_state == 1)\n select_index = self.indexes[select_index]\n self.mem_state[select_index] = 0\n self.can_use_mem_size_suffix -= len(select_index)\n return select_index\n \n\n def alloc_contiguous_suffix(self, need_size):\n assert False\n if need_size > self.can_use_mem_size_suffix:\n raise Exception(f'warn no enough pool space: need_size {need_size} left_size {self.can_use_mem_size_suffix}')\n return None\n \n self._mem_cum_sum = suffix_cumsum(self.mem_state, dim=0, dtype=torch.int32)\n assert len(self._mem_cum_sum) == self.cache_size\n loc_sums = (self._mem_cum_sum[0:self.cache_size - need_size + 1] - self._mem_cum_sum[need_size - 1:] +\n self.mem_state[need_size - 1:])\n can_used_loc = self.indexes[0:self.cache_size - need_size + 1][loc_sums == need_size]\n if can_used_loc.shape[0] == 0:\n # print(f'warn no enough pool space: to contiguous need_size {need_size} left_size {self.can_use_mem_size_suffix}')\n return None\n start_loc = can_used_loc[0]\n select_index = self.indexes[start_loc : start_loc + need_size]\n \n self.mem_state[select_index] = 0\n self.can_use_mem_size_suffix -= need_size\n start = start_loc.item()\n end = start + need_size\n return select_index, start, end\n \n \n def free(self, free_index):\n \"\"\"_summary_\n\n Args:\n free_index (torch.Tensor): _description_\n \"\"\"\n self.can_use_mem_size += free_index.shape[0]\n # self.can_use_mem_size_prefix += torch.sum(free_index < self.cache_size)\n # self.can_use_mem_size_suffix += torch.sum(free_index >= self.cache_size)\n self.mem_state[free_index] = 1\n\n # if self.can_use_mem_size_prefix + self.can_use_mem_size_suffix == self.tot_size:\n # print(f\"freed all gpu mem size {self.tot_size}\")\n # print(f\"free state {self.can_use_mem_size_prefix} + {self.can_use_mem_size_suffix} all {self.tot_size}\")\n return\n \n def free_all(self):\n self.mem_state[:] = 1\n self.can_use_mem_size = self.tot_size\n # self.can_use_mem_size_prefix = self.cache_size\n # self.can_use_mem_size_suffix = self.tot_size - self.cache_size\n \n\n def delete_all_pool(self):\n self.mem_state = None\n self._mem_cum_sum = None\n self.indexes = None\n self.can_use_mem_size = 0\n # 
self.can_use_mem_size_prefix = 0\n # self.can_use_mem_size_suffix = 0\n self.buffer = None\n gc.collect()\n\n def delete_all_cache(self):\n self.delete_all_pool()\n\n\n def reset_all_pool(self):\n self.mem_state = torch.ones((self.tot_size,), dtype=torch.bool, device=\"cuda\")\n self._mem_cum_sum = torch.empty((self.tot_size,), dtype=torch.int32, device=\"cuda\")\n self.indexes = torch.arange(0, self.tot_size, dtype=torch.long, device=\"cuda\")\n self.can_use_mem_size = self.tot_size\n # self.can_use_mem_size_prefix = self.cache_size\n # self.can_use_mem_size_suffix = self.tot_size - self.cache_size\n self.key_buffer = [torch.empty((self.tot_size, self.head_num, self.head_dim),\n dtype=self.dtype, device=\"cuda\")\n for _ in range(self.layer_num)]\n self.value_buffer = [torch.empty((self.tot_size, self.head_num, self.head_dim),\n dtype=self.dtype, device=\"cuda\")\n for _ in range(self.layer_num)]\n \n\n def reset_all_cache(self):\n self.reset_all_pool()" }, { "identifier": "INT8KVMemoryManager", "path": "slora/common/int8kv_mem_manager.py", "snippet": "class INT8KVMemoryManager(MemoryManager):\n def __init__(self, size, dtype, head_num, head_dim, layer_num, always_copy=True):\n super().__init__(size, dtype, head_num, head_dim, layer_num, always_copy=True)\n\n def _init_buffers(self, size, dtype, head_num, head_dim, layer_num):\n self.key_buffer = [torch.empty((size, head_num, head_dim), dtype=torch.int8, device=\"cuda\") for _ in range(layer_num)]\n self.value_buffer = [torch.empty((size, head_num, head_dim), dtype=torch.int8, device=\"cuda\") for _ in range(layer_num)]\n self.key_scale_buffer = [torch.empty((size, head_num, 1), dtype=dtype, device=\"cuda\") for _ in range(layer_num)]\n self.value_scale_buffer = [torch.empty((size, head_num, 1), dtype=dtype, device=\"cuda\") for _ in range(layer_num)]" }, { "identifier": "TpPartBaseModel", "path": "slora/common/basemodel/basemodel.py", "snippet": "class TpPartBaseModel:\n # weight class\n pre_and_post_weight_class = None\n transformer_weight_class = None\n\n # infer class\n pre_layer_infer_class = None\n post_layer_infer_class = None\n transformer_layer_infer_class = None\n\n # infer state class\n infer_state_class = InferStateInfo\n\n def __init__(self, tp_rank, world_size, weight_dir,\n max_total_token_num, mem_adapter_size, load_way=\"HF\", mode=[], dummy=False):\n self.tp_rank_ = tp_rank\n self.world_size_ = world_size\n self.weight_dir_ = weight_dir\n self.max_total_token_num = max_total_token_num\n self.mem_adapter_size = mem_adapter_size\n self.load_way = load_way\n self.mode = mode\n self.dummy = dummy\n\n self._init_config()\n self._verify_must()\n self._verify_params()\n self._init_weights()\n self._init_mem_manager()\n self._init_infer_layer()\n self._init_some_value()\n self._init_custom()\n return\n \n def _init_config(self):\n if self.dummy:\n self.config = get_config_json(self.weight_dir_)\n else:\n self.config, self.weight_dir_ = hf_load_config(self.weight_dir_, mode=\"model\")\n # rename keys\n repair_config(self.config, same_names=[\"num_attention_heads\", \"n_head\"])\n repair_config(self.config, same_names=[\"hidden_size\", \"n_embd\", \"n_embed\"])\n repair_config(self.config, same_names=[\"num_hidden_layers\", \"n_layer\"])\n\n return\n \n @final\n def _verify_must(self):\n assert self.config[\"num_attention_heads\"] % self.world_size_ == 0\n return\n \n def _verify_params(self):\n assert self.load_way == \"HF\", \"only support HF format weights\"\n return\n\n def _init_weights(self):\n self.pre_post_weight = 
self.pre_and_post_weight_class(self.tp_rank_, self.world_size_, torch.float16, network_config=self.config, mode=self.mode)\n self.trans_layers_weight = [\n self.transformer_weight_class(i, self.tp_rank_, self.world_size_, torch.float16, network_config=self.config, mode=self.mode)\n for i in range(self.config[\"n_layer\"])\n ]\n load_hf_weights(\n \"fp16\",\n weight_dir=self.weight_dir_,\n pre_post_layer=self.pre_post_weight,\n transformer_layer_list=self.trans_layers_weight,\n dummy=self.dummy)\n self.pre_post_weight.verify_load()\n [weight.verify_load() for weight in self.trans_layers_weight]\n return \n \n def _init_mem_manager(self):\n assert self.config[\"num_attention_heads\"] % self.world_size_ == 0\n self.mem_manager = MemoryAllocator(\n tot_size=self.max_total_token_num + self.mem_adapter_size,\n cache_size=self.max_total_token_num, \n dtype=torch.float16,\n head_num=self.config[\"num_attention_heads\"] // self.world_size_,\n head_dim=self.config[\"n_embed\"] // self.config[\"num_attention_heads\"],\n layer_num=self.config[\"n_layer\"])\n return \n \n def _init_infer_layer(self):\n self.pre_infer = self.pre_layer_infer_class(tp_rank=self.tp_rank_, world_size=self.world_size_, network_config=self.config, mode=self.mode)\n self.post_infer = self.post_layer_infer_class(tp_rank=self.tp_rank_, world_size=self.world_size_, network_config=self.config, mode=self.mode)\n self.layers_infer = [\n self.transformer_layer_infer_class(\n i,\n tp_rank=self.tp_rank_,\n world_size=self.world_size_,\n network_config=self.config,\n mode=self.mode) for i in range(\n self.config[\"n_layer\"])]\n return\n \n def _init_some_value(self):\n self.head_dim_ = self.config[\"n_embed\"] // self.config[\"num_attention_heads\"]\n self.tp_k_head_num_ = self.config[\"num_attention_heads\"] // self.world_size_\n self.tp_v_head_num_ = self.tp_k_head_num_\n self.layers_num = self.config[\"n_layer\"]\n self.vocab_size = self.config[\"vocab_size\"]\n return\n \n def _init_custom(self):\n pass\n\n\n @torch.no_grad()\n def forward(\n self,\n batch_size,\n total_token_num,\n max_len_in_batch,\n input_ids : torch.Tensor,\n b_loc : torch.Tensor,\n b_start_loc : torch.Tensor,\n b_seq_len : torch.Tensor,\n is_prefill=True):\n if is_prefill:\n return self._prefill(batch_size, total_token_num, max_len_in_batch, input_ids, b_loc, b_start_loc, b_seq_len)\n else:\n return self._decode(batch_size, total_token_num, max_len_in_batch, input_ids, b_loc, b_start_loc, b_seq_len)\n\n \n def _prefill(self, batch_size, total_token_num, max_len_in_batch, input_ids, b_loc, b_start_loc, b_seq_len):\n infer_state = self.infer_state_class()\n infer_state.is_prefill = True\n infer_state.batch_size = batch_size\n infer_state.total_token_num = total_token_num\n infer_state.max_len_in_batch = max_len_in_batch\n assert (input_ids.shape[0] == total_token_num)\n assert (b_loc.shape[0] == b_start_loc.shape[0] == b_seq_len.shape[0])\n infer_state.b_loc = b_loc\n infer_state.b_start_loc = b_start_loc\n infer_state.b_seq_len = b_seq_len\n\n infer_state.mem_manager = self.mem_manager\n infer_state.prefill_mem_index = self.mem_manager.alloc(infer_state.total_token_num)\n infer_state.prefill_key_buffer = torch.empty((infer_state.total_token_num, self.tp_k_head_num_, self.head_dim_), dtype=torch.float16, device=\"cuda\")\n infer_state.prefill_value_buffer = torch.empty((infer_state.total_token_num, self.tp_v_head_num_, self.head_dim_), dtype=torch.float16, device=\"cuda\")\n init_bloc(b_loc, b_seq_len, max_len_in_batch, infer_state.prefill_mem_index)\n\n 
infer_state.init_some_extra_state(self, batch_size, total_token_num, max_len_in_batch, input_ids, b_loc, b_start_loc, b_seq_len, True)\n predict_logics = self._context_forward(input_ids, infer_state)\n return predict_logics\n \n def _decode(self, batch_size, total_token_num, max_len_in_batch, input_ids, b_loc, b_start_loc, b_seq_len):\n infer_state = self.infer_state_class()\n infer_state.is_prefill = False\n infer_state.batch_size = batch_size\n infer_state.total_token_num = total_token_num\n infer_state.max_len_in_batch = max_len_in_batch\n assert (b_loc.shape[0] == b_start_loc.shape[0] == b_seq_len.shape[0])\n infer_state.b_loc = b_loc\n infer_state.b_start_loc = b_start_loc\n infer_state.b_seq_len = b_seq_len\n \n infer_state.mem_manager = self.mem_manager\n\n alloc_mem = self.mem_manager.alloc_contiguous(batch_size)\n if alloc_mem is not None:\n infer_state.decode_is_contiguous = True\n infer_state.decode_mem_index = alloc_mem[0]\n infer_state.decode_mem_start = alloc_mem[1]\n infer_state.decode_mem_end = alloc_mem[2]\n b_loc[:, max_len_in_batch - 1] = infer_state.decode_mem_index\n else:\n infer_state.decode_is_contiguous = False\n alloc_mem = self.mem_manager.alloc(batch_size)\n infer_state.decode_mem_index = alloc_mem\n infer_state.decode_key_buffer = torch.empty((batch_size, self.tp_k_head_num_, self.head_dim_), dtype=torch.float16, device=\"cuda\")\n infer_state.decode_value_buffer = torch.empty((batch_size, self.tp_v_head_num_, self.head_dim_), dtype=torch.float16, device=\"cuda\")\n b_loc[:, max_len_in_batch - 1] = infer_state.decode_mem_index\n\n infer_state.init_some_extra_state(self, batch_size, total_token_num, max_len_in_batch, input_ids, b_loc, b_start_loc, b_seq_len, False)\n predict_logics = self._token_forward(input_ids, infer_state)\n return predict_logics\n \n @final\n def _context_forward(self, input_ids, infer_state: InferStateInfo):\n cuda_input_ids = input_ids\n input_embs = self.pre_infer.context_forward(cuda_input_ids, infer_state, self.pre_post_weight)\n for i in range(self.layers_num):\n input_embs = self.layers_infer[i].context_forward(input_embs, infer_state, self.trans_layers_weight[i])\n predict_logics = self.post_infer.token_forward(input_embs, infer_state, self.pre_post_weight, return_logics=True)\n return predict_logics\n\n @final\n def _token_forward(self, input_ids, infer_state: InferStateInfo):\n cuda_input_ids = input_ids\n input_embs = self.pre_infer.token_forward(cuda_input_ids, infer_state, self.pre_post_weight)\n for i in range(self.layers_num):\n input_embs = self.layers_infer[i].token_forward(input_embs, infer_state, self.trans_layers_weight[i])\n predict_logics = self.post_infer.token_forward(input_embs, infer_state, self.pre_post_weight, return_logics=True)\n return predict_logics" } ]
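One detail worth pulling out of the MemoryAllocator context above is how alloc_contiguous finds a run of free KV-cache slots: it takes a prefix sum over the free/used bitmap and checks, for every window of the requested length, whether that window contains exactly need_size free slots. The sketch below is a minimal, self-contained rendering of that windowed prefix-sum idea; the function name and the example mask are illustrative, and it omits the bookkeeping (marking slots as used, updating the free counter) that the allocator performs.

```python
import torch

def find_contiguous_free_run(free_mask: torch.Tensor, need_size: int):
    """Return the start index of the first run of `need_size` free slots, or None.

    `free_mask` is a 1-D tensor with 1 where a slot is free and 0 where it is used,
    mirroring `mem_state` in the allocator above. With a prefix sum C, the number of
    free slots in the window [i, i + need_size - 1] is
    C[i + need_size - 1] - C[i] + free_mask[i]; a window is fully free exactly when
    that count equals `need_size`.
    """
    tot = free_mask.shape[0]
    if need_size > tot:
        return None
    cum = torch.cumsum(free_mask.to(torch.int32), dim=0)
    window_free = (cum[need_size - 1:]
                   - cum[: tot - need_size + 1]
                   + free_mask[: tot - need_size + 1].to(torch.int32))
    candidates = torch.nonzero(window_free == need_size, as_tuple=False)
    if candidates.numel() == 0:
        return None
    return int(candidates[0])

# Example: slots 2..5 are free, so a request for 3 contiguous slots starts at index 2.
mask = torch.tensor([0, 0, 1, 1, 1, 1, 0, 1], dtype=torch.int32)
print(find_contiguous_free_run(mask, 3))  # -> 2
```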
import os
import json
import torch

from slora.models.llama.layer_infer.pre_layer_infer import LlamaPreLayerInfer
from slora.models.llama.layer_infer.post_layer_infer import LlamaPostLayerInfer
from slora.models.llama.layer_infer.transformer_layer_infer import LlamaTransformerLayerInfer
from slora.models.llama.layer_weights.pre_and_post_layer_weight import LlamaPreAndPostLayerWeight
from slora.models.llama.layer_weights.transformer_layer_weight import LlamaTransformerLayerWeight
from slora.models.llama.infer_struct import LlamaInferStateInfo
from slora.common.mem_allocator import MemoryAllocator
from slora.common.int8kv_mem_manager import INT8KVMemoryManager
from slora.common.basemodel import TpPartBaseModel
11,586
# from slora.common.mem_manager import MemoryManager

class LlamaTpPartModel(TpPartBaseModel):
    # weight class
    pre_and_post_weight_class = LlamaPreAndPostLayerWeight
    transformer_weight_class = LlamaTransformerLayerWeight

    # infer class
    pre_layer_infer_class = LlamaPreLayerInfer
    post_layer_infer_class = LlamaPostLayerInfer
# from slora.common.mem_manager import MemoryManager

class LlamaTpPartModel(TpPartBaseModel):
    # weight class
    pre_and_post_weight_class = LlamaPreAndPostLayerWeight
    transformer_weight_class = LlamaTransformerLayerWeight

    # infer class
    pre_layer_infer_class = LlamaPreLayerInfer
    post_layer_infer_class = LlamaPostLayerInfer
transformer_layer_infer_class = LlamaTransformerLayerInfer
2
2023-11-05 04:08:36+00:00
16k
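Before the next record, a short illustration of the feed-forward step that the LlamaTransformerLayerInfer._ffn snippet above computes: the SwiGLU-style gated MLP used by LLaMA, down_proj(silu(x @ gate_proj) * (x @ up_proj)). This is a minimal sketch with random weights and illustrative shapes, not the repository's tensor-parallel implementation (which splits the intermediate dimension across ranks and frees intermediates eagerly).

```python
import torch
import torch.nn.functional as F

def llama_gated_ffn(x: torch.Tensor,
                    gate_proj: torch.Tensor,   # (hidden, intermediate)
                    up_proj: torch.Tensor,     # (hidden, intermediate)
                    down_proj: torch.Tensor):  # (intermediate, hidden)
    """SwiGLU-style feed-forward: down( silu(x @ W_gate) * (x @ W_up) )."""
    gate = F.silu(x @ gate_proj)    # gating branch with SiLU activation
    up = x @ up_proj                # plain linear branch
    return (gate * up) @ down_proj  # elementwise gate, then project back to hidden size

# Tiny shape check with random weights (hidden=8, intermediate=16, 4 tokens).
hidden, inter = 8, 16
x = torch.randn(4, hidden)
out = llama_gated_ffn(x,
                      torch.randn(hidden, inter),
                      torch.randn(hidden, inter),
                      torch.randn(inter, hidden))
print(out.shape)  # torch.Size([4, 8])
```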
fleet-ai/context
cli.py
[ { "identifier": "print_markdown", "path": "utils/utils.py", "snippet": "def print_markdown(message):\n for line in message.split(\"\\n\"):\n line = line.strip()\n if line == \"\":\n print(\"\")\n elif line == \"---\":\n rprint(Rule(style=\"white\"))\n elif line.startswith(\"!!!\"):\n rprint(Text(line[3:], style=\"#D5D7FB\"))\n else:\n rprint(Markdown(line))\n\n if \"\\n\" not in message and message.startswith(\">\"):\n print(\"\")" }, { "identifier": "print_exception", "path": "utils/utils.py", "snippet": "def print_exception(exc_type, exc_value, traceback_obj):\n traceback_details = traceback.extract_tb(traceback_obj)\n for filename, lineno, funcname, text in traceback_details:\n console.print(\n f\"File: {filename}, Line: {lineno}, Func: {funcname}, Text: {text}\"\n )\n console.print(f\"{exc_type.__name__}: {exc_value}\")" }, { "identifier": "extract_code_blocks", "path": "utils/utils.py", "snippet": "def extract_code_blocks(message):\n pattern = r\"```python\\n(.*?)```\"\n matches = re.findall(pattern, message, re.DOTALL)\n return \"\\n\".join(matches)" }, { "identifier": "print_help", "path": "utils/utils.py", "snippet": "def print_help():\n table = Table(show_header=True, header_style=\"bold magenta\")\n table.add_column(\"Command\")\n table.add_column(\"Description\")\n\n # Add rows to the table for each command\n table.add_row(\"-k, --k_value\", \"Number of chunks to return\")\n table.add_row(\n \"-l, --libraries\",\n \"Limit your chat to a list of libraries. Usage: -l library1 library2 library3\",\n )\n table.add_row(\n \"-m, --model\", \"Specify the model. Default: gpt-4-1106-preview (gpt-4-turbo)\"\n )\n table.add_row(\n \"-c, --cite_sources\", \"Determines whether or not the AI model cites its sources\"\n )\n table.add_row(\"-h, --help\", \"Help\")\n\n # Create a panel with the table\n panel = Panel(table, title=\"Help\", border_style=\"blue\")\n\n # Print the panel\n rprint(panel)" }, { "identifier": "TextStream", "path": "utils/stream.py", "snippet": "class TextStream:\n def __init__(self):\n self.live = Live(console=Console(), auto_refresh=False)\n self.live.start()\n\n def print_stream(self, message):\n markdown = Markdown(message.strip() + \"●\")\n panel = Panel(markdown, box=MINIMAL)\n self.live.update(panel)\n self.live.refresh()\n\n def end_stream(self):\n self.live.stop()" }, { "identifier": "retrieve_context", "path": "utils/ai.py", "snippet": "def retrieve_context(query, k=10, filters=None):\n \"\"\"Gets the context from our libraries vector db for a given query.\n\n Args:\n query (str): User input query\n k (int, optional): number of retrieved results. 
Defaults to 10.\n \"\"\"\n\n # First, we query the API\n responses = retrieve(query, k=k, filters=filters)\n\n # Then, we build the prompt_with_context string\n prompt_with_context = \"\"\n for response in responses:\n prompt_with_context += f\"\\n\\n### Context {response['metadata']['url']} ###\\n{response['metadata']['text']}\"\n return {\"role\": \"user\", \"content\": prompt_with_context}" }, { "identifier": "construct_prompt", "path": "utils/ai.py", "snippet": "def construct_prompt(\n messages,\n context_message,\n model=\"gpt-4-1106-preview\",\n cite_sources=True,\n context_window=3000,\n):\n \"\"\"\n Constructs a RAG (Retrieval-Augmented Generation) prompt by balancing the token count of messages and context_message.\n If the total token count exceeds the maximum limit, it adjusts the token count of each to maintain a 1:1 proportion.\n It then combines both lists and returns the result.\n\n Parameters:\n messages (List[dict]): List of messages to be included in the prompt.\n context_message (dict): Context message to be included in the prompt.\n model (str): The model to be used for encoding, default is \"gpt-4-1106-preview\".\n\n Returns:\n List[dict]: The constructed RAG prompt.\n \"\"\"\n # Get the encoding; default to cl100k_base\n if model in OPENAI_MODELS:\n encoding = tiktoken.encoding_for_model(model)\n else:\n encoding = tiktoken.get_encoding(\"cl100k_base\")\n\n # 1) calculate tokens\n reserved_space = 1000\n max_messages_count = int((context_window - reserved_space) / 2)\n max_context_count = int((context_window - reserved_space) / 2)\n\n # 2) construct prompt\n prompts = messages.copy()\n prompts.insert(0, {\"role\": \"system\", \"content\": SYSTEM_PROMPT})\n if cite_sources:\n prompts.insert(-1, {\"role\": \"user\", \"content\": PROMPT})\n\n # 3) find how many tokens each list has\n messages_token_count = len(\n encoding.encode(\n \"\\n\".join(\n [\n f\"<|im_start|>{message['role']}\\n{message['content']}<|im_end|>\"\n for message in prompts\n ]\n )\n )\n )\n context_token_count = len(\n encoding.encode(\n f\"<|im_start|>{context_message['role']}\\n{context_message['content']}<|im_end|>\"\n )\n )\n\n # 4) Balance the token count for each\n if (messages_token_count + context_token_count) > (context_window - reserved_space):\n # context has more than limit, messages has less than limit\n if (messages_token_count < max_messages_count) and (\n context_token_count > max_context_count\n ):\n max_context_count += max_messages_count - messages_token_count\n # messages has more than limit, context has less than limit\n elif (messages_token_count > max_messages_count) and (\n context_token_count < max_context_count\n ):\n max_messages_count += max_context_count - context_token_count\n\n # 5) Cut each list to the max count\n\n # Cut down messages\n while messages_token_count > max_messages_count:\n removed_encoding = encoding.encode(\n f\"<|im_start|>{prompts[1]['role']}\\n{prompts[1]['content']}<|im_end|>\"\n )\n messages_token_count -= len(removed_encoding)\n if messages_token_count < max_messages_count:\n prompts = (\n [prompts[0]]\n + [\n {\n \"role\": prompts[1][\"role\"],\n \"content\": encoding.decode(\n removed_encoding[\n : min(\n int(max_messages_count -\n messages_token_count),\n len(removed_encoding),\n )\n ]\n )\n .replace(\"<|im_start|>\", \"\")\n .replace(\"<|im_end|>\", \"\"),\n }\n ]\n + prompts[2:]\n )\n else:\n prompts = [prompts[0]] + prompts[2:]\n\n # Cut down context\n if context_token_count > max_context_count:\n # Taking a proportion of the content chars 
length\n reduced_chars_length = int(\n len(context_message[\"content\"]) *\n (max_context_count / context_token_count)\n )\n context_message[\"content\"] = context_message[\"content\"][:reduced_chars_length]\n\n # 6) Combine both lists\n prompts.insert(-1, context_message)\n\n return prompts" }, { "identifier": "get_remote_chat_response", "path": "utils/ai.py", "snippet": "def get_remote_chat_response(messages, model=\"gpt-4-1106-preview\"):\n \"\"\"\n Returns a streamed OpenAI chat response.\n\n Parameters:\n messages (List[dict]): List of messages to be included in the prompt.\n model (str): The model to be used for encoding, default is \"gpt-4-1106-preview\".\n\n Returns:\n str: The streamed OpenAI chat response.\n \"\"\"\n client = OpenAI(api_key=os.environ.get(\"OPENAI_API_KEY\"))\n\n try:\n response = client.chat.completions.create(\n model=model, messages=messages, temperature=0.2, stream=True\n )\n\n for chunk in response:\n current_context = chunk.choices[0].delta.content\n yield current_context\n\n except openai.AuthenticationError as error:\n print(\"401 Authentication Error:\", error)\n raise Exception(\n \"Invalid OPENAI_API_KEY. Please re-run with a valid key.\")\n\n except Exception as error:\n print(\"Streaming Error:\", error)\n raise Exception(\"Internal Server Error\")" }, { "identifier": "get_other_chat_response", "path": "utils/ai.py", "snippet": "def get_other_chat_response(messages, model=\"local-model\"):\n \"\"\"\n Returns a streamed chat response from a local server.\n\n Parameters:\n messages (List[dict]): List of messages to be included in the prompt.\n model (str): The model to be used for encoding, default is \"gpt-4-1106-preview\".\n\n Returns:\n str: The streamed chat response.\n \"\"\"\n try:\n if model == \"local-model\":\n url = \"http://localhost:1234/v1/chat/completions\"\n headers = {\"Content-Type\": \"application/json\"}\n data = {\n \"messages\": messages,\n \"temperature\": 0.2,\n \"max_tokens\": -1,\n \"stream\": True,\n }\n response = requests.post(\n url, headers=headers, data=json.dumps(data), stream=True, timeout=120\n )\n\n if response.status_code == 200:\n for chunk in response.iter_content(chunk_size=None):\n decoded_chunk = chunk.decode()\n if (\n \"data:\" in decoded_chunk\n and decoded_chunk.split(\"data:\")[1].strip()\n ): # Check if the chunk is not empty\n try:\n chunk_dict = json.loads(\n decoded_chunk.split(\"data:\")[1].strip()\n )\n yield chunk_dict[\"choices\"][0][\"delta\"].get(\"content\", \"\")\n except json.JSONDecodeError:\n pass\n else:\n print(f\"Error: {response.status_code}, {response.text}\")\n raise Exception(\"Internal Server Error\")\n else:\n if not os.environ.get(\"OPENROUTER_API_KEY\"):\n raise Exception(\n f\"For non-OpenAI models, like {model}, set your OPENROUTER_API_KEY.\"\n )\n\n response = requests.post(\n url=\"https://openrouter.ai/api/v1/chat/completions\",\n headers={\n \"Authorization\": f\"Bearer {os.environ.get('OPENROUTER_API_KEY')}\",\n \"HTTP-Referer\": os.environ.get(\n \"OPENROUTER_APP_URL\", \"https://fleet.so/context\"\n ),\n \"X-Title\": os.environ.get(\"OPENROUTER_APP_TITLE\", \"Fleet Context\"),\n \"Content-Type\": \"application/json\",\n },\n data=json.dumps(\n {\"model\": model, \"messages\": messages, \"stream\": True}),\n stream=True,\n timeout=120,\n )\n if response.status_code == 200:\n for chunk in response.iter_lines():\n decoded_chunk = chunk.decode(\"utf-8\")\n if (\n \"data:\" in decoded_chunk\n and decoded_chunk.split(\"data:\")[1].strip()\n ): # Check if the chunk is not empty\n 
try:\n chunk_dict = json.loads(\n decoded_chunk.split(\"data:\")[1].strip()\n )\n yield chunk_dict[\"choices\"][0][\"delta\"].get(\"content\", \"\")\n except json.JSONDecodeError:\n pass\n else:\n print(f\"Error: {response.status_code}, {response.text}\")\n raise Exception(\"Internal Server Error\")\n\n except requests.exceptions.RequestException as error:\n print(\"Request Error:\", error)\n raise Exception(\n \"Invalid request. Please check your request parameters.\")" }, { "identifier": "ARGUMENTS", "path": "constants/cli.py", "snippet": "ARGUMENTS = [\n {\n \"name\": \"k_value\",\n \"nickname\": \"k\",\n \"help_text\": \"Number of chunks to return\",\n \"type\": int,\n \"default\": 15,\n },\n {\n \"name\": \"libraries\",\n \"nickname\": \"l\",\n \"help_text\": \"Limit your chat to a list of libraries. Usage: -l library1 library2 library3\",\n \"type\": list,\n },\n {\n \"name\": \"model\",\n \"nickname\": \"m\",\n \"help_text\": \"Specify the model. Default: gpt-4\",\n \"type\": str,\n \"default\": \"gpt-4\"\n },\n {\n \"name\": \"cite_sources\",\n \"nickname\": \"c\",\n \"help_text\": \"Determines whether or not the AI model cites its sources\",\n \"type\": bool,\n \"default\": True,\n },\n {\n \"name\": \"local\",\n \"nickname\": \"n\",\n \"help_text\": \"Uses LMStudio for local models\",\n \"type\": bool,\n \"default\": False,\n },\n {\n \"name\": \"context_window\",\n \"nickname\": \"w\",\n \"help_text\": \"Context window (if using local models)\",\n \"type\": int,\n \"default\": 3000,\n },\n]" }, { "identifier": "LIBRARIES", "path": "constants/cli.py", "snippet": "LIBRARIES = [\n \"python\",\n \"boto3\",\n \"urllib3\",\n \"botocore\",\n \"setuptools\",\n \"requests\",\n \"typing-extensions\",\n \"certifi\",\n \"charset-normalizer\",\n \"wheel\",\n \"cryptography\",\n \"python-dateutil\",\n \"idna\",\n \"pyyaml\",\n \"google-api-core\",\n \"six\",\n \"pytz\",\n \"numpy\",\n \"importlib-metadata\",\n \"pip\",\n \"packaging\",\n \"zipp\",\n \"awscli\",\n \"aiobotocore\",\n \"protobuf\",\n \"click\",\n \"pandas\",\n \"pyasn1\",\n \"rsa\",\n \"fsspec\",\n \"pyjwt\",\n \"jmespath\",\n \"markupsafe\",\n \"s3fs\",\n \"attrs\",\n \"cffi\",\n \"psutil\",\n \"lxml\",\n \"pydantic\",\n \"colorama\",\n \"platformdirs\",\n \"googleapis-common-protos\",\n \"pycparser\",\n \"google-auth\",\n \"pyopenssl\",\n \"virtualenv\",\n \"cachetools\",\n \"werkzeug\",\n \"jinja2\",\n \"jsonschema\",\n \"filelock\",\n \"flask\",\n \"sqlalchemy\",\n \"pyparsing\",\n \"docutils\",\n \"async-timeout\",\n \"tzlocal\",\n \"oauthlib\",\n \"pluggy\",\n \"tomli\",\n \"aiohttp\",\n \"grpcio\",\n \"requests-oauthlib\",\n \"pyarrow\",\n \"pytest\",\n \"wrapt\",\n \"tqdm\",\n \"soupsieve\",\n \"dnspython\",\n \"isodate\",\n \"azure-core\",\n \"frozenlist\",\n \"coverage\",\n \"pygments\",\n \"websocket-client\",\n \"beautifulsoup4\",\n \"pillow\",\n \"greenlet\",\n \"importlib-resources\",\n \"distlib\",\n \"yarl\",\n \"multidict\",\n \"scipy\",\n \"decorator\",\n \"aiofiles\",\n \"et-xmlfile\",\n \"openpyxl\",\n \"google-cloud-storage\",\n \"google-cloud-core\",\n \"httptools\",\n \"chardet\",\n \"iniconfig\",\n \"asn1crypto\",\n \"tomlkit\",\n \"tabulate\",\n \"more-itertools\",\n \"requests-toolbelt\",\n \"google-resumable-media\",\n \"paramiko\",\n \"aioconsole\",\n \"deprecated\",\n \"gitpython\",\n \"pynacl\",\n \"google-api-python-client\",\n \"pymysql\",\n \"psycopg2\",\n \"rpds-py\",\n \"proto-plus\",\n \"anyio\",\n \"itsdangerous\",\n \"msal\",\n \"referencing\",\n \"azure-storage-blob\",\n 
\"jsonschema-specifications\",\n \"bcrypt\",\n \"pathspec\",\n \"scikit-learn\",\n \"smmap\",\n \"msgpack\",\n \"matplotlib\",\n \"poetry-core\",\n \"keyring\",\n \"joblib\",\n \"regex\",\n \"mypy-extensions\",\n \"wcwidth\",\n \"docker\",\n \"sniffio\",\n \"google-auth-oauthlib\",\n \"kiwisolver\",\n \"portalocker\",\n \"pexpect\",\n \"ptyprocess\",\n \"jaraco-classes\",\n \"dill\",\n \"pyrsistent\",\n \"ruamel-yaml\",\n \"gitdb\",\n \"pycryptodomex\",\n \"sqlparse\",\n \"msrest\",\n \"google-crc32c\",\n \"sagemaker\",\n \"tenacity\",\n \"prompt-toolkit\",\n \"google-cloud-bigquery\",\n \"tzdata\",\n \"snowflake-connector-python\",\n \"gunicorn\",\n \"cython\",\n \"py4j\",\n \"py\",\n \"markdown\",\n \"azure-identity\",\n \"httplib2\",\n \"future\",\n \"fonttools\",\n \"alembic\",\n \"markdown-it-py\",\n \"cachecontrol\",\n \"awswrangler\",\n \"rich\",\n \"msal-extensions\",\n \"tornado\",\n \"threadpoolctl\",\n \"jedi\",\n \"marshmallow\",\n \"google-auth-httplib2\",\n \"traitlets\",\n \"cloudpickle\",\n \"shellingham\",\n \"redis\",\n \"pycodestyle\",\n \"backoff\",\n \"python-dotenv\",\n \"scramp\",\n \"toml\",\n \"h11\",\n \"pytest-cov\",\n \"termcolor\",\n \"trove-classifiers\",\n \"annotated-types\",\n \"uritemplate\",\n \"ipython\",\n \"pyzmq\",\n \"networkx\",\n \"xmltodict\",\n \"uvicorn\",\n \"pyspark\",\n \"pg8000\",\n \"mccabe\",\n \"ply\",\n \"prometheus-client\",\n \"prometheus-python\",\n \"redshift-connector\",\n \"oscrypto\",\n \"dulwich\",\n \"webencodings\",\n \"pyodbc\",\n \"pycryptodome\",\n \"httpx\",\n \"sortedcontainers\",\n \"httpcore\",\n \"jeepney\",\n \"mako\",\n \"babel\",\n \"poetry\",\n \"secretstorage\",\n \"defusedxml\",\n \"isort\",\n \"jsonpointer\",\n \"blinker\",\n \"black\",\n \"jupyter-client\",\n \"typing-inspect\",\n \"jupyter-core\",\n \"pymongo\",\n \"mdit-py-plugins\",\n \"datadog\",\n \"contourpy\",\n \"adal\",\n \"pkginfo\",\n \"parso\",\n \"tensorboard\",\n \"toolz\",\n \"pyflakes\",\n \"absl-py\",\n \"sentry-sdk\",\n \"xlrd\",\n \"requests-aws4auth\",\n \"flake8\",\n \"jsonpath-ng\",\n \"python-json-logger\",\n \"nbconvert\",\n \"pickleshare\",\n \"build\",\n \"mdurl\",\n \"backcall\",\n \"fastapi\",\n \"rapidfuzz\",\n \"argcomplete\",\n \"python-utils\",\n \"transformers\",\n \"matplotlib-inline\",\n \"setuptools-scm\",\n \"nbformat\",\n \"ipykernel\",\n \"databricks-cli\",\n \"notebook\",\n \"fastjsonschema\",\n \"jupyter-server\",\n \"mistune\",\n \"huggingface-hub\",\n \"kubernetes\",\n \"debugpy\",\n \"starlette\",\n \"arrow\",\n \"asttokens\",\n \"progressbar2\",\n \"tensorflow\",\n \"google-cloud-pubsub\",\n \"websockets\",\n \"astroid\",\n \"jsonpatch\",\n \"asynctest\",\n \"aioitertools\",\n \"imageio\",\n \"simplejson\",\n \"appdirs\",\n \"pyproject-hooks\",\n \"pylint\",\n \"pbr\",\n \"lazy-object-proxy\",\n \"multiprocess\",\n \"smart-open\",\n \"altair\",\n \"h5py\",\n \"asgiref\",\n \"backports-zoneinfo\",\n \"tinycss2\",\n \"entrypoints\",\n \"bleach\",\n \"oauth2client\",\n \"llvmlite\",\n \"numba\",\n \"cattrs\",\n \"crashtest\",\n \"mlflow\",\n \"send2trash\",\n \"shapely\",\n \"elasticsearch\",\n \"comm\",\n \"cleo\",\n \"orjson\",\n \"pendulum\",\n \"pytest-runner\",\n \"nbclient\",\n \"aenum\",\n \"pygithub\",\n \"identify\",\n \"msrestazure\",\n \"nodeenv\",\n \"mypy\",\n \"flatbuffers\",\n \"great-expectations\",\n \"mock\",\n \"jupyterlab-server\",\n \"zope-interface\",\n \"pytzdata\",\n \"loguru\",\n \"argon2-cffi\",\n \"tokenizers\",\n \"typeguard\",\n \"overrides\",\n \"tox\",\n \"requests-file\",\n 
\"humanfriendly\",\n \"json5\",\n \"xlsxwriter\",\n \"pysocks\",\n \"google-pasta\",\n \"cfgv\",\n \"pyathena\",\n \"gast\",\n \"azure-storage-file-datalake\",\n \"ipywidgets\",\n \"rfc3339-validator\",\n \"executing\",\n \"jupyterlab\",\n \"pre-commit\",\n \"django\",\n \"querystring-parser\",\n \"contextlib2\",\n \"cached-property\",\n \"installer\",\n \"deepdiff\",\n \"pure-eval\",\n \"tensorflow-serving-api\",\n \"nltk\",\n \"semver\",\n \"retry\",\n \"hvac\",\n \"pipenv\",\n \"uri-template\",\n \"torch\",\n \"execnet\",\n \"html5lib\",\n \"typer\",\n \"croniter\",\n \"lockfile\",\n \"slack-sdk\",\n \"watchdog\",\n \"dataclasses\",\n \"gremlinpython\",\n \"types-pyyaml\",\n \"tensorflow-io-gcs-filesystem\",\n \"setproctitle\",\n \"azure-mgmt-core\",\n \"responses\",\n \"sphinx\",\n \"statsmodels\",\n \"text-unidecode\",\n \"dataclasses-json\",\n \"pandocfilters\",\n \"pytest-xdist\",\n \"async-lru\",\n \"click-plugins\",\n \"opentelemetry-api\",\n \"selenium\",\n \"safetensors\",\n \"opencv-python\",\n \"python-slugify\",\n \"xgboost\",\n \"distro\",\n \"plotly\",\n \"sentencepiece\",\n \"webcolors\",\n \"types-requests\",\n \"rfc3986\",\n \"terminado\",\n \"jupyter-lsp\",\n \"rfc3986-validator\",\n \"configparser\",\n \"argon2-cffi-bindings\",\n \"cmake\",\n \"fastavro\",\n \"docopt\",\n \"unidecode\",\n \"retrying\",\n \"types-urllib3\",\n \"apache-airflow\",\n \"pytest-mock\",\n \"fqdn\",\n \"isoduration\",\n \"tblib\",\n \"prettytable\",\n \"semantic-version\",\n \"sympy\",\n \"seaborn\",\n \"confluent-kafka\",\n \"azure-keyvault-secrets\",\n \"opt-einsum\",\n \"faker\",\n \"jsonpickle\",\n \"mpmath\",\n \"patsy\",\n \"azure-mgmt-resource\",\n \"libclang\",\n \"opencensus\",\n \"antlr4-python3-runtime\",\n \"pysftp\",\n \"ordered-set\",\n \"pymssql\",\n \"db-dtypes\",\n \"astunparse\",\n \"inflection\",\n \"gcsfs\",\n \"thrift\",\n \"parsedatetime\",\n \"dask\",\n \"deprecation\",\n \"scikit-image\",\n \"azure-datalake-store\",\n \"moto\",\n \"zeep\",\n \"makefun\",\n \"pyhcl\",\n \"boto\",\n \"libcst\",\n \"graphviz\",\n \"stevedore\",\n \"gspread\",\n \"snowballstemmer\",\n \"ujson\",\n \"zope-event\",\n \"gevent\",\n \"pyproj\",\n \"checkov\",\n \"python-gnupg\",\n \"pathos\",\n \"trio\",\n \"trio-websocket\",\n \"azure-eventhub\",\n \"typed-ast\",\n \"kombu\",\n \"shap\",\n \"pox\",\n \"ppft\",\n \"datasets\",\n \"apscheduler\",\n \"torchvision\",\n \"click-man\",\n \"accelerate\",\n \"coloredlogs\",\n \"xxhash\",\n \"brotli\",\n \"mypy-boto3-rds\",\n \"docstring-parser\",\n \"applicationinsights\",\n \"apache-beam\",\n \"structlog\",\n \"tldextract\",\n \"lightgbm\",\n \"email-validator\",\n \"wandb\",\n \"cligj\",\n \"kafka-python\",\n \"pybind11\",\n \"fire\",\n \"celery\",\n \"wsproto\",\n \"pywavelets\",\n \"numexpr\",\n \"authlib\",\n \"datetime\",\n \"colorlog\",\n \"pathlib2\",\n \"uamqp\",\n \"texttable\",\n \"pytest-asyncio\",\n \"google-cloud-logging\",\n \"azure-cosmos\",\n \"delta-spark\",\n \"ecdsa\",\n \"nvidia-cudnn-cu11\",\n \"enum34\",\n \"flask-cors\",\n \"slicer\",\n \"spacy\",\n \"fiona\",\n \"python-jose\",\n \"watchtower\",\n \"unicodecsv\",\n \"imagesize\",\n \"schema\",\n \"alabaster\",\n \"kfp\",\n \"geopandas\",\n \"marshmallow-enum\",\n \"apache-airflow-providers-common-sql\",\n \"pyfunctional\",\n \"dbt-core\",\n \"validators\",\n \"keras-preprocessing\",\n \"holidays\",\n \"python-daemon\",\n \"readme-renderer\",\n \"djangorestframework\",\n \"pandas-gbq\",\n \"azure-storage-queue\",\n \"azure-servicebus\",\n \"hypothesis\",\n \"tifffile\",\n 
\"sshtunnel\",\n \"graphframes\",\n \"lz4\",\n \"kfp-server-api\",\n \"python-magic\",\n \"invoke\",\n \"avro-python3\",\n \"parse\",\n \"kfp-pipeline-spec\",\n \"freezegun\",\n \"constructs\",\n \"outcome\",\n \"python-multipart\",\n \"billiard\",\n \"monotonic\",\n \"pip-tools\",\n \"vine\",\n \"fasteners\",\n \"ddtrace\",\n \"databricks-sql-connector\",\n \"pycountry\",\n \"azure-keyvault-keys\",\n \"sendgrid\",\n \"click-repl\",\n \"srsly\",\n \"pika\",\n \"chex\",\n \"thinc\",\n \"ijson\",\n \"jira\",\n \"docker-pycreds\",\n \"hpack\",\n \"opencv-python-headless\",\n \"blis\",\n \"flask-sqlalchemy\",\n \"fuzzywuzzy\",\n \"xlwt\",\n \"imbalanced-learn\",\n \"qtconsole\",\n \"pydata-google-auth\",\n \"h2\",\n \"pyproject-api\",\n \"sh\",\n \"lit\",\n \"hyperframe\",\n \"stringcase\",\n \"astor\",\n \"langchain-guides\",\n \"langchain\",\n \"wasabi\",\n \"pytest-metadata\",\n \"bitarray\",\n \"pathtools\",\n \"catalogue\",\n \"nose\",\n \"yapf\",\n \"distributed\",\n \"amqp\",\n \"pathy\",\n \"qtpy\",\n \"types-pytz\",\n \"boto3-stubs\",\n \"triton\",\n \"office365-rest-python-client\",\n \"hatchling\",\n \"jupyter-console\",\n \"slackclient\",\n \"atomicwrites\",\n \"starkbank-ecdsa\",\n \"omegaconf\",\n \"editables\",\n \"uvloop\",\n \"humanize\",\n \"knack\",\n \"botocore-stubs\",\n \"iso8601\",\n \"smdebug-rulesconfig\",\n \"crcmod\",\n \"torchmetrics\",\n \"fastparquet\",\n \"python-levenshtein\",\n \"pytimeparse\",\n \"mypy-boto3-s3\",\n \"einops\",\n \"pywin32\",\n \"jpype1\",\n \"pydeequ\",\n \"cog\",\n \"azure-cli\",\n \"pymeeus\",\n \"types-six\",\n \"murmurhash\",\n \"ansible\",\n \"pyspnego\",\n \"inflect\",\n \"phonenumbers\",\n \"flask-wtf\",\n \"cymem\",\n \"preshed\",\n \"cdk-nag\",\n \"aws-requests-auth\",\n \"google-cloud-audit-log\",\n \"ua-parser\",\n \"jsondiff\",\n \"yamllint\",\n \"nbclassic\",\n \"cerberus\",\n \"lazy-loader\",\n \"dacite\",\n \"statsd\",\n \"cssselect\",\n \"dpath\",\n \"apispec\",\n \"gensim\",\n \"django-cors-headers\",\n \"ruff\",\n \"gradio\",\n \"convertdate\",\n \"scp\",\n \"geopy\",\n \"sqlalchemy-utils\",\n \"azure-data-tables\",\n \"pypdf2\",\n \"partd\",\n \"graphql-core\",\n \"python-gitlab\",\n \"ninja\",\n \"ratelimit\",\n \"junit-xml\",\n \"levenshtein\",\n \"fabric\",\n \"pydot\",\n \"azure-storage-file-share\",\n \"pytorch-lightning\",\n \"watchfiles\",\n \"types-setuptools\",\n \"requests-mock\",\n \"strip-hints\",\n \"keras-applications\",\n \"pyotp\",\n \"mashumaro\",\n \"apache-airflow-providers-http\",\n \"ipaddress\",\n \"timm\",\n \"click-didyoumean\",\n \"bytecode\",\n \"parameterized\",\n \"netaddr\",\n \"flask-appbuilder\",\n \"pyperclip\",\n \"openapi-spec-validator\",\n \"onnx\",\n \"marshmallow-sqlalchemy\",\n \"locket\",\n \"lark\",\n \"mysqlclient\",\n \"confection\",\n \"pytest-html\",\n \"azure-cosmosdb-table\",\n \"agate\",\n \"geographiclib\",\n \"types-paramiko\",\n \"pytest-rerunfailures\",\n \"pyserial\",\n \"spacy-loggers\",\n \"flask-login\",\n \"flask-jwt-extended\",\n \"azure-devops\",\n \"xarray\",\n \"spark-nlp\",\n \"dateparser\",\n \"onnxruntime\",\n \"twisted\",\n \"lightning-utilities\",\n \"wtforms\",\n \"jaydebeapi\",\n \"bokeh\",\n \"natsort\",\n \"google-cloud-bigtable\",\n \"grpcio-health-checking\",\n \"tensorflow-text\",\n \"twine\",\n \"commonmark\",\n \"grpcio-reflection\",\n \"flask-caching\",\n \"cron-descriptor\",\n \"pyaml\",\n \"geoip2\",\n \"nh3\",\n \"autopep8\",\n \"python-editor\",\n \"logbook\",\n \"ftfy\",\n \"cachelib\",\n \"datadog-api-client\",\n \"jupyter\",\n \"hologram\",\n 
\"protobuf3-to-dict\",\n \"ndg-httpsclient\",\n \"promise\",\n \"azureml-core\",\n \"pydub\",\n \"jax\",\n \"flit-core\",\n \"zstandard\",\n \"cssselect2\",\n \"minimal-snowplow-tracker\",\n \"dbt-extractor\",\n \"connexion\",\n \"azure-keyvault-certificates\",\n \"configargparse\",\n \"aniso8601\",\n \"cairocffi\",\n \"hyperlink\",\n \"cramjam\",\n \"elasticsearch-dsl\",\n \"mypy-boto3-redshift-data\",\n \"h3\",\n \"cairosvg\",\n \"maxminddb\",\n \"pytz-deprecation-shim\",\n \"reportlab\",\n \"langcodes\",\n \"pytest-forked\",\n \"pymupdf\",\n \"ansible-core\",\n \"cloudevents\",\n \"leather\",\n \"ddsketch\",\n \"jaxlib\",\n \"oldest-supported-numpy\",\n \"tiktoken\",\n \"supervisor\",\n \"diskcache\",\n \"functions-framework\",\n \"hdfs\",\n \"apache-airflow-providers-ssh\",\n \"gradio-client\",\n \"azure-multiapi-storage\",\n \"funcsigs\",\n \"azure-kusto-data\",\n \"envier\",\n \"pyhive\",\n \"types-protobuf\",\n \"django-filter\",\n \"elastic-transport\",\n \"parse-type\",\n \"types-python-dateutil\",\n \"boltons\",\n \"python-docx\",\n \"twilio\",\n \"twilio-python\",\n \"pgpy\",\n \"korean-lunar-calendar\",\n \"azure-eventgrid\",\n \"async-generator\",\n \"globus-sdk\",\n \"apache-airflow-providers-imap\",\n \"sentence-transformers\",\n \"mkdocs-material\",\n \"aws-xray-sdk\",\n \"resolvelib\",\n \"linkify-it-py\",\n \"setuptools-rust\",\n \"google\",\n \"terminaltables\",\n \"keystoneauth1\",\n \"apache-airflow-providers-ftp\",\n \"javaproperties\",\n \"sqlalchemy-redshift\",\n \"jdcal\",\n \"pep517\",\n \"incremental\",\n \"limits\",\n \"unittest-xml-reporting\",\n \"frozendict\",\n \"service-identity\",\n \"factory-boy\",\n \"ml-dtypes\",\n \"addict\",\n \"uc-micro-py\",\n \"shortuuid\",\n \"pypandoc\",\n \"blessed\",\n \"cx-oracle\",\n \"requests-ntlm\",\n \"django-extensions\",\n \"apache-airflow-providers-amazon\",\n \"python-keystoneclient\",\n \"bracex\",\n \"cmdstanpy\",\n \"apache-airflow-providers-sqlite\",\n \"cookiecutter\",\n \"types-cryptography\",\n \"flask-session\",\n \"timezonefinder\",\n \"magicattr\",\n \"pymsteams\",\n \"pylint-plugin-utils\",\n \"voluptuous\",\n \"langsmith\",\n \"cinemagoer\",\n \"passlib\",\n \"imdbpy\",\n \"emoji\",\n \"databricks-api\",\n \"configobj\",\n \"bandit\",\n \"ultralytics\",\n \"w3lib\",\n \"dirac\",\n \"backports-functools-lru-cache\",\n \"tableauserverclient\",\n \"automat\",\n \"pypika\",\n \"pydash\",\n \"py-cpuinfo\",\n \"mmh3\",\n \"tokenize-rt\",\n \"python-swiftclient\",\n \"tensorflow-hub\",\n \"librosa\",\n \"webdriver-manager\",\n \"constantly\",\n \"user-agents\",\n \"injector\",\n \"youtube-dl\",\n \"pdfminer-six\",\n \"markdown2\",\n \"ffmpy\",\n \"mergedeep\",\n \"netifaces\",\n \"databricks-sdk\",\n \"azure-keyvault-administration\",\n \"ephem\",\n \"flax\",\n \"urllib3-secure-extra\",\n \"looker-sdk\",\n \"kornia\",\n \"python3-openid\",\n \"userpath\",\n \"polars\",\n \"tensorboardx\",\n \"openapi-schema-validator\",\n \"jellyfish\",\n \"ray\",\n \"django-storages\",\n \"asyncpg\",\n \"dynamodb-json\",\n \"pycocotools\",\n \"lunarcalendar\",\n \"types-redis\",\n \"dm-tree\",\n \"flask-limiter\",\n \"scapy\",\n \"sacremoses\",\n \"hiredis\",\n \"netcdf4\",\n \"pyhocon\",\n \"cmaes\",\n \"feedparser\",\n \"firebase-admin\",\n \"yacs\",\n \"prison\",\n \"pytest-localserver\",\n \"polling2\",\n \"flask-babel\",\n \"influxdb\",\n \"binaryornot\",\n \"psycopg3\",\n \"sarif-om\",\n \"jschema-to-python\",\n \"cfn-flip\",\n \"google-apitools\",\n \"ipdb\",\n \"pyrfc3339\",\n \"filterpy\",\n \"py-spy\",\n \"wcmatch\",\n 
\"launchdarkly-server-sdk\",\n \"pyelftools\",\n \"logging-azure-rest\",\n \"python-jenkins\",\n \"apache-airflow-providers-cncf-kubernetes\",\n \"google-ads\",\n \"clickclick\",\n \"streamlit\",\n \"pylint-django\",\n \"yq\",\n \"findspark\",\n \"pycares\",\n \"mkdocs\",\n \"pytimeparse2\",\n \"ldap3\",\n \"pyee\",\n \"pydocstyle\",\n \"catboost\",\n \"sqlalchemy-jsonfield\",\n \"optuna\",\n \"aws-lambda-powertools\",\n \"lru-dict\",\n \"rasterio\",\n \"cartoframes\",\n \"carto\",\n \"aiodns\",\n \"pyrestcli\",\n \"opentracing\",\n \"tensorflow-datasets\",\n \"apache-airflow-providers-google\",\n \"jsonlines\",\n \"azure\",\n \"backports-weakref\",\n \"diff-cover\",\n \"cftime\",\n \"azure-kusto-ingest\",\n \"qrcode\",\n \"redis-py-cluster\",\n \"diffusers\",\n \"grpclib\",\n \"pypdf\",\n \"thrift-sasl\",\n \"django-debug-toolbar\",\n \"dynaconf\",\n \"django-redis\",\n \"salesforce-bulk\",\n \"kazoo\",\n \"configupdater\",\n \"comtypes\",\n \"langdetect\",\n \"hydra-core\",\n \"pytest-django\",\n \"pywin32-ctypes\",\n \"pyminizip\",\n \"pathvalidate\",\n \"google-re2\",\n \"idna-ssl\",\n \"dagster-pandas\",\n \"toposort\",\n \"expiringdict\",\n \"rdflib\",\n \"etils\",\n \"rich-argparse\",\n \"xyzservices\",\n \"bottle\",\n \"oslo-utils\",\n \"prophet\",\n \"pdfplumber\",\n \"azure-mgmt-subscription\",\n \"parsl\",\n \"jsii\",\n \"click-option-group\",\n \"analytics-python\",\n \"home-run\",\n \"funcx\",\n \"funcx-common\",\n \"lmdb\",\n \"zict\",\n \"multi-key-dict\",\n \"hatch-fancy-pypi-readme\",\n \"googlemaps\",\n \"pyudev\",\n \"atlassian-python-api\",\n \"dohq-artifactory\",\n \"oslo-i18n\",\n \"whitenoise\",\n \"aiosqlite\",\n \"python-engineio\",\n \"enum-compat\",\n \"affine\",\n \"fs\",\n \"flake8-bugbear\",\n \"hyperopt\",\n \"multipledispatch\",\n \"oslo-serialization\",\n \"pygeohash\",\n \"somnium\",\n \"kaleido\",\n \"python-snappy\",\n \"python-pptx\",\n \"gql\",\n \"pymdown-extensions\",\n \"wexpect\",\n \"types-pyopenssl\",\n \"foundationdb\",\n \"jsonschema-spec\",\n \"iopath\",\n \"snuggs\",\n \"strict-rfc3339\",\n \"tablib\",\n \"orderedmultidict\",\n \"sqlglot\",\n \"fakeredis\",\n \"pystan\",\n \"python-socketio\",\n \"robotframework\",\n \"pkgconfig\",\n \"pycairo\",\n \"python-consul\",\n \"curlify\",\n \"types-toml\",\n \"backports-tempfile\",\n \"multimethod\",\n \"pynamodb\",\n \"docker-compose\",\n \"munch\",\n \"torchaudio\",\n \"elementpath\",\n \"mypy-boto3-lambda\",\n \"python-decouple\",\n \"mypy-boto3-dynamodb\",\n \"pylev\",\n \"pmdarima\",\n \"drf-yasg\",\n \"path\",\n \"pyxlsb\",\n \"pandasql\",\n \"pipdeptree\",\n \"debtcollector\",\n \"nvidia-ml-py\",\n \"pyinstaller-hooks-contrib\",\n \"dvclive\",\n \"koalas\",\n \"arviz\",\n \"coreapi\",\n \"sqlalchemy-bigquery\",\n \"pyquery\",\n \"webob\",\n \"faiss-cpu\",\n \"flower\",\n \"cloudformation-cli\",\n \"azureml-dataset-runtime\",\n \"azure-mgmt\",\n \"cloudformation-cli-java-plugin\",\n \"pyinstaller\",\n \"python-box\",\n \"pympler\",\n \"mypy-boto3-secretsmanager\",\n \"marshmallow-oneofschema\",\n \"schedule\",\n \"resampy\",\n \"bitstring\",\n \"timeout-decorator\",\n \"furl\",\n \"bidict\",\n \"setuptools-git\",\n \"jsonmerge\",\n \"htmlmin\",\n \"plumbum\",\n \"gdown\",\n \"evergreen-py\",\n \"tableauhyperapi\",\n \"xformers\",\n \"yt-dlp\",\n \"waitress\",\n \"mypy-boto3-cloudformation\",\n \"tld\",\n \"pipx\",\n \"fake-useragent\",\n \"junitparser\",\n \"pylint-flask\",\n \"jaraco-functools\",\n \"geomet\",\n \"yappi\",\n \"flask-openid\",\n \"apache-airflow-providers-snowflake\",\n 
\"ciso8601\",\n \"paho-mqtt\",\n \"aiohttp-retry\",\n \"smbprotocol\",\n \"mypy-protobuf\",\n \"msgpack-python\",\n \"dockerpty\",\n \"cssutils\",\n \"djangorestframework-simplejwt\",\n \"wordcloud\",\n \"pytest-env\",\n \"django-environ\",\n \"s3cmd\",\n \"graphene\",\n \"soundfile\",\n \"html2text\",\n \"dagster-dbt\",\n \"apache-airflow-providers-databricks\",\n \"python-nvd3\",\n \"pygobject\",\n \"azureml-sdk\",\n \"click-default-group\",\n \"azureml-dataprep\",\n \"pygit2\",\n \"boto3-type-annotations\",\n \"imagehash\",\n \"ec2-metadata\",\n \"requests-futures\",\n \"rx\",\n \"geventhttpclient\",\n \"wget\",\n \"xmlschema\",\n \"python-rapidjson\",\n \"playwright\",\n \"flatten-json\",\n \"collections-extended\",\n \"myst-parser\",\n \"flask-restful\",\n \"facebook-business\",\n \"pdpyras\",\n \"python-crfsuite\",\n \"pydeck\",\n \"dash-core-components\",\n \"publication\",\n \"zthreading\",\n \"cheroot\",\n \"minio\",\n \"uwsgi\",\n \"portpicker\",\n \"simplegeneric\",\n \"python-crontab\",\n \"basicsr\",\n \"facexlib\",\n \"testpath\",\n \"json-log-formatter\",\n \"ghp-import\",\n \"sseclient-py\",\n \"ansi2html\",\n \"jproperties\",\n \"django-timezone-field\",\n \"duckdb\",\n \"pygsheets\",\n \"pyzstd\",\n \"opencv-contrib-python\",\n \"pyyaml-env-tag\",\n \"pyaes\",\n \"pooch\",\n \"funcy\",\n \"appnope\",\n \"cerberus-python-client\",\n \"realesrgan\",\n \"readchar\",\n \"cassandra-driver\",\n \"requests-unixsocket\",\n \"pyproject-metadata\",\n \"dictdiffer\",\n \"pypng\",\n \"ffmpeg-python\",\n \"locust\",\n \"pymc\",\n \"modelx\",\n \"ffn\",\n \"finance-py\",\n \"gs-quant\",\n \"tf-quant-finance\",\n \"finta\",\n \"qstrader\",\n \"blankly\",\n \"ta-lib-python\",\n \"zipline\",\n \"bt\",\n \"backtrader\",\n \"pyalgotrade\",\n \"pandas-ta\",\n \"ta\",\n \"finmarket-py\",\n \"zvt\",\n \"py-portfolio-opt\",\n \"eiten\",\n \"backtesting-py\",\n \"quantstats\",\n \"qtpylib\",\n \"freqtrade\",\n \"qlib\",\n \"jesse\",\n \"finrl\",\n \"bulbea\",\n \"octobot\",\n \"tda-api\",\n \"vectorbt\",\n \"lean\",\n \"pybroker\",\n \"pyfolio\",\n \"empyrical\",\n \"finquant\",\n \"riskfolio-lib\",\n \"alphalens\",\n \"arch\",\n \"pyflux\",\n \"tsfresh\",\n \"gluonts\",\n \"yfinance\",\n \"alpha-vantage\",\n \"pandas-datareader\",\n \"yahoo-finance\",\n \"findatapy\",\n \"wallstreet\",\n \"alpaca-trade-api-python\",\n \"investpy\",\n \"xlwings\",\n \"dtale\",\n \"mplfinance\",\n \"keras\",\n \"opensearch-py\",\n \"openai\",\n \"dash\",\n \"stripe\",\n]" }, { "identifier": "OPENAI_MODELS", "path": "constants/cli.py", "snippet": "OPENAI_MODELS = [\n \"gpt-4-1106-preview\",\n \"gpt-4\",\n \"gpt-3.5-turbo\",\n \"gpt-3.5-turbo-16k\",\n]" }, { "identifier": "MODELS_TO_TOKENS", "path": "constants/ai.py", "snippet": "MODELS_TO_TOKENS = {\n \"gpt-4\": 8192,\n \"gpt-4-1106-preview\": 128000,\n \"gpt-4-32k\": 32768,\n \"gpt-3.5-turbo\": 4097,\n \"gpt-3.5-turbo-16k\": 16385,\n}" } ]
import os import openai import sys import argparse import traceback from getpass import getpass from rich import print as rprint from utils.utils import print_markdown, print_exception, extract_code_blocks, print_help from utils.stream import TextStream from utils.ai import ( retrieve_context, construct_prompt, get_remote_chat_response, get_other_chat_response, ) from constants.cli import ARGUMENTS, LIBRARIES, OPENAI_MODELS from constants.ai import MODELS_TO_TOKENS
13,073
if library not in LIBRARIES: rprint( "Library not found. Please refer to the list of available libraries." ) return filters["library_name"] = args.libraries # Get context window if model in OPENAI_MODELS: context_window = MODELS_TO_TOKENS[model] else: context_window = args.context_window # If local model requested, use LMStudio api_key = "" if args.local: model = "local-model" print_markdown( f"""--- **You are using a local model.** We're working with LM Studio to provide access to local models for you. Download and start your model to get started. Instructions: 1. Download LM Studio. You can find the download link here: https://lmstudio.ai 2. Open LM Studio and download your model of choice. 3. Click the **↔ icon** on the very left sidebar 4. Select your model and click "Start Server" Note that your context window is set to {context_window}. To change this, run `context --context_window <context window>`. ---""" ) else: openrouter_key = os.environ.get("OPENROUTER_API_KEY") openai_key = os.environ.get("OPENAI_API_KEY") # Get the OpenAI API key, if not found if model in OPENAI_MODELS and not openai_key: print_markdown( """--- !!!**OpenAI API key not found.** Please provide a key to proceed. --- """ ) openai_key = getpass("OpenAI API key: ") os.environ["OPENAI_API_KEY"] = openai_key print_markdown( """ --- **Tip**: To save this key for later, run `export OPENAI_API_KEY=<your key>` on mac/linux or `setx OPENAI_API_KEY <your key>` on windows. For non-OpenAI models, you should set `OPENROUTER_API_KEY`, and optionally `OPENROUTER_APP_URL` and `OPENROUTER_APP_TITLE`. ---""" ) # Otherwise, grab the openrouter key, if not found elif model not in OPENAI_MODELS and not openrouter_key: print_markdown( """--- !!!**OpenRouter API key not found.** Please provide a key to proceed. --- """ ) api_key = getpass("OpenRouter API key: ") os.environ["OPENROUTER_API_KEY"] = api_key print_markdown( f""" --- **Tip**: To save this key for later, run `export OPENROUTER_API_KEY=<your key>` on mac/linux or `setx OPENROUTER_API_KEY <your key>` on windows. You can optionally set `OPENROUTER_APP_URL` and `OPENROUTER_APP_TITLE`, too. Note that your context window is set to {context_window}. To change this, run `context --context_window <context window>`. ---""" ) if model == "gpt-4-1106-preview": print_markdown( """!!!Welcome to Fleet Context! Generate and run code using the most up-to-date libraries. *Warning*: You are using gpt-4-turbo, which is not yet stable and is rate limited at 100 requests per day. Please use with caution. """ ) else: print_markdown( """!!!Welcome to Fleet Context! Generate and run code using the most up-to-date libraries. """ ) messages = [] while True: try: query = input("> ") query = query.strip() if not query: continue if query.lower() == "exit": rprint("Exiting. Goodbye!") break messages.append({"role": "user", "content": query}) rag_context = retrieve_context(query, k=k, filters=filters)
# pylint: disable=E0401 # pylint: disable=W0122 # pylint: disable=W0718 def main(): parser = argparse.ArgumentParser(description="Fleet Data Retriever", add_help=False) parser.add_argument("help", nargs="?", default=argparse.SUPPRESS) # Add arguments for arg in ARGUMENTS: if arg["type"] == bool: default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], action="store_true", default=default, ) elif arg["type"] == list: choices = arg["choices"] if "choices" in arg else None default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=str, nargs="+", choices=choices, default=default, ) else: choices = arg["choices"] if "choices" in arg else None default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=arg["type"], choices=choices, default=default, ) # Hit the retrieve endpoint args = parser.parse_args() k = args.k_value model = args.model cite_sources = args.cite_sources filters = {} if getattr(args, "help", None) is not None: print_help() return # If library specified, match library name to uuid if args.libraries: for library in args.libraries: if library not in LIBRARIES: rprint( "Library not found. Please refer to the list of available libraries." ) return filters["library_name"] = args.libraries # Get context window if model in OPENAI_MODELS: context_window = MODELS_TO_TOKENS[model] else: context_window = args.context_window # If local model requested, use LMStudio api_key = "" if args.local: model = "local-model" print_markdown( f"""--- **You are using a local model.** We're working with LM Studio to provide access to local models for you. Download and start your model to get started. Instructions: 1. Download LM Studio. You can find the download link here: https://lmstudio.ai 2. Open LM Studio and download your model of choice. 3. Click the **↔ icon** on the very left sidebar 4. Select your model and click "Start Server" Note that your context window is set to {context_window}. To change this, run `context --context_window <context window>`. ---""" ) else: openrouter_key = os.environ.get("OPENROUTER_API_KEY") openai_key = os.environ.get("OPENAI_API_KEY") # Get the OpenAI API key, if not found if model in OPENAI_MODELS and not openai_key: print_markdown( """--- !!!**OpenAI API key not found.** Please provide a key to proceed. --- """ ) openai_key = getpass("OpenAI API key: ") os.environ["OPENAI_API_KEY"] = openai_key print_markdown( """ --- **Tip**: To save this key for later, run `export OPENAI_API_KEY=<your key>` on mac/linux or `setx OPENAI_API_KEY <your key>` on windows. For non-OpenAI models, you should set `OPENROUTER_API_KEY`, and optionally `OPENROUTER_APP_URL` and `OPENROUTER_APP_TITLE`. ---""" ) # Otherwise, grab the openrouter key, if not found elif model not in OPENAI_MODELS and not openrouter_key: print_markdown( """--- !!!**OpenRouter API key not found.** Please provide a key to proceed. --- """ ) api_key = getpass("OpenRouter API key: ") os.environ["OPENROUTER_API_KEY"] = api_key print_markdown( f""" --- **Tip**: To save this key for later, run `export OPENROUTER_API_KEY=<your key>` on mac/linux or `setx OPENROUTER_API_KEY <your key>` on windows. You can optionally set `OPENROUTER_APP_URL` and `OPENROUTER_APP_TITLE`, too. 
Note that your context window is set to {context_window}. To change this, run `context --context_window <context window>`. ---""" ) if model == "gpt-4-1106-preview": print_markdown( """!!!Welcome to Fleet Context! Generate and run code using the most up-to-date libraries. *Warning*: You are using gpt-4-turbo, which is not yet stable and is rate limited at 100 requests per day. Please use with caution. """ ) else: print_markdown( """!!!Welcome to Fleet Context! Generate and run code using the most up-to-date libraries. """ ) messages = [] while True: try: query = input("> ") query = query.strip() if not query: continue if query.lower() == "exit": rprint("Exiting. Goodbye!") break messages.append({"role": "user", "content": query}) rag_context = retrieve_context(query, k=k, filters=filters)
prompts = construct_prompt(
6
2023-11-02 07:07:13+00:00
16k
ForceFledgling/proxyhub
proxyhub/api.py
[ { "identifier": "Checker", "path": "proxyhub/checker.py", "snippet": "class Checker:\n \"\"\"Proxy checker.\"\"\"\n\n def __init__(\n self,\n judges,\n max_tries=3,\n timeout=8,\n verify_ssl=False,\n strict=False,\n dnsbl=None,\n real_ext_ip=None,\n types=None,\n post=False,\n loop=None,\n ):\n Judge.clear()\n self._judges = get_judges(judges, timeout, verify_ssl)\n self._method = 'POST' if post else 'GET'\n self._max_tries = max_tries\n self._real_ext_ip = real_ext_ip\n self._strict = strict\n self._dnsbl = dnsbl or []\n self._types = types or {}\n self._loop = loop or asyncio.get_event_loop()\n self._resolver = Resolver(loop=self._loop)\n\n self._req_http_proto = not types or bool(\n ('HTTP', 'CONNECT:80', 'SOCKS4', 'SOCKS5') & types.keys()\n )\n self._req_https_proto = not types or bool(('HTTPS',) & types.keys())\n self._req_smtp_proto = not types or bool(('CONNECT:25',) & types.keys()) # noqa\n\n self._ngtrs = {proto for proto in types or NGTRS}\n\n async def check_judges(self):\n # TODO: need refactoring\n log.debug('Start check judges')\n stime = time.time()\n await asyncio.gather(\n *[j.check(real_ext_ip=self._real_ext_ip) for j in self._judges]\n )\n\n self._judges = [j for j in self._judges if j.is_working]\n log.debug(\n '%d judges added. Runtime: %.4f;' % (len(self._judges), time.time() - stime)\n )\n\n nojudges = []\n disable_protocols = []\n\n if len(Judge.available['HTTP']) == 0:\n nojudges.append('HTTP')\n disable_protocols.extend(['HTTP', 'CONNECT:80', 'SOCKS4', 'SOCKS5'])\n self._req_http_proto = False\n # for coroutines, which is already waiting\n Judge.ev['HTTP'].set()\n if len(Judge.available['HTTPS']) == 0:\n nojudges.append('HTTPS')\n disable_protocols.append('HTTPS')\n self._req_https_proto = False\n # for coroutines, which is already waiting\n Judge.ev['HTTPS'].set()\n if len(Judge.available['SMTP']) == 0:\n # nojudges.append('SMTP')\n disable_protocols.append('SMTP')\n self._req_smtp_proto = False\n # for coroutines, which is already waiting\n Judge.ev['SMTP'].set()\n\n for proto in disable_protocols:\n if proto in self._ngtrs:\n self._ngtrs.remove(proto)\n\n if nojudges:\n warnings.warn(\n 'Not found judges for the {nojudges} protocol.\\n'\n 'Checking proxy on protocols {disp} is disabled.'.format(\n nojudges=nojudges, disp=disable_protocols\n ),\n UserWarning,\n )\n if self._judges:\n log.debug('Loaded: %d proxy judges' % len(set(self._judges)))\n else:\n RuntimeError('Not found judges')\n\n def _types_passed(self, proxy):\n if not self._types:\n return True\n for proto, lvl in proxy.types.copy().items():\n req_levels = self._types.get(proto)\n if not req_levels or (lvl in req_levels):\n if not self._strict:\n return True\n else:\n if self._strict:\n del proxy.types[proto]\n if self._strict and proxy.types:\n return True\n proxy.log('Protocol or the level of anonymity differs from the requested')\n return False\n\n async def _in_DNSBL(self, host):\n _host = '.'.join(reversed(host.split('.'))) # reverse address\n tasks = []\n for domain in self._dnsbl:\n query = '.'.join([_host, domain])\n tasks.append(self._resolver.resolve(query, logging=False))\n responses = await asyncio.gather(*tasks, return_exceptions=True)\n if any([r for r in responses if not isinstance(r, ResolveError)]):\n return True\n return False\n\n async def check(self, proxy):\n if self._dnsbl:\n if await self._in_DNSBL(proxy.host):\n proxy.log('Found in DNSBL')\n return False\n\n if self._req_http_proto:\n await Judge.ev['HTTP'].wait()\n if self._req_https_proto:\n await 
Judge.ev['HTTPS'].wait()\n if self._req_smtp_proto:\n await Judge.ev['SMTP'].wait()\n\n if proxy.expected_types:\n ngtrs = proxy.expected_types & self._ngtrs\n else:\n ngtrs = self._ngtrs\n\n results = []\n for proto in ngtrs:\n if proto == 'CONNECT:25':\n result = await self._check_conn_25(proxy, proto)\n else:\n result = await self._check(proxy, proto)\n results.append(result)\n\n proxy.is_working = True if any(results) else False\n\n if proxy.is_working and self._types_passed(proxy):\n return True\n return False\n\n async def _check_conn_25(self, proxy, proto):\n judge = Judge.get_random(proto)\n proxy.log('Selected judge: %s' % judge)\n result = False\n for attempt in range(self._max_tries):\n try:\n proxy.ngtr = proto\n await proxy.connect()\n await proxy.ngtr.negotiate(host=judge.host, ip=judge.ip)\n except ProxyTimeoutError:\n continue\n except (\n ProxyConnError,\n ProxyRecvError,\n ProxySendError,\n ProxyEmptyRecvError,\n BadStatusError,\n BadResponseError,\n ):\n break\n else:\n proxy.types[proxy.ngtr.name] = None\n result = True\n break\n finally:\n proxy.close()\n return result\n\n async def _check(self, proxy, proto):\n judge = Judge.get_random(proto)\n proxy.log('Selected judge: %s' % judge)\n result = False\n for attempt in range(self._max_tries):\n try:\n proxy.ngtr = proto\n await proxy.connect()\n await proxy.ngtr.negotiate(host=judge.host, ip=judge.ip)\n headers, content, rv = await _send_test_request(\n self._method, proxy, judge\n )\n except ProxyTimeoutError:\n continue\n except (\n ProxyConnError,\n ProxyRecvError,\n ProxySendError,\n ProxyEmptyRecvError,\n BadStatusError,\n BadResponseError,\n ):\n break\n else:\n content = _decompress_content(headers, content)\n result = _check_test_response(proxy, headers, content, rv)\n if result:\n if proxy.ngtr.check_anon_lvl:\n lvl = _get_anonymity_lvl(\n self._real_ext_ip, proxy, judge, content\n )\n else:\n lvl = None\n proxy.types[proxy.ngtr.name] = lvl\n break\n finally:\n proxy.close()\n return result" }, { "identifier": "ResolveError", "path": "proxyhub/errors.py", "snippet": "class ResolveError(Exception):\n pass" }, { "identifier": "PROVIDERS", "path": "proxyhub/providers.py", "snippet": "PROVIDERS = [\n Provider(\n url='http://www.proxylists.net/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 49\n Provider(\n url='https://api.proxyscrape.com/?request=getproxies&proxytype=http',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # added by ZerGo0\n Provider(\n url='https://api.proxyscrape.com/?request=getproxies&proxytype=socks4',\n proto=('SOCKS4'),\n ), # added by ZerGo0\n Provider(\n url='https://api.proxyscrape.com/?request=getproxies&proxytype=socks5',\n proto=('SOCKS5'),\n ), # added by ZerGo0\n Provider(\n url='http://ipaddress.com/proxy-list/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 53\n Provider(\n url='https://www.sslproxies.org/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 100\n Provider(\n url='https://freshfreeproxylist.wordpress.com/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 50\n Provider(\n url='http://proxytime.ru/http',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 1400\n Provider(\n url='https://free-proxy-list.net/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 300\n Provider(\n url='https://us-proxy.org/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 200\n Provider(\n url='http://fineproxy.org/eng/fresh-proxies/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 
'CONNECT:25'),\n ), # 5500\n Provider(url='https://socks-proxy.net/', proto=('SOCKS4', 'SOCKS5')), # 80\n Provider(\n url='http://www.httptunnel.ge/ProxyListForFree.aspx',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 200\n Provider(\n url='http://cn-proxy.com/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 70\n Provider(\n url='https://hugeproxies.com/home/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 800\n Provider(\n url='http://proxy.rufey.ru/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 153\n Provider(\n url='https://geekelectronics.org/my-servisy/proxy',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 400\n Provider(\n url='http://pubproxy.com/api/proxy?limit=20&format=txt',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 20\n Proxy_list_org(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 140\n Xseo_in(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 240\n Spys_ru(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 660\n Proxylistplus_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 450\n Proxylist_me(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 2872\n Foxtools_ru(\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'), max_conn=1\n ), # noqa; 500\n Gatherproxy_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 3212\n Nntime_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 1050\n Blogspot_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 24800\n Gatherproxy_com_socks(proto=('SOCKS4', 'SOCKS5')), # noqa; 30\n Blogspot_com_socks(proto=('SOCKS4', 'SOCKS5')), # noqa; 1486\n Tools_rosinstrument_com(\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')\n ), # noqa; 4000\n Tools_rosinstrument_com_socks(proto=('SOCKS4', 'SOCKS5')), # noqa; 1800\n My_proxy_com(max_conn=2), # noqa; 1000\n Checkerproxy_net(), # noqa; 60000\n Aliveproxy_com(), # noqa; 210\n Freeproxylists_com(), # noqa; 1338\n Webanetlabs_net(), # noqa; 5000\n Maxiproxies_com(), # noqa; 430\n Proxylist_download(), # noqa; 35590\n # # Bad...\n # http://www.proxylist.ro/\n # Provider(url='http://proxydb.net/',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS',\n # 'CONNECT:25', 'SOCKS4', 'SOCKS5')),\n # Provider(url='http://www.cybersyndrome.net/pla6.html',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 1100\n # Provider(url='https://www.ip-adress.com/proxy-list',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 57\n # Provider(url='https://www.marcosbl.com/lab/proxies/',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 89\n # Provider(url='http://go4free.xyz/Free-Proxy/',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 196\n # Provider(url='http://blackstarsecurity.com/proxy-list.txt'), # 7014\n # Provider(url='http://www.get-proxy.net/proxy-archives'), # 519\n # Proxyb_net(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 857\n # Proxz_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n # max_conn=2), # 443\n # Proxynova_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 818\n # _50kproxies_com(), # 822\n # Free_proxy_cz(), # 420\n]" }, { "identifier": "Provider", "path": "proxyhub/providers.py", "snippet": "class Provider:\n \"\"\"Proxy provider.\n\n Provider - a website that publish free public proxy lists.\n\n :param str url: Url of page where to find proxies\n :param tuple proto:\n (optional) List of the types (protocols) that may be supported\n by 
proxies returned by the provider. Then used as :attr:`Proxy.types`\n :param int max_conn:\n (optional) The maximum number of concurrent connections on the provider\n :param int max_tries:\n (optional) The maximum number of attempts to receive response\n :param int timeout:\n (optional) Timeout of a request in seconds\n \"\"\"\n\n _pattern = IPPortPatternGlobal\n\n def __init__(\n self, url=None, proto=(), max_conn=4, max_tries=3, timeout=20, loop=None\n ):\n if url:\n self.domain = urlparse(url).netloc\n self.url = url\n self.proto = proto\n self._max_tries = max_tries\n self._timeout = timeout\n self._session = None\n self._cookies = {}\n self._proxies = set()\n # concurrent connections on the current provider\n self._sem_provider = asyncio.Semaphore(max_conn)\n self._loop = loop or asyncio.get_event_loop()\n\n @property\n def proxies(self):\n \"\"\"Return all found proxies.\n\n :return:\n Set of tuples with proxy hosts, ports and types (protocols)\n that may be supported (from :attr:`.proto`).\n\n For example:\n {('192.168.0.1', '80', ('HTTP', 'HTTPS'), ...)}\n\n :rtype: set\n \"\"\"\n return self._proxies\n\n @proxies.setter\n def proxies(self, new):\n new = [(host, port, self.proto) for host, port in new if port]\n self._proxies.update(new)\n\n async def get_proxies(self):\n \"\"\"Receive proxies from the provider and return them.\n\n :return: :attr:`.proxies`\n \"\"\"\n log.debug('Try to get proxies from %s' % self.domain)\n\n async with aiohttp.ClientSession(\n headers=get_headers(), cookies=self._cookies, loop=self._loop\n ) as self._session:\n await self._pipe()\n\n log.debug(\n '%d proxies received from %s: %s'\n % (len(self.proxies), self.domain, self.proxies)\n )\n return self.proxies\n\n async def _pipe(self):\n await self._find_on_page(self.url)\n\n async def _find_on_pages(self, urls):\n if not urls:\n return\n tasks = []\n if not isinstance(urls[0], dict):\n urls = set(urls)\n for url in urls:\n if isinstance(url, dict):\n tasks.append(self._find_on_page(**url))\n else:\n tasks.append(self._find_on_page(url))\n await asyncio.gather(*tasks)\n\n async def _find_on_page(self, url, data=None, headers=None, method='GET'):\n page = await self.get(url, data=data, headers=headers, method=method)\n oldcount = len(self.proxies)\n try:\n received = self.find_proxies(page)\n except Exception as e:\n received = []\n log.error(\n 'Error when executing find_proxies.'\n 'Domain: %s; Error: %r' % (self.domain, e)\n )\n self.proxies = received\n added = len(self.proxies) - oldcount\n log.debug(\n '%d(%d) proxies added(received) from %s' % (added, len(received), url)\n )\n\n async def get(self, url, data=None, headers=None, method='GET'):\n for _ in range(self._max_tries):\n page = await self._get(url, data=data, headers=headers, method=method)\n if page:\n break\n return page\n\n async def _get(self, url, data=None, headers=None, method='GET'):\n page = ''\n try:\n timeout = aiohttp.ClientTimeout(total=self._timeout)\n async with self._sem_provider, self._session.request(\n method, url, data=data, headers=headers, timeout=timeout\n ) as resp:\n page = await resp.text()\n if resp.status != 200:\n log.debug(\n 'url: %s\\nheaders: %s\\ncookies: %s\\npage:\\n%s'\n % (url, resp.headers, resp.cookies, page)\n )\n raise BadStatusError('Status: %s' % resp.status)\n except (\n UnicodeDecodeError,\n BadStatusError,\n asyncio.TimeoutError,\n aiohttp.ClientOSError,\n aiohttp.ClientResponseError,\n aiohttp.ServerDisconnectedError,\n ) as e:\n page = ''\n log.debug('%s is failed. 
Error: %r;' % (url, e))\n return page\n\n def find_proxies(self, page):\n return self._find_proxies(page)\n\n def _find_proxies(self, page):\n proxies = self._pattern.findall(page)\n return proxies" }, { "identifier": "Proxy", "path": "proxyhub/proxy.py", "snippet": "class Proxy:\n \"\"\"Proxy.\n\n :param str host: IP address of the proxy\n :param int port: Port of the proxy\n :param tuple types:\n (optional) List of types (protocols) which may be supported\n by the proxy and which can be checked to work with the proxy\n :param int timeout:\n (optional) Timeout of a connection and receive a response in seconds\n :param bool verify_ssl:\n (optional) Flag indicating whether to check the SSL certificates.\n Set to True to check ssl certifications\n\n :raises ValueError: If the host not is IP address, or if the port > 65535\n \"\"\"\n\n @classmethod\n async def create(cls, host, *args, **kwargs):\n \"\"\"Asynchronously create a :class:`Proxy` object.\n\n :param str host: A passed host can be a domain or IP address.\n If the host is a domain, try to resolve it\n :param str *args:\n (optional) Positional arguments that :class:`Proxy` takes\n :param str **kwargs:\n (optional) Keyword arguments that :class:`Proxy` takes\n\n :return: :class:`Proxy` object\n :rtype: proxyhub.Proxy\n\n :raises ResolveError: If could not resolve the host\n :raises ValueError: If the port > 65535\n \"\"\" # noqa: W605\n loop = kwargs.pop('loop', None)\n resolver = kwargs.pop('resolver', Resolver(loop=loop))\n try:\n _host = await resolver.resolve(host)\n self = cls(_host, *args, **kwargs)\n except (ResolveError, ValueError) as e:\n log.error('%s:%s: Error at creating: %s' % (host, args[0], e))\n raise\n return self\n\n def __init__(self, host=None, port=None, types=(), timeout=8, verify_ssl=False):\n self.host = host\n if not Resolver.host_is_ip(self.host):\n raise ValueError(\n 'The host of proxy should be the IP address. '\n 'Try Proxy.create() if the host is a domain'\n )\n\n self.port = int(port)\n if self.port > 65535:\n raise ValueError('The port of proxy cannot be greater than 65535')\n\n self.expected_types = set(types) & {\n 'HTTP',\n 'HTTPS',\n 'CONNECT:80',\n 'CONNECT:25',\n 'SOCKS4',\n 'SOCKS5',\n }\n self._timeout = timeout\n self._ssl_context = True if verify_ssl else _ssl._create_unverified_context()\n self._types = {}\n self._is_working = False\n self.stat = {'requests': 0, 'errors': Counter()}\n self._ngtr = None\n self._geo = Resolver.get_ip_info(self.host)\n self._log = []\n self._runtimes = []\n self._schemes = ()\n self._closed = True\n self._reader = {'conn': None, 'ssl': None}\n self._writer = {'conn': None, 'ssl': None}\n\n def __repr__(self):\n \"\"\"Class representation\n e.g. 
<Proxy US 1.12 [HTTP: Anonymous, HTTPS] 10.0.0.1:8080>\n \"\"\"\n tpinfo = []\n order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731\n for tp, lvl in sorted(self.types.items(), key=order):\n s = '{tp}: {lvl}' if lvl else '{tp}'\n s = s.format(tp=tp, lvl=lvl)\n tpinfo.append(s)\n tpinfo = ', '.join(tpinfo)\n return '<Proxy {code} {avg:.2f}s [{types}] {host}:{port}>'.format(\n code=self._geo.code,\n types=tpinfo,\n host=self.host,\n port=self.port,\n avg=self.avg_resp_time,\n )\n\n @property\n def types(self):\n \"\"\"Types (protocols) supported by the proxy.\n\n | Where key is type, value is level of anonymity\n (only for HTTP, for other types level always is None).\n | Available types: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25\n | Available levels: Transparent, Anonymous, High.\n\n :rtype: dict\n \"\"\"\n return self._types\n\n @property\n def is_working(self):\n \"\"\"True if the proxy is working, False otherwise.\n\n :rtype: bool\n \"\"\"\n return self._is_working\n\n @is_working.setter\n def is_working(self, val):\n self._is_working = val\n\n @property\n def writer(self):\n return self._writer.get('ssl') or self._writer.get('conn')\n\n @property\n def reader(self):\n return self._reader.get('ssl') or self._reader.get('conn')\n\n @property\n def priority(self):\n return (self.error_rate, self.avg_resp_time)\n\n @property\n def error_rate(self):\n \"\"\"Error rate: from 0 to 1.\n\n For example: 0.7 = 70% requests ends with error.\n\n :rtype: float\n\n .. versionadded:: 0.2.0\n \"\"\"\n if not self.stat['requests']:\n return 0\n return round(sum(self.stat['errors'].values()) / self.stat['requests'], 2)\n\n @property\n def schemes(self):\n \"\"\"Return supported schemes.\"\"\"\n if not self._schemes:\n _schemes = []\n if self.types.keys() & _HTTP_PROTOS:\n _schemes.append('HTTP')\n if self.types.keys() & _HTTPS_PROTOS:\n _schemes.append('HTTPS')\n self._schemes = tuple(_schemes)\n return self._schemes\n\n @property\n def avg_resp_time(self):\n \"\"\"The average connection/response time.\n\n :rtype: float\n \"\"\"\n if not self._runtimes:\n return 0\n return round(sum(self._runtimes) / len(self._runtimes), 2)\n\n @property\n def avgRespTime(self):\n \"\"\"\n .. deprecated:: 2.0\n Use :attr:`avg_resp_time` instead.\n \"\"\"\n warnings.warn(\n '`avgRespTime` property is deprecated, ' 'use `avg_resp_time` instead.',\n DeprecationWarning,\n )\n return self.avg_resp_time\n\n @property\n def geo(self):\n \"\"\"Geo information about IP address of the proxy.\n\n :return:\n Named tuple with fields:\n * ``code`` - ISO country code\n * ``name`` - Full name of country\n * ``region_code`` - ISO region code\n * ``region_name`` - Full name of region\n * ``city_name`` - Full name of city\n :rtype: collections.namedtuple\n\n .. 
versionchanged:: 0.2.0\n In previous versions return a dictionary, now named tuple.\n \"\"\"\n return self._geo\n\n @property\n def ngtr(self):\n return self._ngtr\n\n @ngtr.setter\n def ngtr(self, proto):\n self._ngtr = NGTRS[proto](self)\n\n def as_json(self):\n \"\"\"Return the proxy's properties in JSON format.\n\n :rtype: dict\n \"\"\"\n info = {\n 'host': self.host,\n 'port': self.port,\n 'geo': {\n 'country': {'code': self._geo.code, 'name': self._geo.name},\n 'region': {\n 'code': self._geo.region_code,\n 'name': self._geo.region_name,\n },\n 'city': self._geo.city_name,\n },\n 'types': [],\n 'avg_resp_time': self.avg_resp_time,\n 'error_rate': self.error_rate,\n }\n\n order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731\n for tp, lvl in sorted(self.types.items(), key=order):\n info['types'].append({'type': tp, 'level': lvl or ''})\n return info\n\n def as_text(self):\n \"\"\"\n Return proxy as host:port\n\n :rtype: str\n \"\"\"\n return \"{}:{}\\n\".format(self.host, self.port)\n\n def log(self, msg, stime=0, err=None):\n ngtr = self.ngtr.name if self.ngtr else 'INFO'\n runtime = time.time() - stime if stime else 0\n log.debug(\n '{h}:{p} [{n}]: {msg}; Runtime: {rt:.2f}'.format(\n h=self.host, p=self.port, n=ngtr, msg=msg, rt=runtime\n )\n )\n trunc = '...' if len(msg) > 58 else ''\n msg = '{msg:.60s}{trunc}'.format(msg=msg, trunc=trunc)\n self._log.append((ngtr, msg, runtime))\n if err:\n self.stat['errors'][err.errmsg] += 1\n if runtime and 'timeout' not in msg:\n self._runtimes.append(runtime)\n\n def get_log(self):\n \"\"\"Proxy log.\n\n :return: The proxy log in format: (negotaitor, msg, runtime)\n :rtype: tuple\n\n .. versionadded:: 0.2.0\n \"\"\"\n return self._log\n\n async def connect(self, ssl=False):\n err = None\n msg = '%s' % 'SSL: ' if ssl else ''\n stime = time.time()\n self.log('%sInitial connection' % msg)\n try:\n if ssl:\n _type = 'ssl'\n sock = self._writer['conn'].get_extra_info('socket')\n params = {\n 'ssl': self._ssl_context,\n 'sock': sock,\n 'server_hostname': self.host,\n }\n else:\n _type = 'conn'\n params = {'host': self.host, 'port': self.port}\n self._reader[_type], self._writer[_type] = await asyncio.wait_for(\n asyncio.open_connection(**params), timeout=self._timeout\n )\n except asyncio.TimeoutError:\n msg += 'Connection: timeout'\n err = ProxyTimeoutError(msg)\n raise err\n except (ConnectionRefusedError, OSError, _ssl.SSLError):\n msg += 'Connection: failed'\n err = ProxyConnError(msg)\n raise err\n # except asyncio.CancelledError:\n # log.debug('Cancelled in proxy.connect()')\n # raise ProxyConnError()\n else:\n msg += 'Connection: success'\n self._closed = False\n finally:\n self.stat['requests'] += 1\n self.log(msg, stime, err=err)\n\n def close(self):\n if self._closed:\n return\n self._closed = True\n if self.writer:\n # try:\n self.writer.close()\n # except RuntimeError:\n # print('Try proxy.close() when loop is closed:',\n # asyncio.get_event_loop()._closed)\n self._reader = {'conn': None, 'ssl': None}\n self._writer = {'conn': None, 'ssl': None}\n self.log('Connection: closed')\n self._ngtr = None\n\n async def send(self, req):\n msg, err = '', None\n _req = req.encode() if not isinstance(req, bytes) else req\n try:\n self.writer.write(_req)\n await self.writer.drain()\n except ConnectionResetError:\n msg = '; Sending: failed'\n err = ProxySendError(msg)\n raise err\n finally:\n self.log('Request: %s%s' % (req, msg), err=err)\n\n async def recv(self, length=0, head_only=False):\n resp, msg, err = b'', '', None\n stime = 
time.time()\n try:\n resp = await asyncio.wait_for(\n self._recv(length, head_only), timeout=self._timeout\n )\n except asyncio.TimeoutError:\n msg = 'Received: timeout'\n err = ProxyTimeoutError(msg)\n raise err\n except (ConnectionResetError, OSError):\n msg = 'Received: failed' # (connection is reset by the peer)\n err = ProxyRecvError(msg)\n raise err\n else:\n msg = 'Received: %s bytes' % len(resp)\n if not resp:\n err = ProxyEmptyRecvError(msg)\n raise err\n finally:\n if resp:\n msg += ': %s' % resp[:12]\n self.log(msg, stime, err=err)\n return resp\n\n async def _recv(self, length=0, head_only=False):\n resp = b''\n if length:\n try:\n resp = await self.reader.readexactly(length)\n except asyncio.IncompleteReadError as e:\n resp = e.partial\n else:\n body_size, body_recv, chunked = 0, 0, None\n while not self.reader.at_eof():\n line = await self.reader.readline()\n resp += line\n if body_size:\n body_recv += len(line)\n if body_recv >= body_size:\n break\n elif chunked and line == b'0\\r\\n':\n break\n elif not body_size and line == b'\\r\\n':\n if head_only:\n break\n headers = parse_headers(resp)\n body_size = int(headers.get('Content-Length', 0))\n if not body_size:\n chunked = headers.get('Transfer-Encoding') == 'chunked'\n return resp" }, { "identifier": "Resolver", "path": "proxyhub/resolver.py", "snippet": "class Resolver:\n \"\"\"Async host resolver based on aiodns.\"\"\"\n\n _cached_hosts = {}\n _ip_hosts = [\n 'https://wtfismyip.com/text',\n 'http://api.ipify.org/',\n 'http://ipinfo.io/ip',\n 'http://ipv4.icanhazip.com/',\n 'http://myexternalip.com/raw',\n 'http://ipinfo.io/ip',\n 'http://ifconfig.io/ip',\n ]\n # the list of resolvers will point a copy of original one\n _temp_host = []\n\n def __init__(self, timeout=5, loop=None):\n self._timeout = timeout\n self._loop = loop or asyncio.get_event_loop()\n self._resolver = aiodns.DNSResolver(loop=self._loop)\n\n @staticmethod\n def host_is_ip(host):\n \"\"\"Check a host is IP address.\"\"\"\n # TODO: add IPv6 support\n try:\n host = '.'.join(f'{int(n)}' for n in host.split('.'))\n ipaddress.IPv4Address(host)\n except (ipaddress.AddressValueError, ValueError):\n return False\n else:\n return True\n\n @staticmethod\n def get_ip_info(ip):\n \"\"\"Return geo information about IP address.\n\n `code` - ISO country code\n `name` - Full name of country\n `region_code` - ISO region code\n `region_name` - Full name of region\n `city_name` - Full name of city\n \"\"\"\n # from pprint import pprint\n try:\n ipInfo = _mmdb_reader.get(ip) or {}\n except (maxminddb.errors.InvalidDatabaseError, ValueError):\n ipInfo = {}\n\n code, name = '--', 'Unknown'\n city_name, region_code, region_name = ('Unknown',) * 3\n if 'country' in ipInfo:\n code = ipInfo['country']['iso_code']\n name = ipInfo['country']['names']['en']\n elif 'continent' in ipInfo:\n code = ipInfo['continent']['code']\n name = ipInfo['continent']['names']['en']\n if 'city' in ipInfo:\n city_name = ipInfo['city']['names']['en']\n if 'subdivisions' in ipInfo:\n region_code = ipInfo['subdivisions'][0]['iso_code']\n region_name = ipInfo['subdivisions'][0]['names']['en']\n return GeoData(code, name, region_code, region_name, city_name)\n\n def _pop_random_ip_host(self):\n host = random.choice(self._temp_host)\n self._temp_host.remove(host)\n return host\n\n async def get_real_ext_ip(self):\n \"\"\"Return real external IP address.\"\"\"\n # make a copy of original one to temp one\n # so original one will stay no change\n self._temp_host = self._ip_hosts.copy()\n while 
self._temp_host:\n try:\n timeout = aiohttp.ClientTimeout(total=self._timeout)\n async with aiohttp.ClientSession(\n timeout=timeout, loop=self._loop\n ) as session, session.get(self._pop_random_ip_host()) as resp:\n ip = await resp.text()\n except asyncio.TimeoutError:\n pass\n else:\n ip = ip.strip()\n if self.host_is_ip(ip):\n log.debug('Real external IP: %s', ip)\n break\n else:\n raise RuntimeError('Could not get the external IP')\n return ip\n\n async def resolve(self, host, port=80, family=None, qtype='A', logging=True):\n \"\"\"Return resolving IP address(es) from host name.\"\"\"\n if self.host_is_ip(host):\n return host\n\n _host = self._cached_hosts.get(host)\n if _host:\n return _host\n\n resp = await self._resolve(host, qtype)\n\n if resp:\n hosts = [\n {\n 'hostname': host,\n 'host': r.host,\n 'port': port,\n 'family': family,\n 'proto': socket.IPPROTO_IP,\n 'flags': socket.AI_NUMERICHOST,\n }\n for r in resp\n ]\n if family:\n self._cached_hosts[host] = hosts\n else:\n self._cached_hosts[host] = hosts[0]['host']\n if logging:\n log.debug('%s: Host resolved: %s' % (host, self._cached_hosts[host]))\n else:\n if logging:\n log.warning('%s: Could not resolve host' % host)\n return self._cached_hosts.get(host)\n\n async def _resolve(self, host, qtype):\n try:\n resp = await asyncio.wait_for(\n self._resolver.query(host, qtype), timeout=self._timeout\n )\n except (aiodns.error.DNSError, asyncio.TimeoutError):\n raise ResolveError\n else:\n return resp" }, { "identifier": "Server", "path": "proxyhub/server.py", "snippet": "class Server:\n \"\"\"Server distributes incoming requests to a pool of found proxies.\"\"\"\n\n def __init__(\n self,\n host,\n port,\n proxies,\n timeout=8,\n max_tries=3,\n min_queue=5,\n min_req_proxy=5,\n max_error_rate=0.5,\n max_resp_time=8,\n prefer_connect=False,\n http_allowed_codes=None,\n backlog=100,\n loop=None,\n **kwargs,\n ):\n self.host = host\n self.port = int(port)\n self._loop = loop or asyncio.get_event_loop()\n self._timeout = timeout\n self._max_tries = max_tries\n self._backlog = backlog\n self._prefer_connect = prefer_connect\n\n self._server = None\n self._connections = {}\n self._proxy_pool = ProxyPool(\n proxies, min_req_proxy, max_error_rate, max_resp_time, min_queue\n )\n self._resolver = Resolver(loop=self._loop)\n self._http_allowed_codes = http_allowed_codes or []\n\n def start(self):\n\n srv = asyncio.start_server(\n self._accept,\n host=self.host,\n port=self.port,\n backlog=self._backlog,\n loop=self._loop,\n )\n self._server = self._loop.run_until_complete(srv)\n\n log.info(\n 'Listening established on {0}'.format(self._server.sockets[0].getsockname())\n )\n\n def stop(self):\n if not self._server:\n return\n for conn in self._connections:\n if not conn.done():\n conn.cancel()\n self._server.close()\n if not self._loop.is_running():\n self._loop.run_until_complete(self._server.wait_closed())\n # Time to close the running futures in self._connections\n self._loop.run_until_complete(asyncio.sleep(0.5))\n self._server = None\n self._loop.stop()\n log.info('Server is stopped')\n\n def _accept(self, client_reader, client_writer):\n def _on_completion(f):\n reader, writer = self._connections.pop(f)\n writer.close()\n log.debug('client: %d; closed' % id(client_reader))\n try:\n exc = f.exception()\n except asyncio.CancelledError:\n log.debug('CancelledError in server._handle:_on_completion')\n exc = None\n if exc:\n if isinstance(exc, NoProxyError):\n self.stop()\n else:\n raise exc\n\n f = 
asyncio.ensure_future(self._handle(client_reader, client_writer))\n f.add_done_callback(_on_completion)\n self._connections[f] = (client_reader, client_writer)\n\n async def _handle(self, client_reader, client_writer):\n log.debug(\n 'Accepted connection from %s' % (client_writer.get_extra_info('peername'),)\n )\n\n request, headers = await self._parse_request(client_reader)\n scheme = self._identify_scheme(headers)\n client = id(client_reader)\n log.debug(\n 'client: %d; request: %s; headers: %s; scheme: %s'\n % (client, request, headers, scheme)\n )\n\n # API for controlling proxyhub2\n if headers['Host'] == 'proxycontrol':\n _api, _operation, _params = headers['Path'].split('/', 5)[3:]\n if _api == 'api':\n if _operation == 'remove':\n proxy_host, proxy_port = _params.split(':', 1)\n self._proxy_pool.remove(proxy_host, int(proxy_port))\n log.debug(\n 'Remove Proxy: client: %d; request: %s; headers: %s; scheme: %s; proxy_host: %s; proxy_port: %s'\n % (client, request, headers, scheme, proxy_host, proxy_port)\n )\n client_writer.write(b'HTTP/1.1 204 No Content\\r\\n\\r\\n')\n await client_writer.drain()\n return\n elif _operation == 'history':\n query_type, url = _params.split(':', 1)\n if query_type == 'url':\n previous_proxy = history.get(\n f\"{client_reader._transport.get_extra_info('peername')[0]}-{url}\"\n )\n if previous_proxy is None:\n client_writer.write(b'HTTP/1.1 204 No Content\\r\\n\\r\\n')\n await client_writer.drain()\n return\n else:\n previous_proxy_bytestring = (\n '{\"proxy\": \"%s\"}' % previous_proxy\n ).encode()\n client_writer.write(b'HTTP/1.1 200 OK\\r\\n')\n client_writer.write(b'Content-Type: application/json\\r\\n')\n client_writer.write(\n f\"Content-Length: {str(len(previous_proxy_bytestring) + 2).encode()}\\r\\n\"\n )\n client_writer.write(b'Access-Control-Allow-Origin: *\\r\\n')\n client_writer.write(\n b'Access-Control-Allow-Credentials: true\\r\\n\\r\\n'\n )\n\n client_writer.write(previous_proxy_bytestring + b'\\r\\n')\n await client_writer.drain()\n return\n\n for attempt in range(self._max_tries):\n stime, err = 0, None\n proxy = await self._proxy_pool.get(scheme)\n proto = self._choice_proto(proxy, scheme)\n log.debug(\n 'client: %d; attempt: %d; proxy: %s; proto: %s'\n % (client, attempt, proxy, proto)\n )\n\n try:\n await proxy.connect()\n\n if proto in ('CONNECT:80', 'SOCKS4', 'SOCKS5'):\n host = headers.get('Host')\n port = headers.get('Port', 80)\n try:\n ip = await self._resolver.resolve(host)\n except ResolveError:\n return\n proxy.ngtr = proto\n await proxy.ngtr.negotiate(host=host, port=port, ip=ip)\n if scheme == 'HTTPS' and proto in ('SOCKS4', 'SOCKS5'):\n client_writer.write(CONNECTED)\n await client_writer.drain()\n else: # HTTP\n await proxy.send(request)\n else: # proto: HTTP & HTTPS\n await proxy.send(request)\n\n history[\n f\"{client_reader._transport.get_extra_info('peername')[0]}-{headers['Path']}\"\n ] = (proxy.host + ':' + str(proxy.port))\n inject_resp_header = {\n 'headers': {'X-Proxy-Info': proxy.host + ':' + str(proxy.port)}\n }\n\n stime = time.time()\n stream = [\n asyncio.ensure_future(\n self._stream(reader=client_reader, writer=proxy.writer)\n ),\n asyncio.ensure_future(\n self._stream(\n reader=proxy.reader,\n writer=client_writer,\n scheme=scheme,\n inject=inject_resp_header,\n )\n ),\n ]\n await asyncio.gather(*stream, loop=self._loop)\n except asyncio.CancelledError:\n log.debug('Cancelled in server._handle')\n break\n except (\n ProxyTimeoutError,\n ProxyConnError,\n ProxyRecvError,\n ProxySendError,\n 
ProxyEmptyRecvError,\n BadStatusError,\n BadResponseError,\n ) as e:\n log.debug('client: %d; error: %r' % (client, e))\n continue\n except ErrorOnStream as e:\n log.debug(\n 'client: %d; error: %r; EOF: %s'\n % (client, e, client_reader.at_eof())\n )\n for task in stream:\n if not task.done():\n task.cancel()\n if client_reader.at_eof() and 'Timeout' in repr(e):\n # Proxy may not be able to receive EOF and weel be raised a\n # TimeoutError, but all the data has already successfully\n # returned, so do not consider this error of proxy\n break\n err = e\n if scheme == 'HTTPS': # SSL Handshake probably failed\n break\n else:\n break\n finally:\n proxy.log(request.decode(), stime, err=err)\n proxy.close()\n self._proxy_pool.put(proxy)\n\n async def _parse_request(self, reader, length=65536):\n request = await reader.read(length)\n headers = parse_headers(request)\n if headers['Method'] == 'POST' and request.endswith(b'\\r\\n\\r\\n'):\n # For aiohttp. POST data returns on second reading\n request += await reader.read(length)\n return request, headers\n\n def _identify_scheme(self, headers):\n if headers['Method'] == 'CONNECT':\n return 'HTTPS'\n else:\n return 'HTTP'\n\n def _choice_proto(self, proxy, scheme):\n if scheme == 'HTTP':\n if self._prefer_connect and ('CONNECT:80' in proxy.types):\n proto = 'CONNECT:80'\n else:\n relevant = {\n 'HTTP',\n 'CONNECT:80',\n 'SOCKS4',\n 'SOCKS5',\n } & proxy.types.keys()\n proto = relevant.pop()\n else: # HTTPS\n relevant = {'HTTPS', 'SOCKS4', 'SOCKS5'} & proxy.types.keys()\n proto = relevant.pop()\n return proto\n\n async def _stream(self, reader, writer, length=65536, scheme=None, inject=None):\n checked = False\n\n try:\n while not reader.at_eof():\n data = await asyncio.wait_for(reader.read(length), self._timeout)\n if not data:\n writer.close()\n break\n elif scheme and not checked:\n self._check_response(data, scheme)\n\n if inject.get('headers') is not None and len(inject['headers']) > 0:\n data = self._inject_headers(data, scheme, inject['headers'])\n\n checked = True\n\n writer.write(data)\n await writer.drain()\n\n except (\n asyncio.TimeoutError,\n ConnectionResetError,\n OSError,\n ProxyRecvError,\n BadStatusError,\n BadResponseError,\n ) as e:\n raise ErrorOnStream(e)\n\n def _check_response(self, data, scheme):\n if scheme == 'HTTP' and self._http_allowed_codes:\n line = data.split(b'\\r\\n', 1)[0].decode()\n try:\n header = parse_status_line(line)\n except BadStatusLine:\n raise BadResponseError\n if header['Status'] not in self._http_allowed_codes:\n raise BadStatusError(\n '%r not in %r' % (header['Status'], self._http_allowed_codes)\n )\n\n def _inject_headers(self, data, scheme, headers):\n custom_lines = []\n\n if scheme == 'HTTP' or scheme == 'HTTPS':\n status_line, rest_lines = data.split(b'\\r\\n', 1)\n custom_lines.append(status_line)\n\n for k, v in headers.items():\n custom_lines.append(('%s: %s' % (k, v)).encode())\n\n custom_lines.append(rest_lines)\n data = b'\\r\\n'.join(custom_lines)\n\n return data" }, { "identifier": "IPPortPatternLine", "path": "proxyhub/utils.py", "snippet": "BASE_DIR = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\nDATA_DIR = os.path.join(BASE_DIR, 'data')\ndef get_headers(rv=False):\ndef get_all_ip(page):\ndef get_status_code(resp, start=9, stop=12):\ndef parse_status_line(line):\ndef parse_headers(headers):\ndef update_geoip_db():" } ]
import asyncio import io import signal import warnings from collections import Counter, defaultdict from functools import partial from pprint import pprint from .checker import Checker from .errors import ResolveError from .providers import PROVIDERS, Provider from .proxy import Proxy from .resolver import Resolver from .server import Server from .utils import IPPortPatternLine, log
12,780
# Pause between grabbing cycles; in seconds. GRAB_PAUSE = 180 # The maximum number of providers that are parsed concurrently MAX_CONCURRENT_PROVIDERS = 3 class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxyhub.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxyhub.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop :param stop_broker_on_sigint: (optional) whether set SIGINT signal on broker object. Useful for a thread other than main thread. .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, stop_broker_on_sigint=True, **kwargs, ): self._loop = loop or asyncio.get_event_loop_policy().get_event_loop() self._proxies = queue or asyncio.Queue()
# Pause between grabbing cycles; in seconds. GRAB_PAUSE = 180 # The maximum number of providers that are parsed concurrently MAX_CONCURRENT_PROVIDERS = 3 class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxyhub.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxyhub.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop :param stop_broker_on_sigint: (optional) whether set SIGINT signal on broker object. Useful for a thread other than main thread. .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, stop_broker_on_sigint=True, **kwargs, ): self._loop = loop or asyncio.get_event_loop_policy().get_event_loop() self._proxies = queue or asyncio.Queue()
self._resolver = Resolver(loop=self._loop)
5
2023-11-05 13:28:57+00:00
16k
TheFunny/ArisuAutoSweeper
module/device/method/minitouch.py
[ { "identifier": "Config", "path": "module/base/decorator.py", "snippet": "class Config:\n \"\"\"\n Decorator that calls different function with a same name according to config.\n\n func_list likes:\n func_list = {\n 'func1': [\n {'options': {'ENABLE': True}, 'func': 1},\n {'options': {'ENABLE': False}, 'func': 1}\n ]\n }\n \"\"\"\n func_list = {}\n\n @classmethod\n def when(cls, **kwargs):\n \"\"\"\n Args:\n **kwargs: Any option in AzurLaneConfig.\n\n Examples:\n @Config.when(USE_ONE_CLICK_RETIREMENT=True)\n def retire_ships(self, amount=None, rarity=None):\n pass\n\n @Config.when(USE_ONE_CLICK_RETIREMENT=False)\n def retire_ships(self, amount=None, rarity=None):\n pass\n \"\"\"\n from module.logger import logger\n options = kwargs\n\n def decorate(func):\n name = func.__name__\n data = {'options': options, 'func': func}\n if name not in cls.func_list:\n cls.func_list[name] = [data]\n else:\n override = False\n for record in cls.func_list[name]:\n if record['options'] == data['options']:\n record['func'] = data['func']\n override = True\n if not override:\n cls.func_list[name].append(data)\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n \"\"\"\n Args:\n self: ModuleBase instance.\n *args:\n **kwargs:\n \"\"\"\n for record in cls.func_list[name]:\n\n flag = [value is None or self.config.__getattribute__(key) == value\n for key, value in record['options'].items()]\n if not all(flag):\n continue\n\n return record['func'](self, *args, **kwargs)\n\n logger.warning(f'No option fits for {name}, using the last define func.')\n return func(self, *args, **kwargs)\n\n return wrapper\n\n return decorate" }, { "identifier": "cached_property", "path": "module/base/decorator.py", "snippet": "class cached_property(Generic[T]):\n \"\"\"\n cached-property from https://github.com/pydanny/cached-property\n Add typing support\n\n A property that is only computed once per instance and then replaces itself\n with an ordinary attribute. Deleting the attribute resets the property.\n Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76\n \"\"\"\n\n def __init__(self, func: Callable[..., T]):\n self.func = func\n\n def __get__(self, obj, cls) -> T:\n if obj is None:\n return self\n\n value = obj.__dict__[self.func.__name__] = self.func(obj)\n return value" }, { "identifier": "del_cached_property", "path": "module/base/decorator.py", "snippet": "def del_cached_property(obj, name):\n \"\"\"\n Delete a cached property safely.\n\n Args:\n obj:\n name (str):\n \"\"\"\n try:\n del obj.__dict__[name]\n except KeyError:\n pass" }, { "identifier": "Timer", "path": "module/base/timer.py", "snippet": "class Timer:\n def __init__(self, limit, count=0):\n \"\"\"\n Args:\n limit (int, float): Timer limit\n count (int): Timer reach confirm count. 
Default to 0.\n When using a structure like this, must set a count.\n Otherwise it goes wrong, if screenshot time cost greater than limit.\n\n if self.appear(MAIN_CHECK):\n if confirm_timer.reached():\n pass\n else:\n confirm_timer.reset()\n\n Also, It's a good idea to set `count`, to make alas run more stable on slow computers.\n Expected speed is 0.35 second / screenshot.\n \"\"\"\n self.limit = limit\n self.count = count\n self._current = 0\n self._reach_count = count\n\n def start(self):\n if not self.started():\n self._current = time.time()\n self._reach_count = 0\n\n return self\n\n def started(self):\n return bool(self._current)\n\n def current(self):\n \"\"\"\n Returns:\n float\n \"\"\"\n if self.started():\n return time.time() - self._current\n else:\n return 0.\n\n def set_current(self, current, count=0):\n self._current = time.time() - current\n self._reach_count = count\n\n def reached(self):\n \"\"\"\n Returns:\n bool\n \"\"\"\n self._reach_count += 1\n return time.time() - self._current > self.limit and self._reach_count > self.count\n\n def reset(self):\n self._current = time.time()\n self._reach_count = 0\n return self\n\n def clear(self):\n self._current = 0\n self._reach_count = self.count\n return self\n\n def reached_and_reset(self):\n \"\"\"\n Returns:\n bool:\n \"\"\"\n if self.reached():\n self.reset()\n return True\n else:\n return False\n\n def wait(self):\n \"\"\"\n Wait until timer reached.\n \"\"\"\n diff = self._current + self.limit - time.time()\n if diff > 0:\n time.sleep(diff)\n\n def show(self):\n from module.logger import logger\n logger.info(str(self))\n\n def __str__(self):\n return f'Timer(limit={round(self.current(), 3)}/{self.limit}, count={self._reach_count}/{self.count})'\n\n __repr__ = __str__" }, { "identifier": "Connection", "path": "module/device/connection.py", "snippet": "class Connection(ConnectionAttr):\n def __init__(self, config):\n \"\"\"\n Args:\n config (AzurLaneConfig, str): Name of the user config under ./config\n \"\"\"\n super().__init__(config)\n if not self.is_over_http:\n self.detect_device()\n\n # Connect\n self.adb_connect(self.serial)\n logger.attr('AdbDevice', self.adb)\n\n # Package\n if self.config.Emulator_PackageName == 'auto':\n self.detect_package()\n else:\n self.package = server_.to_package(self.config.Emulator_PackageName)\n # No set_server cause game client and UI language can be different\n # else:\n # set_server(self.package)\n logger.attr('Server', self.config.Emulator_PackageName)\n server_.server = self.config.Emulator_PackageName\n logger.attr('PackageName', self.package)\n server_.lang = self.config.Emulator_GameLanguage\n logger.attr('Lang', self.config.LANG)\n\n self.check_mumu_app_keep_alive()\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def adb_command(self, cmd, timeout=10):\n \"\"\"\n Execute ADB commands in a subprocess,\n usually to be used when pulling or pushing large files.\n\n Args:\n cmd (list):\n timeout (int):\n\n Returns:\n str:\n \"\"\"\n cmd = list(map(str, cmd))\n cmd = [self.adb_binary, '-s', self.serial] + cmd\n logger.info(f'Execute: {cmd}')\n\n # Use shell=True to disable console window when using GUI.\n # Although, there's still a window when you stop running in GUI, which cause by gooey.\n # To disable it, edit gooey/gui/util/taskkill.py\n\n # No gooey anymore, just shell=False\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=False)\n try:\n stdout, stderr = process.communicate(timeout=timeout)\n except subprocess.TimeoutExpired:\n process.kill()\n stdout, stderr = 
process.communicate()\n logger.warning(f'TimeoutExpired when calling {cmd}, stdout={stdout}, stderr={stderr}')\n return stdout\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def adb_command(self, cmd, timeout=10):\n logger.warning(\n f'adb_command() is not available when connecting over http: {self.serial}, '\n )\n raise RequestHumanTakeover\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def adb_shell(self, cmd, stream=False, recvall=True, timeout=10, rstrip=True):\n \"\"\"\n Equivalent to `adb -s <serial> shell <*cmd>`\n\n Args:\n cmd (list, str):\n stream (bool): Return stream instead of string output (Default: False)\n recvall (bool): Receive all data when stream=True (Default: True)\n timeout (int): (Default: 10)\n rstrip (bool): Strip the last empty line (Default: True)\n\n Returns:\n str if stream=False\n bytes if stream=True and recvall=True\n socket if stream=True and recvall=False\n \"\"\"\n if not isinstance(cmd, str):\n cmd = list(map(str, cmd))\n\n if stream:\n result = self.adb.shell(cmd, stream=stream, timeout=timeout, rstrip=rstrip)\n if recvall:\n # bytes\n return recv_all(result)\n else:\n # socket\n return result\n else:\n result = self.adb.shell(cmd, stream=stream, timeout=timeout, rstrip=rstrip)\n result = remove_shell_warning(result)\n # str\n return result\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def adb_shell(self, cmd, stream=False, recvall=True, timeout=10, rstrip=True):\n \"\"\"\n Equivalent to http://127.0.0.1:7912/shell?command={command}\n\n Args:\n cmd (list, str):\n stream (bool): Return stream instead of string output (Default: False)\n recvall (bool): Receive all data when stream=True (Default: True)\n timeout (int): (Default: 10)\n rstrip (bool): Strip the last empty line (Default: True)\n\n Returns:\n str if stream=False\n bytes if stream=True\n \"\"\"\n if not isinstance(cmd, str):\n cmd = list(map(str, cmd))\n\n if stream:\n result = self.u2.shell(cmd, stream=stream, timeout=timeout)\n # Already received all, so `recvall` is ignored\n result = remove_shell_warning(result.content)\n # bytes\n return result\n else:\n result = self.u2.shell(cmd, stream=stream, timeout=timeout).output\n if rstrip:\n result = result.rstrip()\n result = remove_shell_warning(result)\n # str\n return result\n\n def adb_getprop(self, name):\n \"\"\"\n Get system property in Android, same as `getprop <name>`\n\n Args:\n name (str): Property name\n\n Returns:\n str:\n \"\"\"\n return self.adb_shell(['getprop', name]).strip()\n\n @cached_property\n def cpu_abi(self) -> str:\n \"\"\"\n Returns:\n str: arm64-v8a, armeabi-v7a, x86, x86_64\n \"\"\"\n abi = self.adb_getprop('ro.product.cpu.abi')\n if not len(abi):\n logger.error(f'CPU ABI invalid: \"{abi}\"')\n return abi\n\n @cached_property\n def sdk_ver(self) -> int:\n \"\"\"\n Android SDK/API levels, see https://apilevels.com/\n \"\"\"\n sdk = self.adb_getprop('ro.build.version.sdk')\n try:\n return int(sdk)\n except ValueError:\n logger.error(f'SDK version invalid: {sdk}')\n\n return 0\n\n @cached_property\n def is_avd(self):\n if get_serial_pair(self.serial)[0] is None:\n return False\n if 'ranchu' in self.adb_getprop('ro.hardware'):\n return True\n if 'goldfish' in self.adb_getprop('ro.hardware.audio.primary'):\n return True\n return False\n\n def check_mumu_app_keep_alive(self):\n if not self.is_mumu_family:\n return False\n\n res = self.adb_getprop('nemud.app_keep_alive')\n logger.attr('nemud.app_keep_alive', res)\n if res == '':\n # Empry property, might not be a mumu emulator or might be an old mumu\n return True\n elif res == 
'false':\n # Disabled\n return True\n elif res == 'true':\n # https://mumu.163.com/help/20230802/35047_1102450.html\n logger.critical('请在MuMu模拟器设置内关闭 \"后台挂机时保活运行\"')\n raise RequestHumanTakeover\n else:\n logger.warning(f'Invalid nemud.app_keep_alive value: {res}')\n return False\n\n @cached_property\n def _nc_server_host_port(self):\n \"\"\"\n Returns:\n str, int, str, int:\n server_listen_host, server_listen_port, client_connect_host, client_connect_port\n \"\"\"\n # For BlueStacks hyper-v, use ADB reverse\n if self.is_bluestacks_hyperv:\n host = '127.0.0.1'\n logger.info(f'Connecting to BlueStacks hyper-v, using host {host}')\n port = self.adb_reverse(f'tcp:{self.config.REVERSE_SERVER_PORT}')\n return host, port, host, self.config.REVERSE_SERVER_PORT\n # For emulators, listen on current host\n if self.is_emulator or self.is_over_http:\n try:\n host = socket.gethostbyname(socket.gethostname())\n except socket.gaierror as e:\n logger.error(e)\n logger.error(f'Unknown host name: {socket.gethostname()}')\n host = '127.0.0.1'\n if platform.system() == 'Linux' and host == '127.0.1.1':\n host = '127.0.0.1'\n logger.info(f'Connecting to local emulator, using host {host}')\n port = random_port(self.config.FORWARD_PORT_RANGE)\n\n # For AVD instance\n if self.is_avd:\n return host, port, \"10.0.2.2\", port\n\n return host, port, host, port\n # For local network devices, listen on the host under the same network as target device\n if self.is_network_device:\n hosts = socket.gethostbyname_ex(socket.gethostname())[2]\n logger.info(f'Current hosts: {hosts}')\n ip = ipaddress.ip_address(self.serial.split(':')[0])\n for host in hosts:\n if ip in ipaddress.ip_interface(f'{host}/24').network:\n logger.info(f'Connecting to local network device, using host {host}')\n port = random_port(self.config.FORWARD_PORT_RANGE)\n return host, port, host, port\n # For other devices, create an ADB reverse and listen on 127.0.0.1\n host = '127.0.0.1'\n logger.info(f'Connecting to unknown device, using host {host}')\n port = self.adb_reverse(f'tcp:{self.config.REVERSE_SERVER_PORT}')\n return host, port, host, self.config.REVERSE_SERVER_PORT\n\n @cached_property\n def reverse_server(self):\n \"\"\"\n Setup a server on Alas, access it from emulator.\n This will bypass adb shell and be faster.\n \"\"\"\n del_cached_property(self, '_nc_server_host_port')\n host_port = self._nc_server_host_port\n logger.info(f'Reverse server listening on {host_port[0]}:{host_port[1]}, '\n f'client can send data to {host_port[2]}:{host_port[3]}')\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.bind(host_port[:2])\n server.settimeout(5)\n server.listen(5)\n return server\n\n @cached_property\n def nc_command(self):\n \"\"\"\n Returns:\n list[str]: ['nc'] or ['busybox', 'nc']\n \"\"\"\n sdk = self.sdk_ver\n logger.info(f'sdk_ver: {sdk}')\n if sdk >= 28:\n # Android 9 emulators does not have `nc`, try `busybox nc`\n # BlueStacks Pie (Android 9) has `nc` but cannot send data, try `busybox nc` first\n trial = [\n ['busybox', 'nc'],\n ['nc'],\n ]\n else:\n trial = [\n ['nc'],\n ['busybox', 'nc'],\n ]\n for command in trial:\n # About 3ms\n result = self.adb_shell(command)\n # Result should be command help if success\n # `/system/bin/sh: nc: not found`\n if 'not found' in result:\n continue\n # `/system/bin/sh: busybox: inaccessible or not found\\n`\n if 'inaccessible' in result:\n continue\n logger.attr('nc command', command)\n return command\n\n logger.error('No `netcat` command available, please use screenshot methods without 
`_nc` suffix')\n raise RequestHumanTakeover\n\n def adb_shell_nc(self, cmd, timeout=5, chunk_size=262144):\n \"\"\"\n Args:\n cmd (list):\n timeout (int):\n chunk_size (int): Default to 262144\n\n Returns:\n bytes:\n \"\"\"\n # Server start listening\n server = self.reverse_server\n server.settimeout(timeout)\n # Client send data, waiting for server accept\n # <command> | nc 127.0.0.1 {port}\n cmd += [\"|\", *self.nc_command, *self._nc_server_host_port[2:]]\n stream = self.adb_shell(cmd, stream=True, recvall=False)\n try:\n # Server accept connection\n conn, conn_port = server.accept()\n except socket.timeout:\n output = recv_all(stream, chunk_size=chunk_size)\n logger.warning(str(output))\n raise AdbTimeout('reverse server accept timeout')\n\n # Server receive data\n data = recv_all(conn, chunk_size=chunk_size, recv_interval=0.001)\n\n # Server close connection\n conn.close()\n return data\n\n def adb_exec_out(self, cmd, serial=None):\n cmd.insert(0, 'exec-out')\n return self.adb_command(cmd, serial)\n\n def adb_forward(self, remote):\n \"\"\"\n Do `adb forward <local> <remote>`.\n choose a random port in FORWARD_PORT_RANGE or reuse an existing forward,\n and also remove redundant forwards.\n\n Args:\n remote (str):\n tcp:<port>\n localabstract:<unix domain socket name>\n localreserved:<unix domain socket name>\n localfilesystem:<unix domain socket name>\n dev:<character device name>\n jdwp:<process pid> (remote only)\n\n Returns:\n int: Port\n \"\"\"\n port = 0\n for forward in self.adb.forward_list():\n if forward.serial == self.serial and forward.remote == remote and forward.local.startswith('tcp:'):\n if not port:\n logger.info(f'Reuse forward: {forward}')\n port = int(forward.local[4:])\n else:\n logger.info(f'Remove redundant forward: {forward}')\n self.adb_forward_remove(forward.local)\n\n if port:\n return port\n else:\n # Create new forward\n port = random_port(self.config.FORWARD_PORT_RANGE)\n forward = ForwardItem(self.serial, f'tcp:{port}', remote)\n logger.info(f'Create forward: {forward}')\n self.adb.forward(forward.local, forward.remote)\n return port\n\n def adb_reverse(self, remote):\n port = 0\n for reverse in self.adb.reverse_list():\n if reverse.remote == remote and reverse.local.startswith('tcp:'):\n if not port:\n logger.info(f'Reuse reverse: {reverse}')\n port = int(reverse.local[4:])\n else:\n logger.info(f'Remove redundant forward: {reverse}')\n self.adb_forward_remove(reverse.local)\n\n if port:\n return port\n else:\n # Create new reverse\n port = random_port(self.config.FORWARD_PORT_RANGE)\n reverse = ReverseItem(f'tcp:{port}', remote)\n logger.info(f'Create reverse: {reverse}')\n self.adb.reverse(reverse.local, reverse.remote)\n return port\n\n def adb_forward_remove(self, local):\n \"\"\"\n Equivalent to `adb -s <serial> forward --remove <local>`\n More about the commands send to ADB server, see:\n https://cs.android.com/android/platform/superproject/+/master:packages/modules/adb/SERVICES.TXT\n\n Args:\n local (str): Such as 'tcp:2437'\n \"\"\"\n with self.adb_client._connect() as c:\n list_cmd = f\"host-serial:{self.serial}:killforward:{local}\"\n c.send_command(list_cmd)\n c.check_okay()\n\n def adb_reverse_remove(self, local):\n \"\"\"\n Equivalent to `adb -s <serial> reverse --remove <local>`\n\n Args:\n local (str): Such as 'tcp:2437'\n \"\"\"\n with self.adb_client._connect() as c:\n c.send_command(f\"host:transport:{self.serial}\")\n c.check_okay()\n list_cmd = f\"reverse:killforward:{local}\"\n c.send_command(list_cmd)\n c.check_okay()\n\n def 
adb_push(self, local, remote):\n \"\"\"\n Args:\n local (str):\n remote (str):\n\n Returns:\n str:\n \"\"\"\n cmd = ['push', local, remote]\n return self.adb_command(cmd)\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def adb_connect(self, serial):\n \"\"\"\n Connect to a serial, try 3 times at max.\n If there's an old ADB server running while Alas is using a newer one, which happens on Chinese emulators,\n the first connection is used to kill the other one, and the second is the real connect.\n\n Args:\n serial (str):\n\n Returns:\n bool: If success\n \"\"\"\n # Disconnect offline device before connecting\n for device in self.list_device():\n if device.status == 'offline':\n logger.warning(f'Device {serial} is offline, disconnect it before connecting')\n self.adb_disconnect(serial)\n elif device.status == 'unauthorized':\n logger.error(f'Device {serial} is unauthorized, please accept ADB debugging on your device')\n elif device.status == 'device':\n pass\n else:\n logger.warning(f'Device {serial} is is having a unknown status: {device.status}')\n\n # Skip for emulator-5554\n if 'emulator-' in serial:\n logger.info(f'\"{serial}\" is a `emulator-*` serial, skip adb connect')\n return True\n if re.match(r'^[a-zA-Z0-9]+$', serial):\n logger.info(f'\"{serial}\" seems to be a Android serial, skip adb connect')\n return True\n\n # Try to connect\n for _ in range(3):\n msg = self.adb_client.connect(serial)\n logger.info(msg)\n if 'connected' in msg:\n # Connected to 127.0.0.1:59865\n # Already connected to 127.0.0.1:59865\n return True\n elif 'bad port' in msg:\n # bad port number '598265' in '127.0.0.1:598265'\n logger.error(msg)\n possible_reasons('Serial incorrect, might be a typo')\n raise RequestHumanTakeover\n elif '(10061)' in msg:\n # cannot connect to 127.0.0.1:55555:\n # No connection could be made because the target machine actively refused it. 
(10061)\n logger.info(msg)\n logger.warning('No such device exists, please restart the emulator or set a correct serial')\n raise EmulatorNotRunningError\n\n # Failed to connect\n logger.warning(f'Failed to connect {serial} after 3 trial, assume connected')\n self.detect_device()\n return False\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def adb_connect(self, serial):\n # No adb connect if over http\n return True\n\n def adb_disconnect(self, serial):\n msg = self.adb_client.disconnect(serial)\n if msg:\n logger.info(msg)\n\n del_cached_property(self, 'hermit_session')\n del_cached_property(self, 'droidcast_session')\n del_cached_property(self, 'minitouch_builder')\n del_cached_property(self, 'reverse_server')\n\n def adb_restart(self):\n \"\"\"\n Reboot adb client\n \"\"\"\n logger.info('Restart adb')\n # Kill current client\n self.adb_client.server_kill()\n # Init adb client\n del_cached_property(self, 'adb_client')\n _ = self.adb_client\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def adb_reconnect(self):\n \"\"\"\n Reboot adb client if no device found, otherwise try reconnecting device.\n \"\"\"\n if self.config.Emulator_AdbRestart and len(self.list_device()) == 0:\n # Restart Adb\n self.adb_restart()\n # Connect to device\n self.adb_connect(self.serial)\n self.detect_device()\n else:\n self.adb_disconnect(self.serial)\n self.adb_connect(self.serial)\n self.detect_device()\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def adb_reconnect(self):\n logger.warning(\n f'When connecting a device over http: {self.serial} '\n f'adb_reconnect() is skipped, you may need to restart ATX manually'\n )\n\n def install_uiautomator2(self):\n \"\"\"\n Init uiautomator2 and remove minicap.\n \"\"\"\n logger.info('Install uiautomator2')\n init = u2.init.Initer(self.adb, loglevel=logging.DEBUG)\n # MuMu X has no ro.product.cpu.abi, pick abi from ro.product.cpu.abilist\n if init.abi not in ['x86_64', 'x86', 'arm64-v8a', 'armeabi-v7a', 'armeabi']:\n init.abi = init.abis[0]\n init.set_atx_agent_addr('127.0.0.1:7912')\n try:\n init.install()\n except ConnectionError:\n u2.init.GITHUB_BASEURL = 'http://tool.appetizer.io/openatx'\n init.install()\n self.uninstall_minicap()\n\n def uninstall_minicap(self):\n \"\"\" minicap can't work or will send compressed images on some emulators. 
\"\"\"\n logger.info('Removing minicap')\n self.adb_shell([\"rm\", \"/data/local/tmp/minicap\"])\n self.adb_shell([\"rm\", \"/data/local/tmp/minicap.so\"])\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def restart_atx(self):\n \"\"\"\n Minitouch supports only one connection at a time.\n Restart ATX to kick the existing one.\n \"\"\"\n logger.info('Restart ATX')\n atx_agent_path = '/data/local/tmp/atx-agent'\n self.adb_shell([atx_agent_path, 'server', '--stop'])\n self.adb_shell([atx_agent_path, 'server', '--nouia', '-d', '--addr', '127.0.0.1:7912'])\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def restart_atx(self):\n logger.warning(\n f'When connecting a device over http: {self.serial} '\n f'restart_atx() is skipped, you may need to restart ATX manually'\n )\n\n @staticmethod\n def sleep(second):\n \"\"\"\n Args:\n second(int, float, tuple):\n \"\"\"\n time.sleep(ensure_time(second))\n\n _orientation_description = {\n 0: 'Normal',\n 1: 'HOME key on the right',\n 2: 'HOME key on the top',\n 3: 'HOME key on the left',\n }\n orientation = 0\n\n @retry\n def get_orientation(self):\n \"\"\"\n Rotation of the phone\n\n Returns:\n int:\n 0: 'Normal'\n 1: 'HOME key on the right'\n 2: 'HOME key on the top'\n 3: 'HOME key on the left'\n \"\"\"\n _DISPLAY_RE = re.compile(\n r'.*DisplayViewport{.*valid=true, .*orientation=(?P<orientation>\\d+), .*deviceWidth=(?P<width>\\d+), deviceHeight=(?P<height>\\d+).*'\n )\n output = self.adb_shell(['dumpsys', 'display'])\n\n res = _DISPLAY_RE.search(output, 0)\n\n if res:\n o = int(res.group('orientation'))\n if o in Connection._orientation_description:\n pass\n else:\n o = 0\n logger.warning(f'Invalid device orientation: {o}, assume it is normal')\n else:\n o = 0\n logger.warning('Unable to get device orientation, assume it is normal')\n\n self.orientation = o\n logger.attr('Device Orientation', f'{o} ({Connection._orientation_description.get(o, \"Unknown\")})')\n return o\n\n @retry\n def list_device(self):\n \"\"\"\n Returns:\n SelectedGrids[AdbDeviceWithStatus]:\n \"\"\"\n devices = []\n try:\n with self.adb_client._connect() as c:\n c.send_command(\"host:devices\")\n c.check_okay()\n output = c.read_string_block()\n for line in output.splitlines():\n parts = line.strip().split(\"\\t\")\n if len(parts) != 2:\n continue\n device = AdbDeviceWithStatus(self.adb_client, parts[0], parts[1])\n devices.append(device)\n except ConnectionResetError as e:\n # Happens only on CN users.\n # ConnectionResetError: [WinError 10054] 远程主机强迫关闭了一个现有的连接。\n logger.error(e)\n if '强迫关闭' in str(e):\n logger.critical('无法连接至ADB服务,请关闭UU加速器、原神私服、以及一些劣质代理软件。'\n '它们会劫持电脑上所有的网络连接,包括Alas与模拟器之间的本地连接。')\n return SelectedGrids(devices)\n\n def detect_device(self):\n \"\"\"\n Find available devices\n If serial=='auto' and only 1 device detected, use it\n \"\"\"\n logger.hr('Detect device')\n logger.info('Here are the available devices, '\n 'copy to Alas.Emulator.Serial to use it or set Alas.Emulator.Serial=\"auto\"')\n devices = self.list_device()\n\n # Show available devices\n available = devices.select(status='device')\n for device in available:\n logger.info(device.serial)\n if not len(available):\n logger.info('No available devices')\n\n # Show unavailable devices if having any\n unavailable = devices.delete(available)\n if len(unavailable):\n logger.info('Here are the devices detected but unavailable')\n for device in unavailable:\n logger.info(f'{device.serial} ({device.status})')\n\n # Auto device detection\n if self.config.Emulator_Serial == 'auto':\n if available.count == 0:\n 
logger.critical('No available device found, auto device detection cannot work, '\n 'please set an exact serial in Alas.Emulator.Serial instead of using \"auto\"')\n raise RequestHumanTakeover\n elif available.count == 1:\n logger.info(f'Auto device detection found only one device, using it')\n self.serial = devices[0].serial\n del_cached_property(self, 'adb')\n else:\n logger.critical('Multiple devices found, auto device detection cannot decide which to choose, '\n 'please copy one of the available devices listed above to Alas.Emulator.Serial')\n raise RequestHumanTakeover\n\n # Handle LDPlayer\n # LDPlayer serial jumps between `127.0.0.1:5555+{X}` and `emulator-5554+{X}`\n port_serial, emu_serial = get_serial_pair(self.serial)\n if port_serial and emu_serial:\n # Might be LDPlayer, check connected devices\n port_device = devices.select(serial=port_serial).first_or_none()\n emu_device = devices.select(serial=emu_serial).first_or_none()\n if port_device and emu_device:\n # Paired devices found, check status to get the correct one\n if port_device.status == 'device' and emu_device.status == 'offline':\n self.serial = port_serial\n logger.info(f'LDPlayer device pair found: {port_device}, {emu_device}. '\n f'Using serial: {self.serial}')\n elif port_device.status == 'offline' and emu_device.status == 'device':\n self.serial = emu_serial\n logger.info(f'LDPlayer device pair found: {port_device}, {emu_device}. '\n f'Using serial: {self.serial}')\n elif not devices.select(serial=self.serial):\n # Current serial not found\n if port_device and not emu_device:\n logger.info(f'Current serial {self.serial} not found but paired device {port_serial} found. '\n f'Using serial: {port_serial}')\n self.serial = port_serial\n if not port_device and emu_device:\n logger.info(f'Current serial {self.serial} not found but paired device {emu_serial} found. 
'\n f'Using serial: {emu_serial}')\n self.serial = emu_serial\n\n @retry\n def list_package(self, show_log=True):\n \"\"\"\n Find all packages on device.\n Use dumpsys first for faster.\n \"\"\"\n # 80ms\n if show_log:\n logger.info('Get package list')\n output = self.adb_shell(r'dumpsys package | grep \"Package \\[\"')\n packages = re.findall(r'Package \\[([^\\s]+)\\]', output)\n if len(packages):\n return packages\n\n # 200ms\n if show_log:\n logger.info('Get package list')\n output = self.adb_shell(['pm', 'list', 'packages'])\n packages = re.findall(r'package:([^\\s]+)', output)\n return packages\n\n def list_azurlane_packages(self, show_log=True):\n \"\"\"\n Args:\n show_log:\n\n Returns:\n list[str]: List of package names\n \"\"\"\n packages = self.list_package(show_log=show_log)\n packages = [p for p in packages if p in server_.VALID_PACKAGE]\n return packages\n\n def detect_package(self, set_config=True):\n \"\"\"\n Show all possible packages with the given keyword on this device.\n \"\"\"\n logger.hr('Detect package')\n packages = self.list_azurlane_packages()\n\n # Show packages\n logger.info(f'Here are the available packages in device \"{self.serial}\", '\n f'copy to Alas.Emulator.PackageName to use it')\n if len(packages):\n for package in packages:\n logger.info(package)\n else:\n logger.info(f'No available packages on device \"{self.serial}\"')\n\n # Auto package detection\n if len(packages) == 0:\n logger.critical(f'No Blue Archive package found, '\n f'please confirm Blue Archive has been installed on device \"{self.serial}\"')\n raise RequestHumanTakeover\n if len(packages) == 1:\n logger.info('Auto package detection found only one package, using it')\n self.package = packages[0]\n # Set config\n if set_config:\n self.config.Emulator_PackageName = server_.to_server(self.package)\n # Set server\n # logger.info('Server changed, release resources')\n # set_server(self.package)\n else:\n logger.critical(\n f'Multiple Blue Archive packages found, auto package detection cannot decide which to choose, '\n 'please copy one of the available devices listed above to Alas.Emulator.PackageName')\n raise RequestHumanTakeover" }, { "identifier": "RETRY_TRIES", "path": "module/device/method/utils.py", "snippet": "RETRY_TRIES = 5" }, { "identifier": "retry_sleep", "path": "module/device/method/utils.py", "snippet": "def retry_sleep(trial):\n # First trial\n if trial == 0:\n pass\n # Failed once, fast retry\n elif trial == 1:\n pass\n # Failed twice\n elif trial == 2:\n time.sleep(1)\n # Failed more\n else:\n time.sleep(RETRY_DELAY)" }, { "identifier": "handle_adb_error", "path": "module/device/method/utils.py", "snippet": "def handle_adb_error(e):\n \"\"\"\n Args:\n e (Exception):\n\n Returns:\n bool: If should retry\n \"\"\"\n text = str(e)\n if 'not found' in text:\n # When you call `adb disconnect <serial>`\n # Or when adb server was killed (low possibility)\n # AdbError(device '127.0.0.1:59865' not found)\n logger.error(e)\n return True\n elif 'timeout' in text:\n # AdbTimeout(adb read timeout)\n logger.error(e)\n return True\n elif 'closed' in text:\n # AdbError(closed)\n # Usually after AdbTimeout(adb read timeout)\n # Disconnect and re-connect should fix this.\n logger.error(e)\n return True\n elif 'device offline' in text:\n # AdbError(device offline)\n # When a device that has been connected wirelessly is disconnected passively,\n # it does not disappear from the adb device list,\n # but will be displayed as offline.\n # In many cases, such as disconnection and recovery caused by 
network fluctuations,\n # or after VMOS reboot when running Alas on a phone,\n # the device is still available, but it needs to be disconnected and re-connected.\n logger.error(e)\n return True\n elif 'is offline' in text:\n # RuntimeError: USB device 127.0.0.1:7555 is offline\n # Raised by uiautomator2 when current adb service is killed by another version of adb service.\n logger.error(e)\n return True\n elif 'unknown host service' in text:\n # AdbError(unknown host service)\n # Another version of ADB service started, current ADB service has been killed.\n # Usually because user opened a Chinese emulator, which uses ADB from the Stone Age.\n logger.error(e)\n return True\n else:\n # AdbError()\n logger.exception(e)\n possible_reasons(\n 'If you are using BlueStacks or LD player or WSA, please enable ADB in the settings of your emulator',\n 'Emulator died, please restart emulator',\n 'Serial incorrect, no such device exists or emulator is not running'\n )\n return False" }, { "identifier": "RequestHumanTakeover", "path": "module/exception.py", "snippet": "class RequestHumanTakeover(Exception):\n # Request human takeover\n # Alas is unable to handle such error, probably because of wrong settings.\n pass" }, { "identifier": "ScriptError", "path": "module/exception.py", "snippet": "class ScriptError(Exception):\n # This is likely to be a mistake of developers, but sometimes a random issue\n pass" }, { "identifier": "logger", "path": "module/logger/logger.py", "snippet": "def empty_function(*args, **kwargs):\n def __init__(self, *args, func: Callable[[ConsoleRenderable], None] = None, **kwargs):\n def emit(self, record: logging.LogRecord) -> None:\n def handle(self, record: logging.LogRecord) -> bool:\n def options(self) -> ConsoleOptions:\ndef _set_file_logger(name=pyw_name):\ndef set_file_logger(name=pyw_name):\ndef set_func_logger(func):\ndef _get_renderables(\n self: Console, *objects, sep=\" \", end=\"\\n\", justify=None, emoji=None, markup=None, highlight=None,\n) -> List[ConsoleRenderable]:\ndef print(*objects: ConsoleRenderable, **kwargs):\ndef rule(title=\"\", *, characters=\"─\", style=\"rule.line\", end=\"\\n\", align=\"center\"):\ndef hr(title, level=3):\ndef attr(name, text):\ndef attr_align(name, text, front='', align=22):\ndef show():\ndef error_convert(func):\n def error_wrapper(msg, *args, **kwargs):\nclass RichFileHandler(RichHandler):\nclass RichRenderableHandler(RichHandler):\nclass HTMLConsole(Console):\nclass Highlighter(RegexHighlighter):\nWEB_THEME = Theme({\n \"web.brace\": Style(bold=True),\n \"web.bool_true\": Style(color=\"bright_green\", italic=True),\n \"web.bool_false\": Style(color=\"bright_red\", italic=True),\n \"web.none\": Style(color=\"magenta\", italic=True),\n \"web.path\": Style(color=\"magenta\"),\n \"web.filename\": Style(color=\"bright_magenta\"),\n \"web.str\": Style(color=\"green\", italic=False, bold=False),\n \"web.time\": Style(color=\"cyan\"),\n \"rule.text\": Style(bold=True),\n})" } ]
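The Timer snippet above documents a limit-plus-count gate: reached() fires only once the time limit has elapsed and it has been polled more than `count` times, so a single (possibly stale) screenshot on a fast machine cannot confirm a screen on its own. A minimal, self-contained sketch of that gating idea, assuming nothing beyond the docstring — `ConfirmGate` and its members are illustrative names, not the project's Timer API:

import time

class ConfirmGate:
    # Fires only after `limit` seconds have elapsed AND reached() was polled more than `count` times.
    def __init__(self, limit, count=0):
        self.limit = limit
        self.count = count
        self._start = time.time()
        self._polls = 0

    def reached(self):
        self._polls += 1
        return time.time() - self._start > self.limit and self._polls > self.count

    def reset(self):
        self._start = time.time()
        self._polls = 0

gate = ConfirmGate(limit=1, count=3)      # needs >1 s elapsed and >3 polls
while not gate.reached():
    time.sleep(0.35)                      # roughly the per-screenshot cost quoted in the docstring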
import asyncio
import json
import re
import socket
import time
import websockets
from functools import wraps
from typing import List
from adbutils.errors import AdbError
from uiautomator2 import _Service
from module.base.decorator import Config, cached_property, del_cached_property
from module.base.timer import Timer
from module.base.utils import *
from module.device.connection import Connection
from module.device.method.utils import RETRY_TRIES, retry_sleep, handle_adb_error
from module.exception import RequestHumanTakeover, ScriptError
from module.logger import logger
12,343
def to_atx_agent(self) -> List[str]: return [command.to_atx_agent(self.max_x, self.max_y) for command in self.commands] def send(self): return self.device.minitouch_send(builder=self) class MinitouchNotInstalledError(Exception): pass class MinitouchOccupiedError(Exception): pass class U2Service(_Service): def __init__(self, name, u2obj): self.name = name self.u2obj = u2obj self.service_url = self.u2obj.path2url("/services/" + name) def retry(func): @wraps(func) def retry_wrapper(self, *args, **kwargs): """ Args: self (Minitouch): """ init = None for _ in range(RETRY_TRIES): try: if callable(init): retry_sleep(_) init() return func(self, *args, **kwargs) # Can't handle except RequestHumanTakeover: break # When adb server was killed except ConnectionResetError as e: logger.error(e) def init(): self.adb_reconnect() # Emulator closed except ConnectionAbortedError as e: logger.error(e) def init(): self.adb_reconnect() # MinitouchNotInstalledError: Received empty data from minitouch except MinitouchNotInstalledError as e: logger.error(e) def init(): self.install_uiautomator2() if self._minitouch_port: self.adb_forward_remove(f'tcp:{self._minitouch_port}') del_cached_property(self, 'minitouch_builder') # MinitouchOccupiedError: Timeout when connecting to minitouch except MinitouchOccupiedError as e: logger.error(e) def init(): self.restart_atx() if self._minitouch_port: self.adb_forward_remove(f'tcp:{self._minitouch_port}') del_cached_property(self, 'minitouch_builder') # AdbError except AdbError as e: if handle_adb_error(e): def init(): self.adb_reconnect() else: break except BrokenPipeError as e: logger.error(e) def init(): del_cached_property(self, 'minitouch_builder') # Unknown, probably a trucked image except Exception as e: logger.exception(e) def init(): pass logger.critical(f'Retry {func.__name__}() failed') raise RequestHumanTakeover return retry_wrapper class Minitouch(Connection): _minitouch_port: int = 0 _minitouch_client: socket.socket _minitouch_pid: int _minitouch_ws: websockets.WebSocketClientProtocol max_x: int max_y: int @cached_property def minitouch_builder(self): self.minitouch_init() return CommandBuilder(self) @Config.when(DEVICE_OVER_HTTP=False) def minitouch_init(self): logger.hr('MiniTouch init') max_x, max_y = 1280, 720 max_contacts = 2 max_pressure = 50 self.get_orientation() self._minitouch_port = self.adb_forward("localabstract:minitouch") # No need, minitouch already started by uiautomator2 # self.adb_shell([self.config.MINITOUCH_FILEPATH_REMOTE])
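When running over HTTP, the to_atx_agent() path in the cropped code (its per-command counterpart appears in the fuller listing below) normalizes coordinates to the 0-1 range and JSON-encodes each command for atx-agent's /minitouch endpoint. A rough illustration of a single "down" at (400, 400) on the default 1280x720 grid, derived from the dict construction shown in that listing rather than from a captured payload:

import json

# Mirrors Command.to_atx_agent(): xP/yP are fractions of the 1280x720 screen.
down = dict(operation="d", index=0, pressure=100, xP=400 / 1280, yP=400 / 720)
print(json.dumps(down))
# {"operation": "d", "index": 0, "pressure": 100, "xP": 0.3125, "yP": 0.5555555555555556}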
def random_normal_distribution(a, b, n=5): output = np.mean(np.random.uniform(a, b, size=n)) return output def random_theta(): theta = np.random.uniform(0, 2 * np.pi) return np.array([np.sin(theta), np.cos(theta)]) def random_rho(dis): return random_normal_distribution(-dis, dis) def insert_swipe(p0, p3, speed=15, min_distance=10): """ Insert way point from start to end. First generate a cubic bézier curve Args: p0: Start point. p3: End point. speed: Average move speed, pixels per 10ms. min_distance: Returns: list[list[int]]: List of points. Examples: > insert_swipe((400, 400), (600, 600), speed=20) [[400, 400], [406, 406], [416, 415], [429, 428], [444, 442], [462, 459], [481, 478], [504, 500], [527, 522], [545, 540], [560, 557], [573, 570], [584, 582], [592, 590], [597, 596], [600, 600]] """ p0 = np.array(p0) p3 = np.array(p3) # Random control points in Bézier curve distance = np.linalg.norm(p3 - p0) p1 = 2 / 3 * p0 + 1 / 3 * p3 + random_theta() * random_rho(distance * 0.1) p2 = 1 / 3 * p0 + 2 / 3 * p3 + random_theta() * random_rho(distance * 0.1) # Random `t` on Bézier curve, sparse in the middle, dense at start and end segments = max(int(distance / speed) + 1, 5) lower = random_normal_distribution(-85, -60) upper = random_normal_distribution(80, 90) theta = np.arange(lower + 0., upper + 0.0001, (upper - lower) / segments) ts = np.sin(theta / 180 * np.pi) ts = np.sign(ts) * abs(ts) ** 0.9 ts = (ts - min(ts)) / (max(ts) - min(ts)) # Generate cubic Bézier curve points = [] prev = (-100, -100) for t in ts: point = p0 * (1 - t) ** 3 + 3 * p1 * t * (1 - t) ** 2 + 3 * p2 * t ** 2 * (1 - t) + p3 * t ** 3 point = point.astype(int).tolist() if np.linalg.norm(np.subtract(point, prev)) < min_distance: continue points.append(point) prev = point # Delete nearing points if len(points[1:]): distance = np.linalg.norm(np.subtract(points[1:], points[0]), axis=1) mask = np.append(True, distance > min_distance) points = np.array(points)[mask].tolist() else: points = [p0, p3] return points class Command: def __init__( self, operation: str, contact: int = 0, x: int = 0, y: int = 0, ms: int = 10, pressure: int = 100 ): """ See https://github.com/openstf/minitouch#writable-to-the-socket Args: operation: c, r, d, m, u, w contact: x: y: ms: pressure: """ self.operation = operation self.contact = contact self.x = x self.y = y self.ms = ms self.pressure = pressure def to_minitouch(self) -> str: """ String that write into minitouch socket """ if self.operation == 'c': return f'{self.operation}\n' elif self.operation == 'r': return f'{self.operation}\n' elif self.operation == 'd': return f'{self.operation} {self.contact} {self.x} {self.y} {self.pressure}\n' elif self.operation == 'm': return f'{self.operation} {self.contact} {self.x} {self.y} {self.pressure}\n' elif self.operation == 'u': return f'{self.operation} {self.contact}\n' elif self.operation == 'w': return f'{self.operation} {self.ms}\n' else: return '' def to_atx_agent(self, max_x=1280, max_y=720) -> str: """ Dict that send to atx-agent, $DEVICE_URL/minitouch See https://github.com/openatx/atx-agent#minitouch%E6%93%8D%E4%BD%9C%E6%96%B9%E6%B3%95 """ x, y = self.x / max_x, self.y / max_y if self.operation == 'c': out = dict(operation=self.operation) elif self.operation == 'r': out = dict(operation=self.operation) elif self.operation == 'd': out = dict(operation=self.operation, index=self.contact, pressure=self.pressure, xP=x, yP=y) elif self.operation == 'm': out = dict(operation=self.operation, index=self.contact, pressure=self.pressure, xP=x, yP=y) elif 
self.operation == 'u': out = dict(operation=self.operation, index=self.contact) elif self.operation == 'w': out = dict(operation=self.operation, milliseconds=self.ms) else: out = dict() return json.dumps(out) class CommandBuilder: """Build command str for minitouch. You can use this, to custom actions as you wish:: with safe_connection(_DEVICE_ID) as connection: builder = CommandBuilder() builder.down(0, 400, 400, 50) builder.commit() builder.move(0, 500, 500, 50) builder.commit() builder.move(0, 800, 400, 50) builder.commit() builder.up(0) builder.commit() builder.publish(connection) """ DEFAULT_DELAY = 0.05 max_x = 1280 max_y = 720 def __init__(self, device, contact=0, handle_orientation=True): """ Args: device: """ self.device = device self.commands = [] self.delay = 0 self.contact = contact self.handle_orientation = handle_orientation @property def orientation(self): if self.handle_orientation: return self.device.orientation else: return 0 def convert(self, x, y): max_x, max_y = self.device.max_x, self.device.max_y orientation = self.orientation if orientation == 0: pass elif orientation == 1: x, y = 720 - y, x max_x, max_y = max_y, max_x elif orientation == 2: x, y = 1280 - x, 720 - y elif orientation == 3: x, y = y, 1280 - x max_x, max_y = max_y, max_x else: raise ScriptError(f'Invalid device orientation: {orientation}') self.max_x, self.max_y = max_x, max_y if not self.device.config.DEVICE_OVER_HTTP: # Maximum X and Y coordinates may, but usually do not, match the display size. x, y = int(x / 1280 * max_x), int(y / 720 * max_y) else: # When over http, max_x and max_y are default to 1280 and 720, skip matching display size x, y = int(x), int(y) return x, y def commit(self): """ add minitouch command: 'c\n' """ self.commands.append(Command('c')) return self def reset(self): """ add minitouch command: 'r\n' """ self.commands.append(Command('r')) return self def wait(self, ms=10): """ add minitouch command: 'w <ms>\n' """ self.commands.append(Command('w', ms=ms)) self.delay += ms return self def up(self): """ add minitouch command: 'u <contact>\n' """ self.commands.append(Command('u', contact=self.contact)) return self def down(self, x, y, pressure=100): """ add minitouch command: 'd <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('d', x=x, y=y, contact=self.contact, pressure=pressure)) return self def move(self, x, y, pressure=100): """ add minitouch command: 'm <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('m', x=x, y=y, contact=self.contact, pressure=pressure)) return self def clear(self): """ clear current commands """ self.commands = [] self.delay = 0 def to_minitouch(self) -> str: return ''.join([command.to_minitouch() for command in self.commands]) def to_atx_agent(self) -> List[str]: return [command.to_atx_agent(self.max_x, self.max_y) for command in self.commands] def send(self): return self.device.minitouch_send(builder=self) class MinitouchNotInstalledError(Exception): pass class MinitouchOccupiedError(Exception): pass class U2Service(_Service): def __init__(self, name, u2obj): self.name = name self.u2obj = u2obj self.service_url = self.u2obj.path2url("/services/" + name) def retry(func): @wraps(func) def retry_wrapper(self, *args, **kwargs): """ Args: self (Minitouch): """ init = None for _ in range(RETRY_TRIES): try: if callable(init): retry_sleep(_) init() return func(self, *args, **kwargs) # Can't handle except RequestHumanTakeover: break # When adb server was killed except 
ConnectionResetError as e: logger.error(e) def init(): self.adb_reconnect() # Emulator closed except ConnectionAbortedError as e: logger.error(e) def init(): self.adb_reconnect() # MinitouchNotInstalledError: Received empty data from minitouch except MinitouchNotInstalledError as e: logger.error(e) def init(): self.install_uiautomator2() if self._minitouch_port: self.adb_forward_remove(f'tcp:{self._minitouch_port}') del_cached_property(self, 'minitouch_builder') # MinitouchOccupiedError: Timeout when connecting to minitouch except MinitouchOccupiedError as e: logger.error(e) def init(): self.restart_atx() if self._minitouch_port: self.adb_forward_remove(f'tcp:{self._minitouch_port}') del_cached_property(self, 'minitouch_builder') # AdbError except AdbError as e: if handle_adb_error(e): def init(): self.adb_reconnect() else: break except BrokenPipeError as e: logger.error(e) def init(): del_cached_property(self, 'minitouch_builder') # Unknown, probably a trucked image except Exception as e: logger.exception(e) def init(): pass logger.critical(f'Retry {func.__name__}() failed') raise RequestHumanTakeover return retry_wrapper class Minitouch(Connection): _minitouch_port: int = 0 _minitouch_client: socket.socket _minitouch_pid: int _minitouch_ws: websockets.WebSocketClientProtocol max_x: int max_y: int @cached_property def minitouch_builder(self): self.minitouch_init() return CommandBuilder(self) @Config.when(DEVICE_OVER_HTTP=False) def minitouch_init(self): logger.hr('MiniTouch init') max_x, max_y = 1280, 720 max_contacts = 2 max_pressure = 50 self.get_orientation() self._minitouch_port = self.adb_forward("localabstract:minitouch") # No need, minitouch already started by uiautomator2 # self.adb_shell([self.config.MINITOUCH_FILEPATH_REMOTE])
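For the non-HTTP path, Command.to_minitouch() above serializes touches into minitouch's plain-text socket protocol (the openstf/minitouch format referenced in its docstring). As a rough illustration of what a single tap at (400, 400) would look like on the wire — assembled from the format strings shown above, not captured from a device:

# Default contact 0 and pressure 100, as used by CommandBuilder.down().
tap_stream = (
    "d 0 400 400 100\n"   # touch down at (400, 400)
    "c\n"                 # commit
    "u 0\n"               # touch up for contact 0
    "c\n"                 # commit
)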
retry_timeout = Timer(2).start()
3
2023-11-01 07:09:45+00:00
16k
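For this record the code to complete ends inside minitouch_init(), and the recorded next_line starts a two-second retry timer (Timer(2).start()). The surrounding retry decorator uses a pattern worth noting: every except branch records a recovery callback in a local `init`, which is executed (after a back-off sleep) before the next attempt. A minimal, self-contained sketch of that deferred-recovery pattern — `self.reconnect()` and the fixed sleep are placeholders, not the project's API:

import time
from functools import wraps

RETRY_TRIES = 5  # mirrors the constant shown in the context above

def retry(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        init = None  # recovery step recorded by the previous failed attempt
        for trial in range(RETRY_TRIES):
            try:
                if callable(init):
                    time.sleep(1)          # crude stand-in for retry_sleep(trial)
                    init()
                return func(self, *args, **kwargs)
            except ConnectionResetError as e:
                print(e)
                def init():
                    self.reconnect()       # hypothetical recovery hook on this object
            except Exception as e:
                print(e)
                def init():
                    pass                   # unknown error: retry without extra recovery
        raise RuntimeError(f"{func.__name__}() still failing after {RETRY_TRIES} tries")
    return wrapper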
BrianPugh/cyclopts
tests/test_help.py
[ { "identifier": "App", "path": "cyclopts/core.py", "snippet": "class App:\n _name: Optional[Tuple[str, ...]] = field(default=None, alias=\"name\", converter=optional_to_tuple_converter)\n\n _help: Optional[str] = field(default=None, alias=\"help\")\n\n usage: Optional[str] = field(default=None)\n\n # Everything below must be kw_only\n\n default_command: Optional[Callable] = field(default=None, converter=_validate_default_command, kw_only=True)\n default_parameter: Optional[Parameter] = field(default=None, kw_only=True)\n\n version: Union[None, str, Callable] = field(factory=_default_version, kw_only=True)\n version_flags: Tuple[str, ...] = field(\n default=[\"--version\"],\n on_setattr=attrs.setters.frozen,\n converter=to_tuple_converter,\n kw_only=True,\n )\n\n show: bool = field(default=True, kw_only=True)\n\n help_flags: Tuple[str, ...] = field(\n default=[\"--help\", \"-h\"],\n on_setattr=attrs.setters.frozen,\n converter=to_tuple_converter,\n kw_only=True,\n )\n\n # This can ONLY ever be Tuple[Union[Group, str], ...] due to converter.\n # The other types is to make mypy happy for Cyclopts users.\n group: Union[Group, str, Tuple[Union[Group, str], ...]] = field(\n default=None, converter=to_tuple_converter, kw_only=True\n )\n\n group_arguments: Group = field(\n default=None,\n converter=GroupConverter(Group.create_default_arguments()),\n kw_only=True,\n )\n group_parameters: Group = field(\n default=None,\n converter=GroupConverter(Group.create_default_parameters()),\n kw_only=True,\n )\n group_commands: Group = field(\n default=None,\n converter=GroupConverter(Group.create_default_commands()),\n kw_only=True,\n )\n\n converter: Optional[Callable] = field(default=None, kw_only=True)\n validator: List[Callable] = field(default=None, converter=to_list_converter, kw_only=True)\n\n ######################\n # Private Attributes #\n ######################\n # Maps CLI-name of a command to a function handle.\n _commands: Dict[str, \"App\"] = field(init=False, factory=dict)\n\n _parents: List[\"App\"] = field(init=False, factory=list)\n\n _meta: \"App\" = field(init=False, default=None)\n _meta_parent: \"App\" = field(init=False, default=None)\n\n def __attrs_post_init__(self):\n if self.help_flags:\n self.command(\n self.help_print,\n name=self.help_flags,\n help_flags=[],\n version_flags=[],\n help=\"Display this message and exit.\",\n )\n if self.version_flags:\n self.command(\n self.version_print,\n name=self.version_flags,\n help_flags=[],\n version_flags=[],\n help=\"Display application version.\",\n )\n\n ###########\n # Methods #\n ###########\n\n @property\n def name(self) -> Tuple[str, ...]:\n \"\"\"Application name(s). 
Dynamically derived if not previously set.\"\"\"\n if self._name:\n return self._name\n elif self.default_command is None:\n name = Path(sys.argv[0]).name\n if name == \"__main__.py\":\n name = _get_root_module_name()\n return (name,)\n else:\n return (_format_name(self.default_command.__name__),)\n\n @property\n def help(self) -> str:\n if self._help is not None:\n return self._help\n elif self.default_command is None:\n # Try and fallback to a meta-app docstring.\n if self._meta is None:\n return \"\"\n else:\n return self.meta.help\n elif self.default_command.__doc__ is None:\n return \"\"\n else:\n return self.default_command.__doc__\n\n @help.setter\n def help(self, value):\n self._help = value\n\n def version_print(self) -> None:\n \"\"\"Print the application version.\"\"\"\n print(self.version() if callable(self.version) else self.version)\n\n def __getitem__(self, key: str) -> \"App\":\n \"\"\"Get the subapp from a command string.\n\n All commands get registered to Cyclopts as subapps.\n The actual function handler is at ``app[key].default_command``.\n \"\"\"\n if self._meta:\n with suppress(KeyError):\n return self.meta[key]\n return self._commands[key]\n\n def __contains__(self, k: str) -> bool:\n if k in self._commands:\n return True\n if self._meta_parent:\n return k in self._meta_parent\n return False\n\n @property\n def meta(self) -> \"App\":\n if self._meta is None:\n self._meta = type(self)(\n group_commands=copy(self.group_commands),\n group_arguments=copy(self.group_arguments),\n group_parameters=copy(self.group_parameters),\n )\n self._meta._meta_parent = self\n return self._meta\n\n def _parse_command_chain(self, tokens):\n command_chain = []\n app = self\n apps = [app]\n unused_tokens = tokens\n\n command_mapping = _combined_meta_command_mapping(app)\n\n for i, token in enumerate(tokens):\n if token in self.help_flags:\n break\n try:\n app = command_mapping[token]\n apps.append(app)\n unused_tokens = tokens[i + 1 :]\n except KeyError:\n break\n command_chain.append(token)\n command_mapping = _combined_meta_command_mapping(app)\n\n return command_chain, apps, unused_tokens\n\n def command(\n self,\n obj: Optional[Callable] = None,\n name: Union[None, str, Iterable[str]] = None,\n **kwargs,\n ) -> Callable:\n \"\"\"Decorator to register a function as a CLI command.\n\n Parameters\n ----------\n obj: Optional[Callable]\n Function or :class:`App` to be registered as a command.\n name: Union[None, str, Iterable[str]]\n Name(s) to register the ``obj`` to.\n If not provided, defaults to:\n\n * If registering an :class:`App`, then the app's name.\n * If registering a function, then the function's name.\n `**kwargs`\n Any argument that :class:`App` can take.\n \"\"\"\n if obj is None: # Called ``@app.command(...)``\n return partial(self.command, name=name, **kwargs)\n\n if isinstance(obj, App):\n app = obj\n\n if app._name is None and name is None:\n raise ValueError(\"Sub-app MUST have a name specified.\")\n\n if kwargs:\n raise ValueError(\"Cannot supplied additional configuration when registering a sub-App.\")\n else:\n validate_command(obj)\n kwargs.setdefault(\"help_flags\", [])\n kwargs.setdefault(\"version_flags\", [])\n if \"group_commands\" not in kwargs:\n kwargs[\"group_commands\"] = copy(self.group_commands)\n if \"group_parameters\" not in kwargs:\n kwargs[\"group_parameters\"] = copy(self.group_parameters)\n if \"group_arguments\" not in kwargs:\n kwargs[\"group_arguments\"] = copy(self.group_arguments)\n app = App(default_command=obj, **kwargs)\n # app.name is 
handled below\n\n if name is None:\n name = app.name\n else:\n app._name = name\n\n for n in to_tuple_converter(name):\n if n in self:\n raise CommandCollisionError(f'Command \"{n}\" already registered.')\n\n # Warning: app._name may not align with command name\n self._commands[n] = app\n\n app._parents.append(self)\n\n return obj\n\n def default(\n self,\n obj: Optional[Callable] = None,\n *,\n converter=None,\n validator=None,\n ):\n \"\"\"Decorator to register a function as the default action handler.\"\"\"\n if obj is None: # Called ``@app.default_command(...)``\n return partial(self.default, converter=converter, validator=validator)\n\n if isinstance(obj, App): # Registering a sub-App\n raise TypeError(\"Cannot register a sub-App to default.\")\n\n if self.default_command is not None:\n raise CommandCollisionError(f\"Default command previously set to {self.default_command}.\")\n\n validate_command(obj)\n self.default_command = obj\n if converter:\n self.converter = converter\n if validator:\n self.validator = validator\n return obj\n\n def parse_known_args(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n ) -> Tuple[Callable, inspect.BoundArguments, List[str]]:\n \"\"\"Interpret arguments into a function, :class:`~inspect.BoundArguments`, and any remaining unknown tokens.\n\n Parameters\n ----------\n tokens: Union[None, str, Iterable[str]]\n Either a string, or a list of strings to launch a command.\n Defaults to ``sys.argv[1:]``\n\n Returns\n -------\n command: Callable\n Bare function to execute.\n\n bound: inspect.BoundArguments\n Bound arguments for ``command``.\n\n unused_tokens: List[str]\n Any remaining CLI tokens that didn't get parsed for ``command``.\n \"\"\"\n tokens = normalize_tokens(tokens)\n\n command_chain, apps, unused_tokens = self._parse_command_chain(tokens)\n command_app = apps[-1]\n\n try:\n parent_app = apps[-2]\n except IndexError:\n parent_app = None\n\n try:\n if command_app.default_command:\n command = command_app.default_command\n resolved_command = ResolvedCommand(\n command,\n _resolve_default_parameter(apps),\n command_app.group_arguments,\n command_app.group_parameters,\n parse_docstring=False,\n )\n # We want the resolved group that ``app`` belongs to.\n if parent_app is None:\n command_groups = []\n else:\n command_groups = _get_command_groups(parent_app, command_app)\n\n bound, unused_tokens = create_bound_arguments(resolved_command, unused_tokens)\n try:\n if command_app.converter:\n bound.arguments = command_app.converter(**bound.arguments)\n for command_group in command_groups:\n if command_group.converter:\n bound.arguments = command_group.converter(**bound.arguments)\n for validator in command_app.validator:\n validator(**bound.arguments)\n for command_group in command_groups:\n for validator in command_group.validator:\n validator(**bound.arguments)\n except (AssertionError, ValueError, TypeError) as e:\n new_exception = ValidationError(value=e.args[0])\n raise new_exception from e\n\n return command, bound, unused_tokens\n else:\n if unused_tokens:\n raise InvalidCommandError(unused_tokens=unused_tokens)\n else:\n # Running the application with no arguments and no registered\n # ``default_command`` will default to ``help_print``.\n command = self.help_print\n bound = inspect.signature(command).bind(tokens=tokens, console=console)\n return command, bound, []\n except CycloptsError as e:\n e.app = command_app\n if command_chain:\n e.command_chain = command_chain\n raise\n\n raise 
NotImplementedError(\"Should never get here.\")\n\n def parse_args(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n print_error: bool = True,\n exit_on_error: bool = True,\n verbose: bool = False,\n ) -> Tuple[Callable, inspect.BoundArguments]:\n \"\"\"Interpret arguments into a function and :class:`~inspect.BoundArguments`.\n\n **Does** handle special flags like \"version\" or \"help\".\n\n Raises\n ------\n UnusedCliTokensError\n If any tokens remain after parsing.\n\n Parameters\n ----------\n tokens: Union[None, str, Iterable[str]]\n Either a string, or a list of strings to launch a command.\n Defaults to ``sys.argv[1:]``.\n print_error: bool\n Print a rich-formatted error on error.\n Defaults to ``True``.\n exit_on_error: bool\n If there is an error parsing the CLI tokens invoke ``sys.exit(1)``.\n Otherwise, continue to raise the exception.\n Defaults to ``True``.\n verbose: bool\n Populate exception strings with more information intended for developers.\n Defaults to ``False``.\n\n Returns\n -------\n command: Callable\n Function associated with command action.\n\n bound: inspect.BoundArguments\n Parsed and converted ``args`` and ``kwargs`` to be used when calling ``command``.\n \"\"\"\n tokens = normalize_tokens(tokens)\n\n meta_parent = self\n\n try:\n # Special flags (help/version) get bubbled up to the root app.\n # The root ``help_print`` will then traverse the meta app linked list.\n\n # The Help Flag is allowed to be anywhere in the token stream.\n help_flag_index = None\n for help_flag in self.help_flags:\n try:\n help_flag_index = tokens.index(help_flag)\n break\n except ValueError:\n pass\n\n if help_flag_index is not None:\n tokens.pop(help_flag_index)\n command = self.help_print\n while meta_parent := meta_parent._meta_parent:\n command = meta_parent.help_print\n bound = inspect.signature(command).bind(tokens, console=console)\n unused_tokens = []\n elif any(flag in tokens for flag in self.version_flags):\n # Version\n command = self.version_print\n while meta_parent := meta_parent._meta_parent:\n command = meta_parent.version_print\n bound = inspect.signature(command).bind()\n unused_tokens = []\n else:\n # Normal parsing\n command, bound, unused_tokens = self.parse_known_args(tokens, console=console)\n if unused_tokens:\n raise UnusedCliTokensError(\n target=command,\n unused_tokens=unused_tokens,\n )\n except CycloptsError as e:\n e.verbose = verbose\n e.root_input_tokens = tokens\n if print_error:\n if console is None:\n console = Console()\n console.print(format_cyclopts_error(e))\n\n if exit_on_error:\n sys.exit(1)\n else:\n raise\n\n return command, bound\n\n def __call__(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n print_error: bool = True,\n exit_on_error: bool = True,\n verbose: bool = False,\n ):\n \"\"\"Interprets and executes a command.\n\n Parameters\n ----------\n tokens : Union[None, str, Iterable[str]]\n Either a string, or a list of strings to launch a command.\n Defaults to ``sys.argv[1:]``.\n print_error: bool\n Print a rich-formatted error on error.\n Defaults to ``True``.\n exit_on_error: bool\n If there is an error parsing the CLI tokens invoke ``sys.exit(1)``.\n Otherwise, continue to raise the exception.\n Defaults to ``True``.\n verbose: bool\n Populate exception strings with more information intended for developers.\n Defaults to ``False``.\n\n Returns\n -------\n return_value: Any\n The value the parsed command handler returns.\n 
\"\"\"\n tokens = normalize_tokens(tokens)\n command, bound = self.parse_args(\n tokens,\n console=console,\n print_error=print_error,\n exit_on_error=exit_on_error,\n verbose=verbose,\n )\n try:\n return command(*bound.args, **bound.kwargs)\n except Exception as e:\n if PydanticValidationError is not None and isinstance(e, PydanticValidationError):\n if print_error:\n if console is None:\n console = Console()\n console.print(format_cyclopts_error(e))\n\n if exit_on_error:\n sys.exit(1)\n raise\n\n def help_print(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n ) -> None:\n \"\"\"Print the help page.\n\n Parameters\n ----------\n tokens: Union[None, str, Iterable[str]]\n Tokens to interpret for traversing the application command structure.\n If not provided, defaults to ``sys.argv``.\n \"\"\"\n tokens = normalize_tokens(tokens)\n\n if console is None:\n console = Console()\n\n command_chain, apps, _ = self._parse_command_chain(tokens)\n executing_app = apps[-1]\n\n # Print the:\n # my-app command COMMAND [ARGS] [OPTIONS]\n if executing_app.usage is None:\n console.print(format_usage(self, command_chain))\n elif executing_app.usage: # i.e. skip empty-string.\n console.print(executing_app.usage + \"\\n\")\n\n # Print the App/Command's Doc String.\n console.print(format_doc(self, executing_app))\n\n def walk_apps():\n # Iterates from deepest to shallowest meta-apps\n meta_list = [] # shallowest to deepest\n meta_list.append(executing_app)\n meta = executing_app\n while (meta := meta._meta) and meta.default_command:\n meta_list.append(meta)\n yield from reversed(meta_list)\n\n panels: Dict[str, Tuple[Group, HelpPanel]] = {}\n # Handle commands first; there's an off chance they may be \"upgraded\"\n # to an argument/parameter panel.\n for subapp in walk_apps():\n # Handle Commands\n for group, elements in groups_from_app(subapp):\n if not group.show:\n continue\n\n try:\n _, command_panel = panels[group.name]\n except KeyError:\n command_panel = HelpPanel(\n format=\"command\",\n title=group.name,\n )\n panels[group.name] = (group, command_panel)\n\n if group.help:\n if command_panel.description:\n command_panel.description += \"\\n\" + group.help\n else:\n command_panel.description = group.help\n\n command_panel.entries.extend(format_command_entries(elements))\n\n # Handle Arguments/Parameters\n for subapp in walk_apps():\n if subapp.default_command:\n command = ResolvedCommand(\n subapp.default_command,\n subapp.default_parameter,\n subapp.group_arguments,\n subapp.group_parameters,\n )\n for group, iparams in command.groups_iparams:\n if not group.show:\n continue\n cparams = [command.iparam_to_cparam[x] for x in iparams]\n try:\n _, existing_panel = panels[group.name]\n except KeyError:\n existing_panel = None\n new_panel = create_parameter_help_panel(group, iparams, cparams)\n\n if existing_panel:\n # An imperfect merging process\n existing_panel.format = \"parameter\"\n existing_panel.entries = new_panel.entries + existing_panel.entries # Commands go last\n if new_panel.description:\n if existing_panel.description:\n existing_panel.description += \"\\n\" + new_panel.description\n else:\n existing_panel.description = new_panel.description\n else:\n panels[group.name] = (group, new_panel)\n\n groups = [x[0] for x in panels.values()]\n help_panels = [x[1] for x in panels.values()]\n\n for help_panel in sort_groups(groups, help_panels)[1]:\n help_panel.remove_duplicates()\n if help_panel.format == \"command\":\n # don't sort format == 
\"parameter\" because order may matter there!\n help_panel.sort()\n console.print(help_panel)\n\n def interactive_shell(\n self,\n prompt: str = \"$ \",\n quit: Union[None, str, Iterable[str]] = None,\n dispatcher: Optional[Dispatcher] = None,\n **kwargs,\n ) -> None:\n \"\"\"Create a blocking, interactive shell.\n\n All registered commands can be executed in the shell.\n\n Parameters\n ----------\n prompt: str\n Shell prompt. Defaults to ``\"$ \"``.\n quit: Union[str, Iterable[str]]\n String or list of strings that will cause the shell to exit and this method to return.\n Defaults to ``[\"q\", \"quit\"]``.\n dispatcher: Optional[Dispatcher]\n Optional function that subsequently invokes the command.\n The ``dispatcher`` function must have signature:\n\n .. code-block:: python\n\n def dispatcher(command: Callable, bound: inspect.BoundArguments) -> Any:\n return command(*bound.args, **bound.kwargs)\n\n The above is the default dispatcher implementation.\n `**kwargs`\n Get passed along to :meth:`parse_args`.\n \"\"\"\n if os.name == \"posix\":\n print(\"Interactive shell. Press Ctrl-D to exit.\")\n else: # Windows\n print(\"Interactive shell. Press Ctrl-Z followed by Enter to exit.\")\n\n if quit is None:\n quit = [\"q\", \"quit\"]\n if isinstance(quit, str):\n quit = [quit]\n\n def default_dispatcher(command, bound):\n return command(*bound.args, **bound.kwargs)\n\n if dispatcher is None:\n dispatcher = default_dispatcher\n\n kwargs.setdefault(\"exit_on_error\", False)\n\n while True:\n try:\n user_input = input(prompt)\n except EOFError:\n break\n\n tokens = normalize_tokens(user_input)\n if not tokens:\n continue\n if tokens[0] in quit:\n break\n\n try:\n command, bound = self.parse_args(tokens, **kwargs)\n dispatcher(command, bound)\n except CycloptsError:\n # Upstream ``parse_args`` already printed the error\n pass\n except Exception:\n print(traceback.format_exc())\n\n def __repr__(self):\n \"\"\"Only shows non-default values.\"\"\"\n non_defaults = {}\n for a in self.__attrs_attrs__: # pyright: ignore[reportGeneralTypeIssues]\n if not a.init:\n continue\n v = getattr(self, a.name)\n # Compare types first because of some weird attribute issues.\n if type(v) != type(a.default) or v != a.default: # noqa: E721\n non_defaults[a.alias] = v\n\n signature = \", \".join(f\"{k}={v!r}\" for k, v in non_defaults.items())\n return f\"{type(self).__name__}({signature})\"" }, { "identifier": "Group", "path": "cyclopts/group.py", "snippet": "class Group:\n name: str = \"\"\n\n help: str = \"\"\n\n # All below parameters are keyword-only\n _show: Optional[bool] = field(default=None, alias=\"show\", kw_only=True)\n\n _sort_key: Any = field(\n default=None,\n alias=\"sort_key\",\n converter=lambda x: NO_USER_SORT_KEY if x is None else x,\n )\n\n converter: Optional[Callable] = field(default=None, kw_only=True)\n\n validator: Tuple[Callable, ...] 
= field(\n default=None,\n converter=lambda x: cast(Tuple[Callable, ...], to_tuple_converter(x)),\n kw_only=True,\n )\n\n default_parameter: Optional[\"Parameter\"] = field(\n default=None,\n validator=_group_default_parameter_must_be_none,\n kw_only=True,\n )\n\n def __str__(self):\n return self.name\n\n @property\n def show(self):\n return bool(self.name) if self._show is None else self._show\n\n @show.setter\n def show(self, value):\n self._show = value\n\n @property\n def sort_key(self):\n return None if self._sort_key is NO_USER_SORT_KEY else self._sort_key\n\n @sort_key.setter\n def sort_key(self, value):\n self._sort_key = value\n\n @classmethod\n def create_default_arguments(cls):\n return cls(\"Arguments\")\n\n @classmethod\n def create_default_parameters(cls):\n return cls(\"Parameters\")\n\n @classmethod\n def create_default_commands(cls):\n return cls(\"Commands\")\n\n @classmethod\n def create_ordered(cls, *args, sort_key=None, **kwargs):\n \"\"\"Create a group with a globally incremented :attr:`~Group.sort_key`.\n\n Used to create a group that will be displayed **after** a previously declared :meth:`Group.create_ordered` group on the help-page.\n\n If a :attr:`~Group.sort_key` is provided, it is **prepended** to the globally incremented counter value (i.e. has priority during sorting).\n \"\"\"\n count = next(_sort_key_counter)\n if sort_key is None:\n sort_key = (NO_USER_SORT_KEY, count)\n elif is_iterable(sort_key):\n sort_key = (tuple(sort_key), count)\n else:\n sort_key = (sort_key, count)\n return cls(*args, sort_key=sort_key, **kwargs)" }, { "identifier": "Parameter", "path": "cyclopts/parameter.py", "snippet": "class Parameter:\n \"\"\"Cyclopts configuration for individual function parameters.\"\"\"\n\n # All documentation has been moved to ``docs/api.rst`` for greater control with attrs.\n\n name: Tuple[str, ...] = field(\n default=None,\n converter=lambda x: cast(Tuple[str, ...], to_tuple_converter(x)),\n )\n\n converter: Callable = field(default=None, converter=attrs.converters.default_if_none(convert))\n\n validator: Tuple[Callable, ...] = field(\n default=(),\n converter=lambda x: cast(Tuple[Callable, ...], to_tuple_converter(x)),\n )\n\n negative: Union[None, Tuple[str, ...]] = field(default=None, converter=optional_to_tuple_converter)\n\n group: Tuple[Union[Group, str], ...] = field(default=None, converter=to_tuple_converter, hash=False)\n\n parse: bool = field(default=None, converter=attrs.converters.default_if_none(True))\n\n _show: Optional[bool] = field(default=None, alias=\"show\")\n\n show_default: Optional[bool] = field(default=None)\n\n show_choices: bool = field(default=None, converter=attrs.converters.default_if_none(True))\n\n help: Optional[str] = field(default=None)\n\n show_env_var: bool = field(default=None, converter=attrs.converters.default_if_none(True))\n\n env_var: Tuple[str, ...] = field(\n default=None,\n converter=lambda x: cast(Tuple[str, ...], to_tuple_converter(x)),\n )\n\n negative_bool: Tuple[str, ...] = field(\n default=None,\n converter=_negative_converter((\"--no-\",)),\n validator=_double_hyphen_validator,\n )\n\n negative_iterable: Tuple[str, ...] 
= field(\n default=None,\n converter=_negative_converter((\"--empty-\",)),\n validator=_double_hyphen_validator,\n )\n\n required: Optional[bool] = field(default=None)\n\n allow_leading_hyphen: bool = field(default=False)\n\n # Populated by the record_attrs_init_args decorator.\n _provided_args: Tuple[str] = field(default=(), init=False, eq=False)\n\n @property\n def show(self):\n return self._show if self._show is not None else self.parse\n\n def get_negatives(self, type_, *names: str) -> Tuple[str, ...]:\n type_ = get_origin(type_) or type_\n\n if self.negative is not None:\n return self.negative\n elif type_ not in (bool, list, set):\n return ()\n\n out = []\n for name in names:\n if name.startswith(\"--\"):\n name = name[2:]\n elif name.startswith(\"-\"):\n # Do not support automatic negation for short flags.\n continue\n else:\n # Should never reach here.\n raise NotImplementedError(\"All parameters should have started with '-' or '--'.\")\n\n negative_prefixes = self.negative_bool if type_ is bool else self.negative_iterable\n\n for negative_prefix in negative_prefixes:\n out.append(f\"{negative_prefix}{name}\")\n return tuple(out)\n\n def __repr__(self):\n \"\"\"Only shows non-default values.\"\"\"\n content = \", \".join(\n [\n f\"{a.alias}={getattr(self, a.name)!r}\"\n for a in self.__attrs_attrs__ # pyright: ignore[reportGeneralTypeIssues]\n if a.alias in self._provided_args\n ]\n )\n return f\"{type(self).__name__}({content})\"\n\n @classmethod\n def combine(cls, *parameters: Optional[\"Parameter\"]) -> \"Parameter\":\n \"\"\"Returns a new Parameter with values of ``parameters``.\n\n Parameters\n ----------\n `*parameters`: Optional[Parameter]\n Parameters who's attributes override ``self`` attributes.\n Ordered from least-to-highest attribute priority.\n \"\"\"\n kwargs = {}\n for parameter in parameters:\n if parameter is None:\n continue\n for a in parameter.__attrs_attrs__: # pyright: ignore[reportGeneralTypeIssues]\n if a.init and a.alias in parameter._provided_args:\n kwargs[a.alias] = getattr(parameter, a.name)\n\n return cls(**kwargs)\n\n @classmethod\n def default(cls) -> \"Parameter\":\n \"\"\"Create a Parameter with all Cyclopts-default values.\n\n This is different than just :class:`Parameter` because the default\n values will be recorded and override all upstream parameter values.\n \"\"\"\n return cls(\n **{a.alias: a.default for a in cls.__attrs_attrs__ if a.init} # pyright: ignore[reportGeneralTypeIssues]\n )" }, { "identifier": "HelpEntry", "path": "cyclopts/help.py", "snippet": "class HelpEntry:\n name: str\n short: str = \"\"\n description: str = \"\"\n required: bool = False" }, { "identifier": "HelpPanel", "path": "cyclopts/help.py", "snippet": "class HelpPanel:\n format: Literal[\"command\", \"parameter\"]\n title: str\n description: str = \"\"\n entries: List[HelpEntry] = field(factory=list)\n\n def remove_duplicates(self):\n seen, out = set(), []\n for item in self.entries:\n if item not in seen:\n seen.add(item)\n out.append(item)\n self.entries = out\n\n def sort(self):\n self.entries.sort(key=lambda x: (x.name.startswith(\"-\"), x.name))\n\n def __rich__(self):\n if not self.entries:\n return _silent\n table = Table.grid(padding=(0, 1))\n text = Text(end=\"\")\n if self.description:\n text.append(self.description + \"\\n\\n\")\n panel = Panel(\n console.Group(text, table),\n box=box.ROUNDED,\n expand=True,\n title_align=\"left\",\n title=self.title,\n )\n\n if self.format == \"command\":\n table.add_column(justify=\"left\", style=\"cyan\")\n 
table.add_column(justify=\"left\")\n\n for entry in self.entries:\n name = entry.name\n if entry.short:\n name += \",\" + entry.short\n table.add_row(name + \" \", entry.description)\n elif self.format == \"parameter\":\n has_short = any(entry.short for entry in self.entries)\n has_required = any(entry.required for entry in self.entries)\n\n if has_required:\n table.add_column(justify=\"left\", width=1, style=\"red bold\") # For asterisk\n table.add_column(justify=\"left\", no_wrap=True, style=\"cyan\") # For option names\n if has_short:\n table.add_column(justify=\"left\", no_wrap=True, style=\"green\") # For short options\n table.add_column(justify=\"left\") # For main help text.\n\n for entry in self.entries:\n row = []\n if has_required:\n if entry.required:\n row.append(\"*\")\n else:\n row.append(\"\")\n row.append(entry.name + \" \")\n if has_short:\n row.append(entry.short + \" \")\n row.append(entry.description)\n table.add_row(*row)\n else:\n raise NotImplementedError\n\n return panel" }, { "identifier": "create_parameter_help_panel", "path": "cyclopts/help.py", "snippet": "def create_parameter_help_panel(group: \"Group\", iparams, cparams: List[Parameter]) -> HelpPanel:\n icparams = [(ip, cp) for ip, cp in zip(iparams, cparams) if cp.show]\n iparams, cparams = (list(x) for x in zip(*icparams))\n\n help_panel = HelpPanel(format=\"parameter\", title=group.name, description=group.help)\n\n for iparam, cparam in icparams:\n assert cparam.name is not None\n type_ = get_hint_parameter(iparam)[0]\n options = list(cparam.name)\n options.extend(cparam.get_negatives(type_, *options))\n\n # Add an all-uppercase name if it's an argument\n if iparam.kind in (iparam.POSITIONAL_ONLY, iparam.POSITIONAL_OR_KEYWORD):\n arg_name = options[0].lstrip(\"-\").upper()\n if arg_name != options[0]:\n options = [arg_name, *options]\n\n short_options, long_options = [], []\n for option in options:\n if _is_short(option):\n short_options.append(option)\n else:\n long_options.append(option)\n\n help_components = []\n\n if cparam.help:\n help_components.append(cparam.help)\n\n if cparam.show_choices:\n choices = _get_choices(type_)\n if choices:\n help_components.append(rf\"[dim]\\[choices: {choices}][/dim]\")\n\n if cparam.show_env_var and cparam.env_var:\n env_vars = \" \".join(cparam.env_var)\n help_components.append(rf\"[dim]\\[env var: {env_vars}][/dim]\")\n\n if not cparam.required and (\n cparam.show_default or (cparam.show_default is None and iparam.default is not None)\n ):\n default = \"\"\n if isclass(type_) and issubclass(type_, Enum):\n default = iparam.default.name.lower().replace(\"_\", \"-\")\n else:\n default = iparam.default\n\n help_components.append(rf\"[dim]\\[default: {default}][/dim]\")\n\n if cparam.required:\n help_components.append(r\"[red][dim]\\[required][/dim][/red]\")\n\n # populate row\n help_panel.entries.append(\n HelpEntry(\n name=\",\".join(long_options),\n description=\" \".join(help_components),\n short=\",\".join(short_options),\n required=bool(cparam.required),\n )\n )\n\n return help_panel" }, { "identifier": "format_command_entries", "path": "cyclopts/help.py", "snippet": "def format_command_entries(elements) -> List:\n entries = []\n for element in elements:\n short_names, long_names = [], []\n for name in element.name:\n short_names.append(name) if _is_short(name) else long_names.append(name)\n entry = HelpEntry(\n name=\",\".join(long_names),\n short=\",\".join(short_names),\n description=docstring_parse(element.help).short_description or \"\",\n )\n if entry not in 
entries:\n entries.append(entry)\n return entries" }, { "identifier": "format_doc", "path": "cyclopts/help.py", "snippet": "def format_doc(root_app, app: \"App\"):\n from cyclopts.core import App # noqa: F811\n\n raw_doc_string = app.help\n\n if not raw_doc_string:\n return _silent\n\n parsed = docstring_parse(raw_doc_string)\n\n components: List[Tuple[str, str]] = []\n if parsed.short_description:\n components.append((parsed.short_description + \"\\n\", \"default\"))\n\n if parsed.long_description:\n components.append((\"\\n\" + parsed.long_description + \"\\n\", \"info\"))\n\n return Text.assemble(*components)" }, { "identifier": "format_usage", "path": "cyclopts/help.py", "snippet": "def format_usage(\n app,\n command_chain: List[str],\n):\n usage = []\n usage.append(\"Usage:\")\n usage.append(app.name[0])\n usage.extend(command_chain)\n\n for command in command_chain:\n app = app[command]\n\n if app._commands:\n usage.append(\"COMMAND\")\n\n if app.default_command:\n to_show = set()\n for parameter in inspect.signature(app.default_command).parameters.values():\n if parameter.kind in (parameter.POSITIONAL_ONLY, parameter.VAR_POSITIONAL, parameter.POSITIONAL_OR_KEYWORD):\n to_show.add(\"[ARGS]\")\n if parameter.kind in (parameter.KEYWORD_ONLY, parameter.VAR_KEYWORD, parameter.POSITIONAL_OR_KEYWORD):\n to_show.add(\"[OPTIONS]\")\n usage.extend(sorted(to_show))\n\n return Text(\" \".join(usage) + \"\\n\", style=\"bold\")" }, { "identifier": "ResolvedCommand", "path": "cyclopts/resolve.py", "snippet": "class ResolvedCommand:\n command: Callable\n groups: List[Group]\n groups_iparams: List[Tuple[Group, List[inspect.Parameter]]]\n iparam_to_groups: ParameterDict\n iparam_to_cparam: ParameterDict\n name_to_iparam: Dict[str, inspect.Parameter]\n\n def __init__(\n self,\n f,\n app_parameter: Optional[Parameter] = None,\n group_arguments: Optional[Group] = None,\n group_parameters: Optional[Group] = None,\n parse_docstring: bool = True,\n ):\n \"\"\"\n ``app_parameter`` implicitly has the command-group parameter already resolved.\n\n Parameters\n ----------\n f: Callable\n Function to resolve annotated :class:`Parameters`.\n app_parameter:\n Default :class:`Parameter` to inherit configuration from.\n group_arguments: Optional[Group]\n Default :class:`Group` for positional-only arguments.\n group_parameters: Optional[Group]\n Default :class:`Group` for non-positional-only arguments.\n parse_docstring: bool\n Parse the docstring to populate Parameter ``help``, if not explicitly set.\n Disable for improved performance if ``help`` won't be used in the resulting :class:`Parameter`.\n \"\"\"\n if group_arguments is None:\n group_arguments = Group.create_default_arguments()\n if group_parameters is None:\n group_parameters = Group.create_default_parameters()\n\n self.command = f\n signature = inspect.signature(f)\n self.name_to_iparam = cast(Dict[str, inspect.Parameter], signature.parameters)\n\n # Get:\n # 1. Fully resolved and created Groups.\n # 2. 
A mapping of inspect.Parameter to those Group objects.\n self.groups, self.iparam_to_groups = _resolve_groups(f, app_parameter, group_arguments, group_parameters)\n\n # Fully Resolve each Cyclopts Parameter\n self.iparam_to_cparam = ParameterDict()\n iparam_to_docstring_cparam = _resolve_docstring(f) if parse_docstring else ParameterDict()\n for iparam, groups in self.iparam_to_groups.items():\n if iparam.kind in (iparam.POSITIONAL_ONLY, iparam.VAR_POSITIONAL):\n # Name is only used for help-string\n names = [iparam.name.upper()]\n else:\n names = [\"--\" + iparam.name.replace(\"_\", \"-\")]\n\n default_name_parameter = Parameter(name=names)\n\n cparam = get_hint_parameter(\n iparam,\n app_parameter,\n *(x.default_parameter for x in groups),\n iparam_to_docstring_cparam.get(iparam),\n default_name_parameter,\n Parameter(required=iparam.default is iparam.empty),\n )[1]\n self.iparam_to_cparam[iparam] = cparam\n\n self.bind = signature.bind_partial if _has_unparsed_parameters(f, app_parameter) else signature.bind\n\n # Create a convenient group-to-iparam structure\n self.groups_iparams = [\n (\n group,\n [iparam for iparam, groups in self.iparam_to_groups.items() if group in groups],\n )\n for group in self.groups\n ]" } ]
import inspect import sys import attrs import pytest from enum import Enum from textwrap import dedent from typing import List, Literal, Optional, Union from typing_extensions import Annotated from typing import Annotated from cyclopts import App, Group, Parameter from cyclopts.help import ( HelpEntry, HelpPanel, create_parameter_help_panel, format_command_entries, format_doc, format_usage, ) from cyclopts.resolve import ResolvedCommand
11,381
"""Docstring for foo. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_no_show(app, console): @app.command def foo(): """Docstring for foo.""" pass @app.command(show=False) def bar(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app,))) with console.capture() as capture: app.help_print([], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ foo Docstring for foo. │ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_format_commands_explicit_help(app, console): @app.command(help="Docstring for foo.") def foo(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_explicit_name(app, console): @app.command(name="bar") def foo(): """Docstring for bar. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["bar"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ bar Docstring for bar. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_help_empty(console): app = App(name="foo", version_flags=[], help_flags=[]) with console.capture() as capture: app.help_print(console=console) actual = capture.get() assert actual == "Usage: foo\n\n" @pytest.fixture def capture_format_group_parameters(console, default_function_groups): def inner(cmd): command = ResolvedCommand(cmd, *default_function_groups) with console.capture() as capture: group, iparams = command.groups_iparams[0] cparams = [command.iparam_to_cparam[x] for x in iparams] console.print(create_parameter_help_panel(group, iparams, cparams)) return capture.get() return inner def test_help_format_group_parameters(capture_format_group_parameters): def cmd(
if sys.version_info < (3, 9): else: @pytest.fixture def app(): return App( name="app", help="App Help String Line 1.", ) def test_empty_help_panel_rich_silent(console): help_panel = HelpPanel(format="command", title="test") with console.capture() as capture: console.print(help_panel) actual = capture.get() assert actual == "" def test_help_default_action(app, console): """No command should default to help.""" with console.capture() as capture: app([], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_custom_usage(app, console): app.usage = "My custom usage." with console.capture() as capture: app([], console=console) actual = capture.get() expected = dedent( """\ My custom usage. App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_custom_usage_subapp(app, console): app.command(App(name="foo", usage="My custom usage.")) with console.capture() as capture: app(["foo", "--help"], console=console) actual = capture.get() expected = dedent( """\ My custom usage. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_default_help_flags(console): """Standard help flags.""" app = App(name="app", help="App Help String Line 1.") with console.capture() as capture: app(["--help"], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_usage_empty(console): app = App( name="app", help="App Help String Line 1.", help_flags=[], version_flags=[], ) with console.capture() as capture: console.print(format_usage(app, [])) actual = capture.get() assert actual == "Usage: app\n\n" def test_help_format_usage_command(app, console): @app.command def foo(): pass with console.capture() as capture: console.print(format_usage(app, [])) actual = capture.get() assert actual == "Usage: app COMMAND\n\n" def test_format_commands_docstring(app, console): @app.command def foo(): """Docstring for foo. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. 
│\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_no_show(app, console): @app.command def foo(): """Docstring for foo.""" pass @app.command(show=False) def bar(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app,))) with console.capture() as capture: app.help_print([], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ foo Docstring for foo. │ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_format_commands_explicit_help(app, console): @app.command(help="Docstring for foo.") def foo(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_explicit_name(app, console): @app.command(name="bar") def foo(): """Docstring for bar. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["bar"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ bar Docstring for bar. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_help_empty(console): app = App(name="foo", version_flags=[], help_flags=[]) with console.capture() as capture: app.help_print(console=console) actual = capture.get() assert actual == "Usage: foo\n\n" @pytest.fixture def capture_format_group_parameters(console, default_function_groups): def inner(cmd): command = ResolvedCommand(cmd, *default_function_groups) with console.capture() as capture: group, iparams = command.groups_iparams[0] cparams = [command.iparam_to_cparam[x] for x in iparams] console.print(create_parameter_help_panel(group, iparams, cparams)) return capture.get() return inner def test_help_format_group_parameters(capture_format_group_parameters): def cmd(
foo: Annotated[str, Parameter(help="Docstring for foo.")],
2
2023-11-03 02:24:25+00:00
16k
RoboFlamingo/RoboFlamingo
robot_flamingo/models/factory.py
[ { "identifier": "BCFlamingo", "path": "robot_flamingo/models/flamingo_bc.py", "snippet": "class BCFlamingo(nn.Module):\n def __init__(\n self,\n vision_encoder: nn.Module,\n lang_encoder: nn.Module,\n eoc_token_id: int,\n media_token_id: int,\n vis_dim: int,\n cross_attn_every_n_layers: int = 1,\n use_media_placement_augmentation: bool = False,\n # this is the window size sampled from the episode\n window_size: int = 8,\n use_gripper=False,\n fusion_mode='',\n sep_resampler=False,\n use_state=False,\n use_diff=False,\n diff_horizon=32,\n last_action=False,\n n_timesteps=150,\n state_dim=15,\n use_hist=False,\n debug=False,\n predict_epsilon=True,\n pad_length=-1,\n multi_step_action=1,\n sep_lm_head=False,\n return_feature = False,\n llm='llama_9b',\n pooling='max',\n residual=False,\n tcp_rel=False,\n replan=-1,\n decoder_type='lstm',\n hidden_size=None,\n fwd_pred=False,\n fwd_pred_hand=False,\n refresh=-1\n ):\n \"\"\"\n Args:\n vision_encoder (nn.Module): HF CLIPModel\n lang_encoder (nn.Module): HF causal language model\n eoc_token_id (int): Token id for <|endofchunk|>\n media_token_id (int): Token id for <image>\n vis_dim (int): Dimension of the visual features.\n Visual features are projected to match this shape along the last dimension.\n cross_attn_every_n_layers (int, optional): How often to apply cross attention after transformer layer. Defaults to 1.\n use_media_placement_augmentation (bool, optional): Whether to randomly assign images to the preceding or following text in training. Defaults to False.\n \"\"\"\n super().__init__()\n self.use_gripper = use_gripper\n self.use_state = use_state\n self.fusion_mode = fusion_mode\n self.eoc_token_id = eoc_token_id\n self.media_token_id = media_token_id\n self.use_media_placement_augmentation = use_media_placement_augmentation\n self.vis_dim = vis_dim\n self.window_size = window_size\n self.tcp_rel = tcp_rel\n self.act_step = multi_step_action\n print('window size: {}'.format(window_size))\n self.vision_encoder = vision_encoder\n self.perceiver = PerceiverResampler(dim=self.vis_dim)\n self.sep_resampler = sep_resampler\n self.use_hist = use_hist\n self.lang_encoder = lang_encoder\n self.pad_length = pad_length\n self.replan = replan\n if self.replan != -1:\n self.replan = min(int(replan * self.window_size), 180)\n self.refresh = refresh\n if hasattr(lang_encoder.config, \"d_model\"):\n self.lang_dim = lang_encoder.config.d_model # mpt uses d_model\n else:\n self.lang_dim = lang_encoder.config.hidden_size\n \n # print(self.vis_dim, self.lang_dim)\n \n self.residual = residual\n\n if not debug:\n if 'llama' in llm:\n self.lang_encoder.init_flamingo(\n media_token_id=media_token_id,\n vis_hidden_size=self.vis_dim,\n cross_attn_every_n_layers=cross_attn_every_n_layers,\n use_media_placement_augmentation=self.use_media_placement_augmentation,\n residual=residual,\n )\n else:\n self.lang_encoder.init_flamingo(\n media_token_id=media_token_id,\n lang_hidden_size=self.lang_dim,\n vis_hidden_size=self.vis_dim,\n cross_attn_every_n_layers=cross_attn_every_n_layers,\n gradient_checkpointing=False,\n )\n\n if sep_resampler:\n self.perceiver_gripper = PerceiverResampler(dim=self.vis_dim)\n self.perceiver_gripper.load_state_dict(copy.deepcopy(self.perceiver.state_dict()))\n if use_state:\n self.state_fc = nn.Linear(state_dim, self.vis_dim)\n if use_hist:\n self.frame_embs = nn.Parameter(torch.randn(self.window_size, self.vis_dim))\n # To-do: nn archiecture for actor\n self.llm = llm\n if llm=='llama':\n in_features = 
lang_encoder.lm_head.in_features\n else:\n in_features = self.lang_dim\n self.use_diff = use_diff\n self.decoder_type = decoder_type\n if decoder_type == 'lstm':\n lm_head = DeterministicDecoder(in_features, self.window_size, \n use_diff=use_diff, last_action=last_action, fusion_mode=fusion_mode, use_state=use_state, return_feature=return_feature, multi_step_action=multi_step_action, pooling=pooling)\n self.lang_encoder.lm_head = lm_head\n elif decoder_type == 'fc':\n if use_hist:\n self.lang_encoder.lm_head = self.action_head = FCDecoder(in_features, self.window_size, \n use_diff=use_diff, last_action=last_action, fusion_mode=fusion_mode, use_state=use_state, return_feature=return_feature, multi_step_action=multi_step_action)\n elif 'vit_concat' in fusion_mode:\n self.lang_encoder.lm_head = self.action_head = FCDecoder(in_features, self.window_size, \n use_diff=use_diff, last_action=last_action, fusion_mode=fusion_mode, use_state=use_state, return_feature=return_feature, multi_step_action=multi_step_action)\n else:\n raise NotImplementedError\n elif decoder_type == 'diffusion':\n if use_diff:\n self.diffusion_model = DiffusionDecoder(\n self.action_head.hidden_size, \n self.window_size,\n input_dim=self.action_head.out_features+1,\n n_timesteps=n_timesteps,\n horizon=diff_horizon,\n predict_epsilon=predict_epsilon,\n )\n else:\n raise NotImplementedError\n elif decoder_type=='gpt':\n lm_head = GPTDecoder(in_features, self.window_size, use_diff=use_diff, last_action=last_action, fusion_mode=fusion_mode, multi_step_action=multi_step_action, pooling=pooling, hidden_size=hidden_size)\n self.lang_encoder.lm_head = self.action_head = lm_head\n else:\n raise NotImplementedError\n\n self.sep_lm_head = sep_lm_head\n if sep_lm_head:\n self.lm_head = self.lang_encoder.lm_head\n self.lang_encoder.lm_head = nn.Identity()\n\n def forward(\n self,\n vision_x: torch.Tensor,\n lang_x: torch.Tensor,\n attention_mask: torch.Tensor = None,\n labels: torch.Tensor = None,\n use_cached_vision_x: bool = False,\n clear_conditioned_layers: bool = True,\n past_key_values=None,\n use_cache: bool = False,\n vision_gripper = None,\n state_tensor = None,\n return_feature = False,\n policy_mask=None\n ):\n \"\"\"\n Forward pass of Flamingo.\n\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W) with F=1\n lang_x (torch.Tensor): Language input ids\n shape (B, T_txt)\n attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.\n labels (torch.Tensor, optional): Labels. Defaults to None.\n clear_conditioned_layers: if True, clear the conditioned layers\n once the foward pass is completed. Set this to false if the\n same set of images will be reused in another subsequent\n forward pass.\n past_key_values: pre-computed values to pass to language model.\n See past_key_values documentation in Hugging Face\n CausalLM models.\n use_cache: whether to use cached key values. See use_cache\n documentation in Hugging Face CausalLM models.\n \"\"\"\n raw_rgb = vision_x.clone()\n raw_gripper = vision_gripper.clone()\n assert (\n vision_x is not None\n ) or use_cached_vision_x, (\n \"Must provide either vision_x or use_cached_vision_x to True.\"\n )\n\n if use_cached_vision_x:\n # Case: use cached; vision_x should be cached and other\n # vision-related inputs should not be provided.\n assert (\n vision_x is None\n ), \"Expect vision_x to be None when use_cached_vision_x is True.\"\n assert self.lang_encoder.is_conditioned()\n\n else:\n # Case: do not use caching (i.e. 
this is a standard forward pass);\n if self.use_hist:\n self._encode_history_vision_post_fusion(vision_x, vision_gripper, state_tensor)\n else:\n if not self.use_gripper or self.fusion_mode == 'two_way':\n vision_x = self._encode_vision_x(vision_x=vision_x)\n else:\n if self.fusion_mode == 'pre':\n self._encode_multi_vision_pre_fusion(vision_x, vision_gripper, state_tensor)\n elif self.fusion_mode == 'post':\n self._encode_multi_vision_post_fusion(vision_x, vision_gripper, state_tensor)\n elif self.fusion_mode == 'vit_concat':\n self._encode_history_vision_fc_post(vision_x, vision_gripper, state_tensor)\n \n if 'llama' in self.llm:\n output = self.lang_encoder(\n input_ids=lang_x,\n attention_mask=attention_mask,\n # labels=labels, # 不输入label,程序就不会计算loss\n past_key_values=past_key_values,\n use_cache=use_cache,\n )\n else:\n output = self.lang_encoder(\n input_ids=lang_x,\n attention_mask=attention_mask\n )\n \n if self.sep_lm_head:\n output_llm = output.logits\n output_lm_head = self.lm_head(output_llm, state_tensor=state_tensor, return_feature=return_feature)\n output.logits = output_lm_head\n \n if clear_conditioned_layers:\n self.lang_encoder.clear_conditioned_layers()\n\n # action_seq = self.action_head(vision_x)\n return output\n\n # Generate function with actor for text time adpatation\n def generate(\n self,\n vision_x: torch.Tensor,\n lang_x: torch.Tensor,\n attention_mask: torch.Tensor = None,\n num_beams=1,\n max_new_tokens=None,\n temperature=1.0,\n top_k=0,\n top_p=1.0,\n no_repeat_ngram_size=0,\n prefix_allowed_tokens_fn=None,\n length_penalty=1.0,\n num_return_sequences=1,\n do_sample=False,\n early_stopping=False,\n ):\n \"\"\"\n Generate text conditioned on vision and language inputs.\n\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W)\n images in the same chunk are collated along T_img, and frames are collated along F\n currently only F=1 is supported (single-frame videos)\n lang_x (torch.Tensor): Language input\n shape (B, T_txt)\n max_length (int, optional): Maximum length of the output. Defaults to None.\n attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.\n num_beams (int, optional): Number of beams. Defaults to 1.\n max_new_tokens (int, optional): Maximum new tokens. Defaults to None.\n temperature (float, optional): Temperature. Defaults to 1.0.\n top_k (int, optional): Top k. Defaults to 0.\n top_p (float, optional): Top p. Defaults to 1.0.\n no_repeat_ngram_size (int, optional): No repeat ngram size. Defaults to 0.\n length_penalty (float, optional): Length penalty. Defaults to 1.0.\n num_return_sequences (int, optional): Number of return sequences. Defaults to 1.\n do_sample (bool, optional): Do sample. Defaults to False.\n early_stopping (bool, optional): Early stopping. 
Defaults to False.\n Returns:\n torch.Tensor: lang_x with generated tokens appended to it\n \"\"\"\n if num_beams > 1:\n vision_x = vision_x.repeat_interleave(num_beams, dim=0)\n\n self._encode_vision_x(vision_x=vision_x)\n\n output = self.lang_encoder.generate(\n lang_x,\n attention_mask=attention_mask,\n eos_token_id=self.eoc_token_id,\n num_beams=num_beams,\n max_new_tokens=max_new_tokens,\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n no_repeat_ngram_size=no_repeat_ngram_size,\n length_penalty=length_penalty,\n num_return_sequences=num_return_sequences,\n do_sample=do_sample,\n early_stopping=early_stopping,\n )\n\n self.lang_encoder.clear_conditioned_layers()\n return output\n\n def _encode_vision_x(self, vision_x: torch.Tensor):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n\n assert vision_x.ndim == 6, \"vision_x should be of shape (b, T_img, F, C, H, W)\"\n b, T, F = vision_x.shape[:3]\n assert F == 1, \"Only single frame supported\"\n\n vision_x = rearrange(vision_x, \"b T F c h w -> (b T F) c h w\")\n with torch.no_grad():\n vision_x = self.vision_encoder.visual(vision_x)[1]\n vision_x = rearrange(vision_x, \"(b T F) v d -> b T F v d\", b=b, T=T, F=F)\n\n vision_x = self.perceiver(vision_x) # reshapes to (b, T, n, d)\n\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n\n def _encode_vision(self, vision_x: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n\n assert vision_x.ndim == 6, \"vision_x should be of shape (b, T_img, F, C, H, W)\"\n b, T, F = vision_x.shape[:3]\n assert F == 1, \"Only single frame supported\"\n\n vision_x = rearrange(vision_x, \"b T F c h w -> (b T F) c h w\")\n with torch.no_grad():\n vision_x = self.vision_encoder.visual(vision_x)[1]\n vision_x = rearrange(vision_x, \"(b T F) v d -> b T F v d\", b=b, T=T, F=F)\n return vision_x\n\n def _encode_multi_vision_pre_fusion(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n vision_rgb = self._encode_vision(vision_rgb)\n vision_gripper = self._encode_vision(vision_gripper)\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=3)\n\n vision_x = self.perceiver(vision_x) # reshapes to (b, T, n, 
d)\n\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n\n def _encode_multi_vision_post_fusion(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n vision_rgb = self._encode_vision(vision_rgb)\n vision_gripper = self._encode_vision(vision_gripper)\n vision_rgb = self.perceiver(vision_rgb)\n if self.sep_resampler:\n vision_gripper = self.perceiver_gripper(vision_gripper)\n else:\n vision_gripper = self.perceiver(vision_gripper)\n\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=2) # reshapes to (b, T, 2*n, d)\n if self.use_state and state_tensor is not None:\n # state_tensor = state_tensor.double()\n state_tensor = self.state_fc(state_tensor)\n vision_x = torch.cat([vision_x, state_tensor], dim=2) # reshapes to (b, T, 2*n+1, d)\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n\n def _encode_multi_vision_two_way(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n vision_rgb = self._encode_vision(vision_rgb)\n vision_gripper = self._encode_vision(vision_gripper)\n vision_rgb = self.perceiver(vision_rgb)\n if self.sep_resampler:\n vision_gripper = self.perceiver_gripper(vision_gripper)\n else:\n vision_gripper = self.perceiver(vision_gripper)\n\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=0) # reshapes to (b, T, 2*n, d)\n if self.use_state and state_tensor is not None:\n state_tensor = self.state_fc(state_tensor)\n vision_x = torch.cat([vision_x, state_tensor], dim=0) # reshapes to (b, T, 2*n+1, d)\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n\n def _encode_history_vision_post_fusion(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n vision_rgb = self._encode_vision(vision_rgb)\n vision_gripper = self._encode_vision(vision_gripper)\n bs = int(vision_rgb.shape[0] // self.window_size)\n vision_rgb = 
vision_rgb.view(bs, self.window_size, *vision_rgb.shape[1:])\n _, _, T, p, v_tok, dim = vision_rgb.shape[:6]\n frame_embs = repeat(self.frame_embs, 'F d -> b F T p v d', b=bs, T=T, p=p, v=v_tok)\n vision_rgb = vision_rgb + frame_embs\n vision_rgb = rearrange(vision_rgb, 'b F T p v d -> (b F) T p v d')\n vision_rgb = self.perceiver(vision_rgb)\n\n vision_gripper = vision_gripper.view(vision_gripper.shape[0] // self.window_size, self.window_size,\n *vision_gripper.shape[1:])\n frame_embs = repeat(self.frame_embs, 'F d -> b F T p v d', b=bs, T=T, p=p, v=v_tok)\n vision_gripper = vision_gripper + frame_embs\n vision_gripper = rearrange(vision_gripper, 'b F T p v d -> (b F) T p v d')\n if self.sep_resampler:\n vision_gripper = self.perceiver_gripper(vision_gripper)\n else:\n vision_gripper = self.perceiver(vision_gripper)\n\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=2) # reshapes to (b, T, 2*n, d)\n if self.use_state and state_tensor is not None:\n state_tensor = self.state_fc(state_tensor)\n vision_x = torch.cat([vision_x, state_tensor], dim=2) # reshapes to (b, T, 2*n+1, d)\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n \n def _encode_history_vision_fc_post(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n bs = int(vision_rgb.shape[0] // self.window_size)\n vision_rgb = self._encode_vision(vision_rgb)\n vision_rgb = self.perceiver(vision_rgb) # BxL, T, n, d\n vision_rgb = vision_rgb.view(-1, self.window_size, *vision_rgb.shape[1:]) # B, L, T, n, d\n vision_rgb = rearrange(vision_rgb, 'b L T n d -> b T (n L) d')\n\n vision_gripper = self._encode_vision(vision_gripper)\n if self.sep_resampler:\n vision_gripper = self.perceiver_gripper(vision_gripper)\n else:\n vision_gripper = self.perceiver(vision_gripper)\n vision_gripper = vision_gripper.view(-1, self.window_size, *vision_gripper.shape[1:]) # B, L, T, n, d\n vision_gripper = rearrange(vision_gripper, 'b L T n d -> b T (n L) d')\n\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=2)\n\n if self.use_state and state_tensor is not None:\n state_tensor = self.state_fc(state_tensor)\n vision_x = torch.cat([vision_x, state_tensor], dim=2) # reshapes to (b, T, 2*n+1, d)\n \n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x" }, { "identifier": "MPTFlamingo", "path": "robot_flamingo/models/flamingo_mpt.py", "snippet": "class MPTFlamingo(nn.Module):\n def __init__(\n self,\n vision_encoder: nn.Module,\n lang_encoder: nn.Module,\n eoc_token_id: int,\n media_token_id: int,\n vis_dim: int,\n cross_attn_every_n_layers: int = 1,\n use_media_placement_augmentation: bool = False,\n # this is the window size sampled from the episode\n window_size: int = 8,\n use_gripper=False,\n fusion_mode='',\n sep_resampler=False,\n use_state=False,\n use_diff=False,\n diff_horizon=32,\n last_action=False,\n n_timesteps=150,\n state_dim=15,\n use_hist=False,\n debug=False,\n predict_epsilon=True,\n 
pad_length=-1,\n multi_step_action=1,\n sep_lm_head=False,\n return_feature = False,\n llm='llama',\n pooling='max',\n residual=False,\n tcp_rel=False,\n replan=-1,\n decoder_type='lstm',\n hidden_size=None,\n fwd_pred=False,\n fwd_pred_hand=False,\n global_latent=10,\n no_image_patch=False,\n refresh=-1\n ):\n \"\"\"\n Args:\n vision_encoder (nn.Module): HF CLIPModel\n lang_encoder (nn.Module): HF causal language model\n eoc_token_id (int): Token id for <|endofchunk|>\n media_token_id (int): Token id for <image>\n vis_dim (int): Dimension of the visual features.\n Visual features are projected to match this shape along the last dimension.\n cross_attn_every_n_layers (int, optional): How often to apply cross attention after transformer layer. Defaults to 1.\n use_media_placement_augmentation (bool, optional): Whether to randomly assign images to the preceding or following text in training. Defaults to False.\n \"\"\"\n super().__init__()\n\n self.use_gripper = use_gripper\n self.use_state = use_state\n self.fusion_mode = fusion_mode\n self.eoc_token_id = eoc_token_id\n self.media_token_id = media_token_id\n self.use_media_placement_augmentation = use_media_placement_augmentation\n self.vis_dim = vis_dim\n self.window_size = window_size\n self.tcp_rel = tcp_rel\n self.act_step = multi_step_action\n print('window size: {}'.format(window_size))\n self.vision_encoder = vision_encoder\n self.perceiver = PerceiverResampler(dim=self.vis_dim)\n self.sep_resampler = sep_resampler\n self.use_hist = use_hist\n self.lang_encoder = lang_encoder\n self.pad_length = pad_length\n self.replan = replan\n if self.replan != -1:\n self.replan = min(int(replan * self.window_size), 180)\n self.refresh = refresh\n if hasattr(lang_encoder.config, \"d_model\"):\n self.lang_dim = lang_encoder.config.d_model # mpt uses d_model\n else:\n self.lang_dim = lang_encoder.config.hidden_size\n\n self.residual = residual\n print(self.vis_dim, self.lang_dim)\n print(lang_encoder.config)\n if not debug:\n if 'llama' in llm:\n self.lang_encoder.init_flamingo(\n media_token_id=media_token_id,\n vis_hidden_size=self.vis_dim,\n cross_attn_every_n_layers=cross_attn_every_n_layers,\n use_media_placement_augmentation=self.use_media_placement_augmentation,\n residual=residual,\n )\n else:\n self.lang_encoder.init_flamingo(\n media_token_id=media_token_id,\n lang_hidden_size=self.lang_dim,\n vis_hidden_size=self.vis_dim,\n cross_attn_every_n_layers=cross_attn_every_n_layers,\n gradient_checkpointing=False,\n )\n\n if sep_resampler:\n self.perceiver_gripper = PerceiverResampler(dim=self.vis_dim)\n self.perceiver_gripper.load_state_dict(copy.deepcopy(self.perceiver.state_dict()))\n if use_state:\n self.state_fc = nn.Linear(state_dim, self.vis_dim)\n if use_hist:\n self.frame_embs = nn.Parameter(torch.randn(self.window_size, self.vis_dim))\n # To-do: nn archiecture for actor\n self.llm = llm\n if llm=='llama':\n in_features = lang_encoder.lm_head.in_features\n else:\n in_features = self.lang_dim\n self.use_diff = use_diff\n self.decoder_type = decoder_type\n if decoder_type == 'lstm':\n lm_head = DeterministicDecoder(in_features, self.window_size, \n use_diff=use_diff, last_action=last_action, fusion_mode=fusion_mode, use_state=use_state, return_feature=return_feature, multi_step_action=multi_step_action, pooling=pooling)\n self.lang_encoder.lm_head = lm_head\n elif decoder_type == 'fc':\n if use_hist:\n self.lang_encoder.lm_head = self.action_head = FCDecoder(in_features, self.window_size, \n use_diff=use_diff, last_action=last_action, 
fusion_mode=fusion_mode, use_state=use_state, return_feature=return_feature, multi_step_action=multi_step_action)\n elif 'vit_concat' in fusion_mode:\n self.lang_encoder.lm_head = self.action_head = FCDecoder(in_features, self.window_size, \n use_diff=use_diff, last_action=last_action, fusion_mode=fusion_mode, use_state=use_state, return_feature=return_feature, multi_step_action=multi_step_action)\n else:\n raise NotImplementedError\n elif decoder_type == 'diffusion':\n if use_diff:\n self.diffusion_model = DiffusionDecoder(\n self.action_head.hidden_size, \n self.window_size,\n input_dim=self.action_head.out_features+1,\n n_timesteps=n_timesteps,\n horizon=diff_horizon,\n predict_epsilon=predict_epsilon,\n )\n else:\n raise NotImplementedError\n elif decoder_type=='gpt':\n lm_head = GPTDecoder(in_features, self.window_size, use_diff=use_diff, last_action=last_action, fusion_mode=fusion_mode, multi_step_action=multi_step_action, pooling=pooling, hidden_size=hidden_size)\n self.lang_encoder.lm_head = self.action_head = lm_head\n else:\n raise NotImplementedError\n \n sep_lm_head = True\n self.sep_lm_head = sep_lm_head\n if sep_lm_head:\n self.lm_head = self.lang_encoder.lm_head\n self.lang_encoder.lm_head = nn.Identity()\n\n def forward(\n self,\n vision_x: torch.Tensor,\n lang_x: torch.Tensor,\n attention_mask: torch.Tensor = None,\n labels: torch.Tensor = None,\n use_cached_vision_x: bool = False,\n clear_conditioned_layers: bool = True,\n past_key_values=None,\n use_cache: bool = False,\n vision_gripper = None,\n state_tensor = None,\n return_feature = False,\n policy_mask=None\n ):\n \"\"\"\n Forward pass of Flamingo.\n\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W) with F=1\n lang_x (torch.Tensor): Language input ids\n shape (B, T_txt)\n attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.\n labels (torch.Tensor, optional): Labels. Defaults to None.\n clear_conditioned_layers: if True, clear the conditioned layers\n once the foward pass is completed. Set this to false if the\n same set of images will be reused in another subsequent\n forward pass.\n past_key_values: pre-computed values to pass to language model.\n See past_key_values documentation in Hugging Face\n CausalLM models.\n use_cache: whether to use cached key values. See use_cache\n documentation in Hugging Face CausalLM models.\n \"\"\"\n raw_rgb = vision_x.clone()\n raw_gripper = vision_gripper.clone()\n assert (\n vision_x is not None\n ) or use_cached_vision_x, (\n \"Must provide either vision_x or use_cached_vision_x to True.\"\n )\n\n if use_cached_vision_x:\n # Case: use cached; vision_x should be cached and other\n # vision-related inputs should not be provided.\n assert (\n vision_x is None\n ), \"Expect vision_x to be None when use_cached_vision_x is True.\"\n assert self.lang_encoder.is_conditioned()\n\n else:\n # Case: do not use caching (i.e. 
this is a standard forward pass);\n if self.use_hist:\n self._encode_history_vision_post_fusion(vision_x, vision_gripper)\n else:\n if not self.use_gripper or self.fusion_mode == 'two_way':\n vision_x = self._encode_vision_x(vision_x=vision_x)\n else:\n if self.fusion_mode == 'pre':\n self._encode_multi_vision_pre_fusion(vision_x, vision_gripper)\n elif self.fusion_mode == 'post':\n self._encode_multi_vision_post_fusion(vision_x, vision_gripper)\n elif self.fusion_mode == 'vit_concat':\n self._encode_history_vision_fc_post(vision_x, vision_gripper)\n \n output = self.lang_encoder(\n input_ids=lang_x,\n attention_mask=attention_mask.bool(),\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_hidden_states=True\n )\n\n output_hs = output.hidden_states[-1]\n output_hs = self.lm_head(output_hs, state_tensor=state_tensor, return_feature=return_feature)\n output.logits = output_hs\n \n return output\n\n def _encode_vision_x(self, vision_x: torch.Tensor):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n\n assert vision_x.ndim == 6, \"vision_x should be of shape (b, T_img, F, C, H, W)\"\n b, T, F = vision_x.shape[:3]\n assert F == 1, \"Only single frame supported\"\n\n vision_x = rearrange(vision_x, \"b T F c h w -> (b T F) c h w\")\n with torch.no_grad():\n vision_x = self.vision_encoder.visual(vision_x)[1]\n vision_x = rearrange(vision_x, \"(b T F) v d -> b T F v d\", b=b, T=T, F=F)\n\n vision_x = self.perceiver(vision_x) # reshapes to (b, T, n, d)\n\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n\n def _encode_vision(self, vision_x: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_x (torch.Tensor): Vision input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n\n assert vision_x.ndim == 6, \"vision_x should be of shape (b, T_img, F, C, H, W)\"\n b, T, F = vision_x.shape[:3]\n assert F == 1, \"Only single frame supported\"\n\n vision_x = rearrange(vision_x, \"b T F c h w -> (b T F) c h w\")\n with torch.no_grad():\n vision_x = self.vision_encoder.visual(vision_x)[1]\n vision_x = rearrange(vision_x, \"(b T F) v d -> b T F v d\", b=b, T=T, F=F)\n return vision_x\n\n def _encode_multi_vision_pre_fusion(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n vision_rgb = self._encode_vision(vision_rgb)\n 
vision_gripper = self._encode_vision(vision_gripper)\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=3)\n\n vision_x = self.perceiver(vision_x) # reshapes to (b, T, n, d)\n\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n\n def _encode_multi_vision_post_fusion(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n vision_rgb = self._encode_vision(vision_rgb)\n vision_gripper = self._encode_vision(vision_gripper)\n vision_rgb = self.perceiver(vision_rgb)\n if self.sep_resampler:\n vision_gripper = self.perceiver_gripper(vision_gripper)\n else:\n vision_gripper = self.perceiver(vision_gripper)\n\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=2) # reshapes to (b, T, 2*n, d)\n if self.use_state and state_tensor is not None:\n state_tensor = self.state_fc(state_tensor)\n vision_x = torch.cat([vision_x, state_tensor], dim=2) # reshapes to (b, T, 2*n+1, d)\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n\n def _encode_multi_vision_two_way(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n vision_rgb = self._encode_vision(vision_rgb)\n vision_gripper = self._encode_vision(vision_gripper)\n vision_rgb = self.perceiver(vision_rgb)\n if self.sep_resampler:\n vision_gripper = self.perceiver_gripper(vision_gripper)\n else:\n vision_gripper = self.perceiver(vision_gripper)\n\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=0) # reshapes to (b, T, 2*n, d)\n if self.use_state and state_tensor is not None:\n state_tensor = self.state_fc(state_tensor)\n vision_x = torch.cat([vision_x, state_tensor], dim=0) # reshapes to (b, T, 2*n+1, d)\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n\n def _encode_history_vision_post_fusion(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n vision_rgb = 
self._encode_vision(vision_rgb)\n vision_gripper = self._encode_vision(vision_gripper)\n bs = int(vision_rgb.shape[0] // self.window_size)\n vision_rgb = vision_rgb.view(bs, self.window_size, *vision_rgb.shape[1:])\n _, _, T, p, v_tok, dim = vision_rgb.shape[:6]\n frame_embs = repeat(self.frame_embs, 'F d -> b F T p v d', b=bs, T=T, p=p, v=v_tok)\n vision_rgb = vision_rgb + frame_embs\n vision_rgb = rearrange(vision_rgb, 'b F T p v d -> (b F) T p v d')\n vision_rgb = self.perceiver(vision_rgb)\n\n vision_gripper = vision_gripper.view(vision_gripper.shape[0] // self.window_size, self.window_size,\n *vision_gripper.shape[1:])\n frame_embs = repeat(self.frame_embs, 'F d -> b F T p v d', b=bs, T=T, p=p, v=v_tok)\n vision_gripper = vision_gripper + frame_embs\n vision_gripper = rearrange(vision_gripper, 'b F T p v d -> (b F) T p v d')\n if self.sep_resampler:\n vision_gripper = self.perceiver_gripper(vision_gripper)\n else:\n vision_gripper = self.perceiver(vision_gripper)\n\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=2) # reshapes to (b, T, 2*n, d)\n if self.use_state and state_tensor is not None:\n state_tensor = self.state_fc(state_tensor)\n vision_x = torch.cat([vision_x, state_tensor], dim=2) # reshapes to (b, T, 2*n+1, d)\n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x\n \n def _encode_history_vision_fc_post(self, vision_rgb: torch.Tensor, vision_gripper: torch.Tensor, state_tensor=None):\n \"\"\"\n Compute media tokens from vision input by passing it through vision encoder and conditioning language model.\n Args:\n vision_rgb (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n vision_gripper (torch.Tensor): Vision rgb input\n shape (B, T_img, F, C, H, W)\n Images in the same chunk are collated along T_img, and frames are collated along F\n Currently only F=1 is supported (single-frame videos)\n\n rearrange code based on https://github.com/dhansmair/flamingo-mini\n \"\"\"\n bs = int(vision_rgb.shape[0] // self.window_size)\n vision_rgb = self._encode_vision(vision_rgb)\n vision_rgb = self.perceiver(vision_rgb) # BxL, T, n, d\n vision_rgb = vision_rgb.view(-1, self.window_size, *vision_rgb.shape[1:]) # B, L, T, n, d\n vision_rgb = rearrange(vision_rgb, 'b L T n d -> b T (n L) d')\n\n vision_gripper = self._encode_vision(vision_gripper)\n if self.sep_resampler:\n vision_gripper = self.perceiver_gripper(vision_gripper)\n else:\n vision_gripper = self.perceiver(vision_gripper)\n vision_gripper = vision_gripper.view(-1, self.window_size, *vision_gripper.shape[1:]) # B, L, T, n, d\n vision_gripper = rearrange(vision_gripper, 'b L T n d -> b T (n L) d')\n\n vision_x = torch.cat([vision_rgb, vision_gripper], dim=2)\n\n if self.use_state and state_tensor is not None:\n state_tensor = self.state_fc(state_tensor)\n vision_x = torch.cat([vision_x, state_tensor], dim=2) # reshapes to (b, T, 2*n+1, d)\n \n for layer in self.lang_encoder._get_decoder_layers():\n layer.condition_vis_x(vision_x)\n\n return vision_x" } ]
from logging import debug
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
from typing import Optional
from robot_flamingo.models.flamingo_bc import BCFlamingo
from robot_flamingo.models.flamingo_mpt import MPTFlamingo
from open_flamingo.src.flamingo_lm import FlamingoLMMixin
from open_flamingo.src.utils import extend_instance
from open_flamingo.src.factory import _infer_decoder_layers_attr_name
import open_clip
13,277
def create_model_and_transforms( clip_vision_encoder_path: str, clip_vision_encoder_pretrained: str, lang_encoder_path: str, tokenizer_path: str, cross_attn_every_n_layers: int = 1, use_local_files: bool = False, decoder_layers_attr_name: str = None, # this is the window size sampled from the episode window_size: int = 32, freeze_embed: bool = False, train_params = -1, use_gripper=False, use_state=False, last_action=False, fusion_mode='', pad_length=-1, debug=False, sep_resampler=False, sep_lm_head=False, unfreeze_vit=False, return_feature=False, multi_step_action=1, llm_name='llama_9b', pooling='max', residual=False, tcp_rel=False, replan=-1, decoder_type='lstm', hidden_size=None, freeze_sampler=False, fwd_pred=False, fwd_pred_hand=False, no_image_patch=False, global_latent=1, refresh=-1, **flamingo_kwargs, ): """ Initialize a Flamingo model from a pretrained vision encoder and language encoder. Appends special tokens to the tokenizer and freezes backbones. Args: clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32") clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. "laion2b_s32b_b79k") lang_encoder_path (str): path to pretrained language encoder tokenizer_path (str): path to pretrained tokenizer cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1. use_local_files (bool, optional): whether to use local files. Defaults to False. decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None. Returns: Flamingo: Flamingo model from pretrained vision and language encoders Image processor: Pipeline to preprocess input images Tokenizer: A tokenizer for the language model """ vision_encoder, _, image_processor = open_clip.create_model_and_transforms( clip_vision_encoder_path, pretrained=clip_vision_encoder_pretrained ) # set the vision encoder to output the visual features vision_encoder.visual.output_tokens = True text_tokenizer = AutoTokenizer.from_pretrained( tokenizer_path, local_files_only=use_local_files ) # add Flamingo special tokens to the tokenizer text_tokenizer.add_special_tokens( {"additional_special_tokens": ["<|endofchunk|>", "<image>"]} ) if text_tokenizer.pad_token is None: # Issue: GPT models don't have a pad token, which we use to # modify labels for the loss. text_tokenizer.add_special_tokens({"pad_token": "<PAD>"}) if debug: # Load the local checkpoint into a model instance. lang_encoder = AutoModelForCausalLM.from_pretrained(lang_encoder_path, ignore_keys=["config"], trust_remote_code=True) # Set the `init_weights` parameter to `False` to prevent the model from loading the pretrained weights. 
lang_encoder.init_weights(False) else: print(lang_encoder_path) lang_encoder = AutoModelForCausalLM.from_pretrained( lang_encoder_path, local_files_only=use_local_files, trust_remote_code=True ) # print(lang_encoder_path) # if llm_name == 'llama': # lang_encoder = AutoModelForCausalLM.from_pretrained( # lang_encoder_path, local_files_only=use_local_files # ) # else: # # name = 'mosaicml/mpt-7b' # config = { # "model_type": "auto", # "add_lm_head": True, # } # lang_encoder = AutoModelForCausalLM.from_pretrained( # lang_encoder_path, local_files_only=use_local_files # ) # hacks for MPT-1B, which doesn't have a get_input_embeddings method if "mpt-1b-redpajama-200b" in lang_encoder_path: class EmbeddingFnMixin: def get_input_embeddings(self): return self.transformer.wte def set_input_embeddings(self, new_embeddings): self.transformer.wte = new_embeddings extend_instance(lang_encoder, EmbeddingFnMixin) extend_instance(lang_encoder, FlamingoLMMixin) if decoder_layers_attr_name is None: decoder_layers_attr_name = _infer_decoder_layers_attr_name(lang_encoder) lang_encoder.set_decoder_layers_attr_name(decoder_layers_attr_name) # print(lang_encoder.base_model_prefix) # print(getattr(lang_encoder, lang_encoder.base_model_prefix, lang_encoder)) # print(lang_encoder) lang_encoder.resize_token_embeddings(len(text_tokenizer)) if 'llama' in llm_name: Model_fn = BCFlamingo elif 'mpt' in llm_name:
mpt_dict = { "mpt_3b": { "lang_encoder_path": "path_to/mpt-1b-redpajama-200b", "tokenizer_path": "path_to/mpt-1b-redpajama-200b", "cross_attn_every_n_layers": 1, "openflamingo_checkpoint": "path_to/OpenFlamingo-3B-vitl-mpt1b/checkpoint.pt" }, "mpt_dolly_3b": { "lang_encoder_path": "path_to/mpt-1b-redpajama-200b-dolly", "tokenizer_path": "path_to/mpt-1b-redpajama-200b-dolly", "cross_attn_every_n_layers": 1, "openflamingo_checkpoint": "path_to/OpenFlamingo-3B-vitl-mpt1b-langinstruct/checkpoint.pt" }, "mpt_4b": { "lang_encoder_path": "path_to/RedPajama-INCITE-Instruct-3B-v1", "tokenizer_path": "path_to/RedPajama-INCITE-Instruct-3B-v1", "cross_attn_every_n_layers": 2, "openflamingo_checkpoint": "path_to/OpenFlamingo-4B-vitl-rpj3b-langinstruct/checkpoint.pt" }, "mpt_base_4b": { "lang_encoder_path": "path_to/RedPajama-INCITE-Base-3B-v1", "tokenizer_path": "path_to/RedPajama-INCITE-Base-3B-v1", "cross_attn_every_n_layers": 2, "openflamingo_checkpoint": "path_to/OpenFlamingo-4B-vitl-rpj3b/checkpoint.pt" }, "mpt_9b": { "lang_encoder_path": "path_to/mpt-7b", "tokenizer_path": "path_to/mpt-7b", "cross_attn_every_n_layers": 4, "openflamingo_checkpoint": "path_to/OpenFlamingo-9B-vitl-mpt7b/checkpoint.pt" }, "llama_9b": { "lang_encoder_path": "path_to/llama-7b-hf-jxu124", "tokenizer_path": "path_to/llama-7b-hf-jxu124", "cross_attn_every_n_layers": 4, "openflamingo_checkpoint": "path_to/OpenFlamingo-9B/checkpoint.pt" } } def get_transforms( clip_vision_encoder_path: str = "ViT-L-14", clip_vision_encoder_pretrained: str = "openai", tokenizer_path: str = "path_to/llama-7b-hf-jxu124", use_local_files: bool = False, ): vision_encoder, _, image_processor = open_clip.create_model_and_transforms( clip_vision_encoder_path, pretrained=clip_vision_encoder_pretrained ) text_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") # add Flamingo special tokens to the tokenizer text_tokenizer.add_special_tokens( {"additional_special_tokens": ["<|endofchunk|>", "<image>"]} ) if text_tokenizer.pad_token is None: # Issue: GPT models don't have a pad token, which we use to # modify labels for the loss. text_tokenizer.add_special_tokens({"pad_token": "<PAD>"}) return image_processor, text_tokenizer def create_model_and_transforms( clip_vision_encoder_path: str, clip_vision_encoder_pretrained: str, lang_encoder_path: str, tokenizer_path: str, cross_attn_every_n_layers: int = 1, use_local_files: bool = False, decoder_layers_attr_name: str = None, # this is the window size sampled from the episode window_size: int = 32, freeze_embed: bool = False, train_params = -1, use_gripper=False, use_state=False, last_action=False, fusion_mode='', pad_length=-1, debug=False, sep_resampler=False, sep_lm_head=False, unfreeze_vit=False, return_feature=False, multi_step_action=1, llm_name='llama_9b', pooling='max', residual=False, tcp_rel=False, replan=-1, decoder_type='lstm', hidden_size=None, freeze_sampler=False, fwd_pred=False, fwd_pred_hand=False, no_image_patch=False, global_latent=1, refresh=-1, **flamingo_kwargs, ): """ Initialize a Flamingo model from a pretrained vision encoder and language encoder. Appends special tokens to the tokenizer and freezes backbones. Args: clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32") clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. 
"laion2b_s32b_b79k") lang_encoder_path (str): path to pretrained language encoder tokenizer_path (str): path to pretrained tokenizer cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1. use_local_files (bool, optional): whether to use local files. Defaults to False. decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None. Returns: Flamingo: Flamingo model from pretrained vision and language encoders Image processor: Pipeline to preprocess input images Tokenizer: A tokenizer for the language model """ vision_encoder, _, image_processor = open_clip.create_model_and_transforms( clip_vision_encoder_path, pretrained=clip_vision_encoder_pretrained ) # set the vision encoder to output the visual features vision_encoder.visual.output_tokens = True text_tokenizer = AutoTokenizer.from_pretrained( tokenizer_path, local_files_only=use_local_files ) # add Flamingo special tokens to the tokenizer text_tokenizer.add_special_tokens( {"additional_special_tokens": ["<|endofchunk|>", "<image>"]} ) if text_tokenizer.pad_token is None: # Issue: GPT models don't have a pad token, which we use to # modify labels for the loss. text_tokenizer.add_special_tokens({"pad_token": "<PAD>"}) if debug: # Load the local checkpoint into a model instance. lang_encoder = AutoModelForCausalLM.from_pretrained(lang_encoder_path, ignore_keys=["config"], trust_remote_code=True) # Set the `init_weights` parameter to `False` to prevent the model from loading the pretrained weights. lang_encoder.init_weights(False) else: print(lang_encoder_path) lang_encoder = AutoModelForCausalLM.from_pretrained( lang_encoder_path, local_files_only=use_local_files, trust_remote_code=True ) # print(lang_encoder_path) # if llm_name == 'llama': # lang_encoder = AutoModelForCausalLM.from_pretrained( # lang_encoder_path, local_files_only=use_local_files # ) # else: # # name = 'mosaicml/mpt-7b' # config = { # "model_type": "auto", # "add_lm_head": True, # } # lang_encoder = AutoModelForCausalLM.from_pretrained( # lang_encoder_path, local_files_only=use_local_files # ) # hacks for MPT-1B, which doesn't have a get_input_embeddings method if "mpt-1b-redpajama-200b" in lang_encoder_path: class EmbeddingFnMixin: def get_input_embeddings(self): return self.transformer.wte def set_input_embeddings(self, new_embeddings): self.transformer.wte = new_embeddings extend_instance(lang_encoder, EmbeddingFnMixin) extend_instance(lang_encoder, FlamingoLMMixin) if decoder_layers_attr_name is None: decoder_layers_attr_name = _infer_decoder_layers_attr_name(lang_encoder) lang_encoder.set_decoder_layers_attr_name(decoder_layers_attr_name) # print(lang_encoder.base_model_prefix) # print(getattr(lang_encoder, lang_encoder.base_model_prefix, lang_encoder)) # print(lang_encoder) lang_encoder.resize_token_embeddings(len(text_tokenizer)) if 'llama' in llm_name: Model_fn = BCFlamingo elif 'mpt' in llm_name:
Model_fn = MPTFlamingo
1
2023-11-02 01:36:23+00:00
16k
bigai-nlco/langsuite
langsuite/envs/teach/libs/teach/dataset/dataset.py
[ { "identifier": "Definitions", "path": "langsuite/envs/teach/libs/teach/dataset/definitions.py", "snippet": "class Definitions:\n def __init__(self, definitions=None, simulator=\"THOR\", version=\"2.0\"):\n self.simulator = simulator\n self.version = version\n if definitions is None:\n with importlib.resources.open_text(\n meta_data_files, \"default_definitions.json\"\n ) as data_file:\n definitions = json.load(data_file, object_pairs_hook=OrderedDict)[\n \"definitions\"\n ]\n\n if version == \"2.0\" and simulator == \"THOR\":\n (\n tasks,\n task_id_to_task_dict,\n task_name_to_task_dict,\n ) = Task_THOR.load_tasks(task_definitions)\n definitions[\"tasks\"] = tasks\n self.map_tasks_id2info = task_id_to_task_dict\n self.map_tasks_name2info = task_name_to_task_dict\n else:\n raise RuntimeError(\n \"No support for version \"\n + str(version)\n + \" with simulator \"\n + str(simulator)\n )\n\n self.info = definitions\n self.map_agents_id2info = self.__create_lookup_agents()\n self.map_status_id2name = self.__create_lookup_status()\n (\n self.map_actions_id2info,\n self.map_actions_name2info,\n ) = self.__create_lookup_actions()\n\n def __get_files_recursive(self, root_dir, file_list, extension=\".json\"):\n for path in Path(root_dir).iterdir():\n if path.is_dir():\n self.__get_files_recursive(path, file_list)\n elif os.path.isfile(path) and path.suffix == extension:\n file_list.append(path.resolve())\n\n def to_dict(self):\n info_dict = copy.deepcopy(self.info)\n info_dict[\"tasks\"] = [x.to_dict() for x in info_dict[\"tasks\"]]\n return info_dict\n\n def __create_lookup_actions(self):\n _map_id = OrderedDict()\n for action in self.info[\"actions\"]:\n _map_id[action[\"action_id\"]] = OrderedDict(\n [\n (\"action_name\", action[\"action_name\"]),\n (\"action_type\", action[\"action_type\"]),\n (\"pose\", action.get(\"pose\")),\n (\"pose_delta\", action.get(\"pose_delta\")),\n ]\n )\n _map_name = OrderedDict()\n for action in self.info[\"actions\"]:\n _map_name[action[\"action_name\"]] = OrderedDict(\n [\n (\"action_id\", action[\"action_id\"]),\n (\"action_type\", action[\"action_type\"]),\n (\"pose\", action.get(\"pose\")),\n (\"pose_delta\", action.get(\"pose_delta\")),\n ]\n )\n return _map_id, _map_name\n\n def __create_lookup_tasks(self):\n _map_id = OrderedDict()\n for task in self.info[\"tasks\"]:\n _map_id[task[\"task_id\"]] = OrderedDict(\n [\n (\"task_id\", task[\"task_id\"]),\n (\"task_name\", task[\"task_name\"]),\n (\"task_nparams\", task[\"task_nparams\"]),\n (\"subgoals\", task[\"subgoals\"]),\n ]\n )\n _map_name = OrderedDict()\n for task in self.info[\"tasks\"]:\n _map_name[task[\"task_name\"]] = OrderedDict(\n [\n (\"task_id\", task[\"task_id\"]),\n (\"task_name\", task[\"task_name\"]),\n (\"task_nparams\", task[\"task_nparams\"]),\n (\"subgoals\", task[\"subgoals\"]),\n ]\n )\n return _map_id, _map_name\n\n def __create_lookup_agents(self):\n _map = OrderedDict()\n for agent in self.info[\"agents\"]:\n _map[agent[\"agent_id\"]] = OrderedDict(\n [\n (\"agent_name\", agent[\"agent_name\"]),\n (\"agent_type\", agent[\"agent_type\"]),\n ]\n )\n return _map\n\n def __create_lookup_status(self):\n _map = OrderedDict()\n for status in self.info[\"status\"]:\n _map[status[\"status_id\"]] = status[\"status_name\"]\n return _map" }, { "identifier": "Task", "path": "langsuite/envs/teach/libs/teach/dataset/task.py", "snippet": "class Task:\n def __init__(\n self,\n task_id,\n task_name,\n task_nparams,\n task_params,\n subgoals,\n comments=\"\",\n episodes=None,\n ):\n self.task_id = 
task_id\n self.task_name = task_name\n self.task_nparams = task_nparams\n self.task_params = task_params\n self.subgoals = subgoals\n self.comments = comments\n self.episodes = [] if episodes is None else episodes\n\n def add_episode(self, episode):\n self.episodes.append(episode)\n\n def to_dict(self):\n _dict = OrderedDict()\n _dict[\"task_id\"] = self.task_id\n _dict[\"task_name\"] = self.task_name\n _dict[\"task_params\"] = self.task_params\n _dict[\"task_nparams\"] = self.task_nparams\n _dict[\"subgoals\"] = self.subgoals\n _dict[\"comments\"] = self.comments\n _dict[\"episodes\"] = [x.to_dict() for x in self.episodes]\n return _dict\n\n @classmethod\n def from_dict(cls, task_dict, definitions, process_init_state=True) -> \"Task\":\n episodes = [\n Episode.from_dict(episode_dict, definitions, process_init_state)\n for episode_dict in task_dict.get(\"episodes\")\n ]\n return cls(\n task_id=task_dict[\"task_id\"],\n task_name=task_dict[\"task_name\"],\n task_nparams=task_dict[\"task_nparams\"],\n task_params=task_dict[\"task_params\"],\n subgoals=task_dict[\"subgoals\"],\n comments=task_dict.get(\"comments\"),\n episodes=episodes,\n )" }, { "identifier": "Task_THOR", "path": "langsuite/envs/teach/libs/teach/dataset/task_THOR.py", "snippet": "class Task_THOR(Task):\n def __init__(\n self,\n task_id,\n task_name,\n task_nparams,\n task_params,\n task_anchor_object,\n desc,\n components,\n relations,\n comments=\"\",\n episodes=None,\n ):\n subgoals = dict()\n subgoals[\"components\"] = components\n subgoals[\"relations\"] = relations\n super().__init__(\n task_id, task_name, task_nparams, task_params, subgoals, comments, episodes\n )\n self.task_id = task_id\n self.task_name = task_name\n self.task_nparams = task_nparams\n self.task_params = task_params\n self.task_anchor_object = task_anchor_object\n self.desc = desc\n self.components = components\n self.relations = relations\n self.comments = comments\n self.episodes = [] if episodes is None else episodes\n\n @staticmethod\n def component_to_dict(component):\n if \"task_name\" not in component:\n return component\n else:\n component_dict = copy.deepcopy(component)\n component_dict[\"task\"] = component[\"task\"].to_dict()\n return component_dict\n\n def to_dict(self):\n _dict = OrderedDict()\n _dict[\"task_id\"] = self.task_id\n _dict[\"task_name\"] = self.task_name\n _dict[\"task_params\"] = self.task_params\n _dict[\"task_nparams\"] = self.task_nparams\n _dict[\"task_anchor_object\"] = self.task_anchor_object\n _dict[\"desc\"] = self.desc\n _dict[\"components\"] = dict()\n for component_key, component in self.components.items():\n component_dict = self.component_to_dict(component)\n _dict[\"components\"][component_key] = component_dict\n _dict[\"relations\"] = self.relations\n _dict[\"comments\"] = self.comments\n _dict[\"episodes\"] = [x.to_dict() for x in self.episodes]\n return _dict\n\n @classmethod\n def from_dict(cls, task_dict, definitions, process_init_state=True) -> \"Task_THOR\":\n episodes = [\n Episode.from_dict(episode_dict, definitions, process_init_state)\n for episode_dict in task_dict.get(\"episodes\")\n ]\n return cls(\n task_id=task_dict[\"task_id\"],\n task_name=task_dict[\"task_name\"],\n task_nparams=task_dict[\"task_nparams\"],\n task_params=task_dict[\"task_params\"],\n task_anchor_object=task_dict[\"task_anchor_object\"],\n desc=task_dict[\"desc\"],\n components=task_dict[\"components\"],\n relations=task_dict[\"relations\"],\n comments=task_dict.get(\"comments\"),\n episodes=episodes,\n )\n\n @classmethod\n 
def from_v1_dict(\n cls, task_dict, definitions, process_init_state=True\n ) -> \"Task_THOR\":\n episodes = [\n Episode.from_dict(episode_dict, definitions, process_init_state)\n for episode_dict in task_dict.get(\"episodes\")\n ]\n return cls(\n task_id=task_dict[\"task_id\"],\n task_name=task_dict[\"task_name\"],\n task_nparams=task_dict[\"task_nparams\"],\n task_params=task_dict[\"task_params\"],\n task_anchor_object=None,\n desc=\"Complete the following tasks.\",\n components=dict(enumerate(task_dict[\"subgoals\"])),\n relations=[],\n comments=task_dict.get(\"comments\"),\n episodes=episodes,\n )\n\n @staticmethod\n def load_tasks(resource_package):\n \"\"\"\n Given a directory with\n \"\"\"\n tasks = list()\n task_id_to_task_dict = dict()\n task_name_to_task_dict = dict()\n task_dependencies = dict()\n resolved_task_names = set()\n required_keys = [\n \"task_id\",\n \"task_name\",\n \"task_nparams\",\n \"task_anchor_object\",\n \"desc\",\n \"components\",\n \"relations\",\n ]\n for task_file in importlib.resources.contents(resource_package):\n if not importlib.resources.is_resource(resource_package, task_file):\n continue\n if not task_file.endswith(\".json\"):\n continue\n\n logger.info(\"Processing file %s\" % task_file)\n with importlib.resources.open_text(\n resource_package, task_file\n ) as file_handle:\n task_definition = json.load(file_handle)\n if type(task_definition) != dict:\n raise RuntimeError(\n \"Badly formatted task file: \"\n + str(task_file)\n + \". Each task file must be a json dictionary with keys: \"\n + str(required_keys)\n )\n for key in required_keys:\n if key not in task_definition.keys():\n raise RuntimeError(\n \"Badly formatted task file. Missing key:\" + str(key)\n )\n task = Task_THOR(\n task_id=task_definition[\"task_id\"],\n task_name=task_definition[\"task_name\"],\n task_nparams=task_definition[\"task_nparams\"],\n task_params=None,\n task_anchor_object=task_definition[\"task_anchor_object\"],\n desc=task_definition[\"desc\"],\n components=task_definition[\"components\"],\n relations=task_definition[\"relations\"],\n comments=\"\",\n episodes=None,\n )\n tasks.append(task)\n if task.task_id in task_id_to_task_dict.keys():\n raise RuntimeError(\n \"Duplicate task_id \"\n + str(task.task_id)\n + \" with one occurrence in \"\n + str(task_file)\n )\n if task.task_name in task_name_to_task_dict.keys():\n raise RuntimeError(\n \"Duplicate task_name \"\n + str(task.task_name)\n + \" with one occurrence in \"\n + str(task_file)\n )\n task_id_to_task_dict[task.task_id] = task\n task_name_to_task_dict[task.task_name] = task\n\n task_dependencies[task.task_name] = list()\n resolved = True\n for component_name, component_dict in task.components.items():\n if \"task_name\" in component_dict:\n resolved = False\n task_dependencies[task.task_name].append(\n component_dict[\"task_name\"]\n )\n else:\n atomic_component_keys = {\n \"determiner\",\n \"primary_condition\",\n \"instance_shareable\",\n \"conditions\",\n \"condition_failure_descs\",\n }\n if (\n len(\n atomic_component_keys.difference(\n set(component_dict.keys())\n )\n )\n > 0\n ):\n raise RuntimeError(\n \"Improperly defined component \"\n + str(component_name)\n + \" in task \"\n + str(task.task_name)\n + \". 
Must contain keys: \"\n + str(atomic_component_keys)\n )\n if resolved:\n resolved_task_names.add(task.task_name)\n\n # logger.info(\"Loaded task names: %s\", task_name_to_task_dict.keys())\n\n # Resolve task dependencies\n unresolved_tasks = set()\n unresolvable_tasks = set()\n for task in tasks:\n resolved = True\n for component_name, component_dict in task.components.items():\n if \"task_name\" in component_dict:\n if component_dict[\"task_name\"] not in task_name_to_task_dict:\n unresolvable_tasks.add(\n (task.task_name, component_dict[\"task_name\"])\n )\n resolved = False\n break\n\n if component_dict[\"task_name\"] in resolved_task_names:\n task.components[component_name][\"task\"] = copy.deepcopy(\n task_name_to_task_dict[component_dict[\"task_name\"]]\n )\n task.components[component_name][\n \"task\"\n ].task_params = component_dict[\"task_params\"]\n else:\n unresolved_tasks.add(task.task_name)\n resolved = False\n break\n if resolved:\n resolved_task_names.add(task.task_name)\n\n if len(unresolvable_tasks) > 0:\n error_msg = \"Could not resolve the following tasks: \" + \"\\n\\t\".join(\n [\n 'Subtask \"' + str(dependency) + '\" in task \"' + str(task_name) + '\"'\n for (task_name, dependency) in unresolvable_tasks\n ]\n )\n raise RuntimeError(error_msg)\n\n while len(unresolved_tasks) > 0:\n # logger.info(\"Still resolving tasks: %s\", unresolved_tasks)\n for unresolved_task_name in unresolved_tasks:\n task = task_name_to_task_dict[unresolved_task_name]\n resolved = True\n for component_name, component_dict in task.components.items():\n if \"task_name\" in component_dict:\n if component_dict[\"task_name\"] in resolved_task_names:\n task.components[component_name][\"task\"] = copy.deepcopy(\n task_name_to_task_dict[component_dict[\"task_name\"]]\n )\n task.components[component_name][\n \"task\"\n ].task_params = component_dict[\"task_params\"]\n else:\n resolved = False\n break\n if resolved:\n resolved_task_names.add(task.task_name)\n unresolved_tasks = unresolved_tasks.difference(resolved_task_names)\n\n return tasks, task_id_to_task_dict, task_name_to_task_dict\n\n @staticmethod\n def __get_files_recursive(root_dir, file_list, extension=\".json\"):\n for path in Path(root_dir).iterdir():\n if path.is_dir():\n Task_THOR.__get_files_recursive(path, file_list)\n elif os.path.isfile(path) and path.suffix == extension:\n file_list.append(path.resolve())\n\n def __write_task_params_into_str(self, s):\n # Need to go through params in reverse order so that multiple digits get treated correctly\n for idx in range(len(self.task_params) - 1, -1, -1):\n s = s.replace(\"#%d\" % idx, self.task_params[idx])\n return s\n\n def __write_task_params_into_list(self, task_params_list):\n for idx, elem in enumerate(task_params_list):\n if type(elem) == str:\n task_params_list[idx] = self.__write_task_params_into_str(elem)\n elif type(elem) == list:\n task_params_list[idx] = self.__write_task_params_into_list(elem)\n elif type(elem) == dict:\n task_params_list[idx] = self.__write_task_params_into_dict(elem)\n elif isinstance(elem, Task_THOR):\n elem.write_task_params()\n return task_params_list\n\n def __write_task_params_into_dict(self, d):\n keys_to_delete = list()\n dict_items = list(d.items())\n for key, value in dict_items:\n key_with_params = self.__write_task_params_into_str(key)\n if key_with_params != key:\n keys_to_delete.append(key)\n if type(value) == str:\n d[key_with_params] = self.__write_task_params_into_str(value)\n # if the value is a variable that just got filled, they key is not 
a determiner and the value is numeric\n if np.char.isnumeric(d[key_with_params]) and key_with_params not in [\n \"determiner\"\n ]:\n # then this is a variable indicating the value of a simulator property that needs to be int\n d[key_with_params] = int(d[key_with_params])\n elif type(value) == list:\n d[key_with_params] = self.__write_task_params_into_list(value)\n elif type(value) == dict:\n d[key_with_params] = self.__write_task_params_into_dict(value)\n elif isinstance(value, Task_THOR):\n value.write_task_params()\n d[key_with_params] = value\n for key in keys_to_delete:\n del d[key]\n return d\n\n def write_task_params(self):\n try:\n assert len(self.task_params) == self.task_nparams\n except AssertionError as e:\n logger.error(\n f\"Task {self.task_name} takes {self.task_nparams} params but supplied {len(self.task_params)}\",\n exc_info=True,\n )\n raise e\n self.desc = self.__write_task_params_into_str(self.desc)\n if self.task_anchor_object is not None:\n self.task_anchor_object = self.__write_task_params_into_str(\n self.task_anchor_object\n )\n self.components = self.__write_task_params_into_dict(self.components)\n self.relations = self.__write_task_params_into_list(self.relations)\n\n def __get_object_by_id(self, m, obj_id):\n for obj in m:\n if obj[\"objectId\"] == obj_id:\n return obj\n return False\n\n def get_parent_receptacles(self, obj, objects):\n if \"parentReceptacles\" in obj and obj[\"parentReceptacles\"] is not None:\n return obj[\"parentReceptacles\"]\n\n elif \"simbotLastParentReceptacle\" in obj:\n immediate_parent_receptacle = obj[\"simbotLastParentReceptacle\"]\n if (\n immediate_parent_receptacle is not None\n and immediate_parent_receptacle != obj[\"objectId\"]\n ):\n # Second clause is to prevent infinite recursion in weird corner cases that should ideally never happen\n parent_receptacles = [immediate_parent_receptacle]\n immediate_parent_receptacle_obj = self.__get_object_by_id(\n objects, immediate_parent_receptacle\n )\n if type(immediate_parent_receptacle_obj) == dict:\n further_parent_receptacles = self.get_parent_receptacles(\n immediate_parent_receptacle_obj, objects\n )\n if further_parent_receptacles is not None:\n parent_receptacles += further_parent_receptacles\n return parent_receptacles\n\n return None\n\n def check_component_n_instances(\n self,\n all_objects_cur_state,\n component,\n num_instances,\n simulator=None,\n allow_state_change=False,\n ):\n if component[\"instance_shareable\"]:\n num_instances = 1\n component_success = False\n satisifed_objects = list()\n candidate_objects = list() # Contains num_instances closest matches\n output = dict()\n num_instances_to_check = num_instances\n\n for obj in all_objects_cur_state:\n props_sat = self.obj_satisfies_props(\n obj, component[\"conditions\"], all_objects_cur_state\n )\n props_sat_v = list(props_sat.values())\n\n if np.all(props_sat_v):\n satisifed_objects.append(obj)\n if len(satisifed_objects) >= num_instances:\n component_success = True\n continue\n\n if not component_success:\n # Closet object must match objectType, then heuristically whichever matches most conditions.\n # Primary condition (e.g., objectType) must match.\n if (\n props_sat[component[\"primary_condition\"]]\n or\n # Or, if condition (e.g., objectType) is a slice, can match the base object condition var.\n (\n \"Sliced\"\n in component[\"conditions\"][component[\"primary_condition\"]]\n and obj[component[\"primary_condition\"]]\n == component[\"conditions\"][\n component[\"primary_condition\"]\n ].replace(\"Sliced\", 
\"\")\n )\n or (\n \"Cracked\"\n in component[\"conditions\"][component[\"primary_condition\"]]\n and obj[component[\"primary_condition\"]]\n == component[\"conditions\"][\n component[\"primary_condition\"]\n ].replace(\"Cracked\", \"\")\n )\n ):\n # Either the primary condition is satisfied or can be with a state change\n non_primary_prop_vals = [\n value\n for (key, value) in props_sat.items()\n if key != component[\"primary_condition\"]\n ]\n if (\n np.all(non_primary_prop_vals)\n and \"Sliced\"\n in component[\"conditions\"][component[\"primary_condition\"]]\n ):\n # We already checked that the primary condition would be satisfied with a state change\n # And one object, say a potato can produce many slices\n num_instances_to_check = 1\n if allow_state_change:\n satisifed_objects.append(obj)\n if len(satisifed_objects) >= num_instances_to_check:\n component_success = True\n continue\n\n if not component_success:\n obj_dist = 0\n if simulator is not None:\n obj_dist = simulator.obj_dist_to_nearest_agent(obj)\n obj_candidate_dict = {\n \"object\": obj,\n \"props_sat\": props_sat,\n \"num_props_sat\": props_sat_v.count(True),\n \"distance_to_agent\": obj_dist,\n }\n if len(candidate_objects) == 0:\n candidate_objects.append(obj_candidate_dict)\n else:\n # Insert into sorted position\n insert_position = None\n for candidate_idx, cur_candidate in enumerate(\n candidate_objects\n ):\n if obj_candidate_dict[\"num_props_sat\"] > cur_candidate[\n \"num_props_sat\"\n ] or ( # Satisifes more\n obj_candidate_dict[\"num_props_sat\"]\n == cur_candidate[\"num_props_sat\"]\n and obj_candidate_dict[\"distance_to_agent\"]\n < cur_candidate[\"distance_to_agent\"]\n ): # Closer\n insert_position = candidate_idx\n break\n if insert_position is not None:\n # This is better than some existing candidate\n candidate_objects.insert(\n insert_position, obj_candidate_dict\n )\n else:\n # Worse than all existing candidates\n candidate_objects.append(obj_candidate_dict)\n\n num_unsatisfied_instances_needed = max(\n 0, num_instances_to_check - len(satisifed_objects)\n )\n\n output[\"success\"] = component_success\n output[\"satisfied_objects\"] = satisifed_objects\n output[\"candidate_objects\"] = [\n candidate[\"object\"] for candidate in candidate_objects\n ]\n num_conditions_per_obj = len(component[\"conditions\"].keys())\n output[\"goal_conditions_total\"] = num_conditions_per_obj * num_instances\n output[\"steps\"] = list()\n num_problem_objects = min(\n len(candidate_objects), num_unsatisfied_instances_needed\n )\n problem_objects = candidate_objects[:num_problem_objects]\n output[\"goal_conditions_satisfied\"] = (\n num_conditions_per_obj * min(num_instances, len(satisifed_objects))\n ) + sum([candidate[\"num_props_sat\"] for candidate in problem_objects])\n keys_to_problem_objects = dict()\n for candidate in problem_objects:\n for key in candidate[\"props_sat\"]:\n if (\n not candidate[\"props_sat\"][key]\n and key in component[\"condition_failure_descs\"]\n ):\n if key not in keys_to_problem_objects:\n keys_to_problem_objects[key] = list()\n keys_to_problem_objects[key].append(candidate)\n for key, desc in component[\"condition_failure_descs\"].items():\n if key in keys_to_problem_objects:\n for candidate in keys_to_problem_objects[key]:\n output[\"steps\"].append(\n {\n \"success\": False,\n \"objectId\": candidate[\"object\"][\"objectId\"],\n \"objectType\": candidate[\"object\"][\"objectType\"],\n \"desc\": desc,\n }\n )\n else:\n representative_obj = None\n if len(satisifed_objects) > 0:\n 
representative_obj = satisifed_objects[0]\n elif len(candidate_objects) > 0:\n representative_obj = candidate_objects[0][\"object\"]\n if representative_obj is not None:\n output[\"steps\"].append(\n {\n \"success\": True,\n \"objectId\": representative_obj[\"objectId\"],\n \"objectType\": representative_obj[\"objectType\"],\n \"desc\": desc,\n }\n )\n output[\"problem_keys\"] = dict()\n for candidate in problem_objects:\n output[\"problem_keys\"][candidate[\"object\"][\"objectId\"]] = list()\n for key in candidate[\"props_sat\"]:\n if not candidate[\"props_sat\"][key]:\n output[\"problem_keys\"][candidate[\"object\"][\"objectId\"]].append(\n {\n \"objectType\": candidate[\"object\"][\"objectType\"],\n \"determiner\": component[\"determiner\"],\n \"property_name\": key,\n \"desired_property_value\": component[\"conditions\"][key],\n }\n )\n return output\n\n def check_episode_preconditions(\n self, simulator, all_objects_cur_state, num_instances_needed=1\n ):\n \"\"\"\n :param simulator: instance of Simulator_THOR\n :param all_objects_cur_state: List of dictionaries, each of which has key, value pairs corresponding to\n current properties of an object in the environment\n :param num_instances_needed: Only relevant for tasks with task_anchor_object != None - Sets the number of anchor\n objects to be created\n \"\"\"\n self.write_task_params()\n\n for component in self.components.values():\n if component[\"determiner\"] == \"0\":\n continue\n\n component_instances_needed = num_instances_needed\n if component[\"determiner\"] == \"all\":\n component_instances_needed = 1\n allow_state_change = False\n else:\n if component[\"determiner\"] != \"a\":\n number_determiner = int(component[\"determiner\"])\n component_instances_needed *= number_determiner\n allow_state_change = True\n\n if \"task_name\" in component:\n component_success = component[\"task\"].check_episode_preconditions(\n simulator, all_objects_cur_state, component_instances_needed\n )\n if not component_success:\n return False\n else:\n component_existence_dict = dict()\n component_existence_dict[\"primary_condition\"] = component[\n \"primary_condition\"\n ]\n component_existence_dict[\"condition_failure_descs\"] = component[\n \"condition_failure_descs\"\n ]\n component_existence_dict[\"instance_shareable\"] = component[\n \"instance_shareable\"\n ]\n component_existence_dict[\"conditions\"] = dict()\n component_existence_dict[\"conditions\"][\n component[\"primary_condition\"]\n ] = component[\"conditions\"][component[\"primary_condition\"]]\n output = self.check_component_n_instances(\n all_objects_cur_state,\n component_existence_dict,\n component_instances_needed,\n simulator,\n allow_state_change,\n )\n if not output[\"success\"]:\n return False\n return True\n\n @staticmethod\n def get_obj_by_id(obj_id, objects):\n for obj in objects:\n if obj[\"objectId\"] == obj_id:\n return obj\n return None\n\n # Returns a list parallel to [props] of bools.\n def obj_satisfies_props(self, obj, props, all_objects):\n sats = {}\n for prop in props:\n # Property is not satisfied if the object doesn't even have it.\n if prop not in obj:\n sats[prop] = False\n continue\n\n if prop == \"objectType\":\n sats[prop] = self.check_object_type(obj, props[prop])\n continue\n elif prop == \"simbotLastParentReceptacle\" and props[prop] is not None:\n # need to match type / class rather than value\n value_obj = self.get_obj_by_id(obj[prop], all_objects)\n if value_obj is None or (\n not self.check_object_type(value_obj, props[prop])\n and not props[prop] 
in value_obj[\"simbotObjectClass\"]\n ):\n sats[prop] = False\n continue\n elif (\n prop == \"parentReceptacles\"\n ): # list of objectIds, which don't directly reveal objectType.\n parent_receptacles = self.get_parent_receptacles(obj, all_objects)\n parent_match = False\n if parent_receptacles is not None:\n for oid in parent_receptacles:\n _obj = self.get_obj_by_id(oid, all_objects)\n # value of parentReceptacle in JSON is objectType to contain.\n if (\n self.check_object_type(_obj, props[prop])\n or props[prop] in _obj[\"simbotObjectClass\"]\n ):\n parent_match = True\n if not parent_match:\n sats[prop] = False\n continue\n # Binary properties encoded as 1/0 truths in JSON.\n elif type(props[prop]) is int and (props[prop] == 1 or props[prop] == 0):\n if (obj[prop] and props[prop] == 0) or (\n not obj[prop] and props[prop] == 1\n ):\n sats[prop] = False\n continue\n # Properties that return lists.\n elif type(obj[prop]) is list:\n if props[prop] not in obj[prop]:\n sats[prop] = False\n continue\n # Direct value comparisons.\n elif props[prop] != obj[prop]:\n sats[prop] = False\n continue\n # If we get through all these condition checks without failing, prop is satisfied.\n sats[prop] = True\n assert len(props) == len(sats)\n return sats\n\n @staticmethod\n def check_object_type(obj, desired_value):\n if obj[\"objectType\"] == desired_value:\n return True\n elif (\n (obj[\"objectType\"] == \"SinkBasin\" and desired_value == \"Sink\")\n or (obj[\"objectType\"] == \"Sink\" and desired_value == \"SinkBasin\")\n or (obj[\"objectType\"] == \"BathtubBasin\" and desired_value == \"Bathtub\")\n or (obj[\"objectType\"] == \"Bathtub\" and desired_value == \"BathtubBasin\")\n ):\n return True\n else:\n return False\n\n def check_component_all_instances(self, all_objects_cur_state, component):\n if \"task_name\" in component:\n raise NotImplementedError(\n 'Determiner \"all\" is not supported with components that are Tasks'\n )\n\n success = True\n satisfied_objects = list()\n problem_objects = list()\n all_objects = all_objects_cur_state\n for obj in all_objects:\n props_sat = self.obj_satisfies_props(\n obj, component[\"conditions\"], all_objects\n )\n # If the object matches the primary condition, then it must satisfy properties.\n if props_sat[component[\"primary_condition\"]]:\n props_sat_v = list(props_sat.values())\n if not np.all(props_sat_v):\n success = False # if any one object doesn't satisfy, the subgoal isn't satisfied\n problem_objects.append({\"object\": obj, \"props_sat\": props_sat})\n else:\n satisfied_objects.append(obj)\n\n output = dict()\n output[\"success\"] = success\n # Total number of conditions needed in this component = Number of conditions per object * Number of object of\n # this type\n num_conditions_per_obj = len(component[\"conditions\"].keys())\n output[\"goal_conditions_total\"] = num_conditions_per_obj * (\n len(satisfied_objects) + len(problem_objects)\n )\n output[\"satisfied_objects\"] = satisfied_objects\n output[\"candidate_objects\"] = [\n candidate[\"object\"] for candidate in problem_objects\n ]\n # satisfied_objects have all conditions met; for the others add the number of conditions met\n output[\"goal_conditions_satisfied\"] = (\n num_conditions_per_obj * len(satisfied_objects)\n ) + sum([sum(candidate[\"props_sat\"].values()) for candidate in problem_objects])\n output[\"steps\"] = list()\n keys_to_problem_objects = dict()\n for candidate in problem_objects:\n for key in candidate[\"props_sat\"]:\n if (\n not candidate[\"props_sat\"][key]\n and key in 
component[\"condition_failure_descs\"]\n ):\n if key not in keys_to_problem_objects:\n keys_to_problem_objects[key] = list()\n keys_to_problem_objects[key].append(candidate)\n for key, desc in component[\"condition_failure_descs\"].items():\n if key in keys_to_problem_objects:\n for candidate in keys_to_problem_objects[key]:\n output[\"steps\"].append(\n {\n \"success\": False,\n \"objectId\": candidate[\"object\"][\"objectId\"],\n \"objectType\": candidate[\"object\"][\"objectType\"],\n \"desc\": desc,\n }\n )\n else:\n representative_obj = None\n if len(satisfied_objects) > 0:\n representative_obj = satisfied_objects[0]\n if representative_obj is not None:\n output[\"steps\"].append(\n {\n \"success\": True,\n \"objectId\": representative_obj[\"objectId\"],\n \"objectType\": representative_obj[\"objectType\"],\n \"desc\": desc,\n }\n )\n output[\"problem_keys\"] = dict()\n for candidate in problem_objects:\n output[\"problem_keys\"][candidate[\"object\"][\"objectId\"]] = list()\n for key in candidate[\"props_sat\"]:\n if not candidate[\"props_sat\"][key]:\n output[\"problem_keys\"][candidate[\"object\"][\"objectId\"]].append(\n {\n \"objectType\": candidate[\"object\"][\"objectType\"],\n \"determiner\": component[\"determiner\"],\n \"property_name\": key,\n \"desired_property_value\": component[\"conditions\"][key],\n }\n )\n\n return output\n\n def check_relation(\n self,\n relation,\n per_component_satisfied_objects,\n per_component_candidate_objects,\n all_objects_cur_state,\n num_task_instances=1,\n ):\n if len(relation[\"tail_entity_list\"]) > 1:\n raise NotImplementedError(\n \"Relation checking not implemented for relations with more than one ail entity. Check definition of task\"\n + str(self.task_name)\n )\n\n output = dict()\n # Assume one goal condition per object for which the relation is to be satisfied. 
Then count the number of head\n # objects to be adjusted\n output[\"goal_conditions_total\"] = 0\n for idx, head_determiner in enumerate(relation[\"head_determiner_list\"]):\n if head_determiner not in [\"a\", \"all\"]:\n output[\"goal_conditions_total\"] += num_task_instances * int(\n head_determiner\n )\n elif head_determiner == \"a\":\n output[\"goal_conditions_total\"] += num_task_instances\n else:\n head_entity = relation[\"head_entity_list\"][idx]\n head_candidate_objects = list()\n if head_entity in per_component_satisfied_objects:\n head_candidate_objects += per_component_satisfied_objects[\n head_entity\n ]\n if head_entity in per_component_candidate_objects:\n head_candidate_objects += per_component_candidate_objects[\n head_entity\n ]\n output[\"goal_conditions_total\"] += num_task_instances * len(\n head_candidate_objects\n )\n\n tail_determiner = relation[\"tail_determiner_list\"][0]\n tail_candidate_objects = list()\n if relation[\"tail_entity_list\"][0] in per_component_satisfied_objects:\n tail_candidate_objects += per_component_satisfied_objects[\n relation[\"tail_entity_list\"][0]\n ]\n if relation[\"tail_entity_list\"][0] in per_component_candidate_objects:\n tail_candidate_objects += per_component_candidate_objects[\n relation[\"tail_entity_list\"][0]\n ]\n tail_candidate_obj_ids = set(\n [obj[\"objectId\"] for obj in tail_candidate_objects]\n )\n if len(tail_candidate_obj_ids) < 1:\n output[\"success\"] = False\n output[\"satisfied_objects\"] = []\n output[\"steps\"] = []\n output[\"problem_keys\"] = []\n output[\"goal_conditions_satisfied\"] = 0\n return output\n tail_obj_type = self.get_obj_by_id(\n list(tail_candidate_obj_ids)[0], all_objects_cur_state\n )[\"objectType\"]\n num_head_entities = len(relation[\"head_entity_list\"])\n\n steps = list()\n problem_keys = dict()\n satisfied_objects = list()\n\n if tail_determiner == \"a\":\n success = True\n goal_conditions_satisfied = 0\n\n for idx in range(num_head_entities):\n cur_satisfied_objects = list()\n cur_unsatisfied_objects = list()\n head_determiner = relation[\"head_determiner_list\"][idx]\n if head_determiner == \"0\":\n continue\n\n head_entity = relation[\"head_entity_list\"][idx]\n head_candidate_objects = list()\n if head_entity in per_component_satisfied_objects:\n head_candidate_objects += per_component_satisfied_objects[\n head_entity\n ]\n if head_entity in per_component_candidate_objects:\n head_candidate_objects += per_component_candidate_objects[\n head_entity\n ]\n\n for head_obj in head_candidate_objects:\n if relation[\"property\"] == \"parentReceptacles\":\n head_property_vals = self.get_parent_receptacles(\n head_obj, all_objects_cur_state\n )\n else:\n head_property_vals = head_obj[relation[\"property\"]]\n cur_head_satisfied = False\n if head_property_vals is not None:\n for property_value_obj_id in head_property_vals:\n if property_value_obj_id in tail_candidate_obj_ids:\n cur_head_satisfied = True\n cur_satisfied_objects.append(head_obj)\n break\n if not cur_head_satisfied:\n cur_unsatisfied_objects.append(head_obj)\n goal_conditions_satisfied += len(cur_satisfied_objects)\n\n if head_determiner == \"all\":\n if len(cur_unsatisfied_objects) > 0:\n for obj in cur_unsatisfied_objects:\n steps.append(\n {\n \"success\": False,\n \"objectId\": obj[\"objectId\"],\n \"objectType\": obj[\"objectType\"],\n \"desc\": relation[\"failure_desc\"],\n }\n )\n if obj[\"objectId\"] not in problem_keys:\n problem_keys[obj[\"objectId\"]] = list()\n problem_keys[obj[\"objectId\"]].append(\n {\n 
\"objectType\": obj[\"objectType\"],\n \"determiner\": head_determiner,\n \"property_name\": relation[\"property\"],\n \"desired_property_value\": tail_obj_type,\n }\n )\n success = False\n elif len(cur_satisfied_objects) > 0:\n representative_obj = cur_satisfied_objects[0]\n steps.append(\n {\n \"success\": True,\n \"objectId\": representative_obj[\"objectId\"],\n \"objectType\": representative_obj[\"objectType\"],\n \"desc\": relation[\"failure_desc\"],\n }\n )\n else:\n num_instances_needed = num_task_instances\n if head_determiner != \"a\":\n num_instances_needed = num_task_instances * int(head_determiner)\n if len(cur_satisfied_objects) < num_instances_needed:\n success = False\n num_unsatisfied_objects_needed = num_instances_needed - len(\n cur_satisfied_objects\n )\n num_unsatisfied_objects_available = min(\n num_unsatisfied_objects_needed, len(cur_unsatisfied_objects)\n )\n for obj in cur_unsatisfied_objects[\n :num_unsatisfied_objects_available\n ]:\n steps.append(\n {\n \"success\": False,\n \"objectId\": obj[\"objectId\"],\n \"objectType\": obj[\"objectType\"],\n \"desc\": relation[\"failure_desc\"],\n }\n )\n if obj[\"objectId\"] not in problem_keys:\n problem_keys[obj[\"objectId\"]] = list()\n problem_keys[obj[\"objectId\"]].append(\n {\n \"objectType\": obj[\"objectType\"],\n \"determiner\": head_determiner,\n \"property_name\": relation[\"property\"],\n \"desired_property_value\": tail_obj_type,\n }\n )\n satisfied_objects += cur_satisfied_objects\n\n output[\"success\"] = success\n output[\"satisfied_objects\"] = satisfied_objects\n output[\"steps\"] = steps\n output[\"problem_keys\"] = problem_keys\n output[\"goal_conditions_satisfied\"] = goal_conditions_satisfied\n if (\n output[\"success\"]\n and len(output[\"satisfied_objects\"]) > 0\n and len(output[\"steps\"]) == 0\n ):\n representative_obj = satisfied_objects[0]\n steps.append(\n {\n \"success\": True,\n \"objectId\": representative_obj[\"objectId\"],\n \"objectType\": representative_obj[\"objectType\"],\n \"desc\": relation[\"failure_desc\"],\n }\n )\n return output\n\n elif tail_determiner == \"the\":\n satisfied_tail_objs = list()\n sorted_candidates_tail_objs = list()\n\n for tail_obj in tail_candidate_objects:\n tail_obj_id = tail_obj[\"objectId\"]\n cur_tail_obj_status = dict()\n cur_tail_obj_status[\"tail_obj\"] = tail_obj\n cur_tail_obj_status[\"per_head_status\"] = list()\n\n for idx in range(num_head_entities):\n cur_satisfied_objects = list()\n cur_unsatisfied_objects = list()\n cur_unsatisfied_descs = list()\n cur_unsatisfied_keys = list()\n\n head_determiner = relation[\"head_determiner_list\"][idx]\n if head_determiner == \"0\":\n continue\n head_entity = relation[\"head_entity_list\"][idx]\n head_candidate_objects = list()\n if head_entity in per_component_satisfied_objects:\n head_candidate_objects += per_component_satisfied_objects[\n head_entity\n ]\n if head_entity in per_component_candidate_objects:\n head_candidate_objects += per_component_candidate_objects[\n head_entity\n ]\n\n for head_obj in head_candidate_objects:\n if relation[\"property\"] == \"parentReceptacles\":\n head_property_vals = self.get_parent_receptacles(\n head_obj, all_objects_cur_state\n )\n else:\n head_property_vals = head_obj[relation[\"property\"]]\n if (\n head_property_vals is not None\n and tail_obj_id in head_property_vals\n ):\n cur_satisfied_objects.append(head_obj)\n else:\n cur_unsatisfied_objects.append(head_obj)\n cur_unsatisfied_descs.append([relation[\"failure_desc\"]])\n cur_unsatisfied_keys.append(\n [\n 
[\n head_determiner,\n relation[\"property\"],\n tail_obj[\"objectType\"],\n ]\n ]\n )\n\n if head_determiner == \"all\":\n instances_needed = len(head_candidate_objects)\n elif head_determiner == \"a\":\n instances_needed = 1\n else:\n instances_needed = int(head_determiner)\n\n cur_tail_obj_status[\"per_head_status\"].append(\n {\n \"head_determiner\": head_determiner,\n \"head_entity\": head_entity,\n \"satisfied_objects\": cur_satisfied_objects,\n \"unsatisfied_objects\": cur_unsatisfied_objects,\n \"unsatisfied_descs\": cur_unsatisfied_descs,\n \"unsatisfied_keys\": cur_unsatisfied_keys,\n \"instances_needed\": instances_needed,\n \"goal_conditions_satisfied\": min(\n instances_needed, len(cur_satisfied_objects)\n ),\n \"success\": len(cur_satisfied_objects) >= instances_needed,\n }\n )\n\n success = np.all(\n [e[\"success\"] for e in cur_tail_obj_status[\"per_head_status\"]]\n )\n if success:\n satisfied_tail_objs.append(cur_tail_obj_status)\n if len(satisfied_tail_objs) >= num_task_instances:\n break\n continue\n\n num_heads_satisfied = sum(\n [e[\"success\"] for e in cur_tail_obj_status[\"per_head_status\"]]\n )\n instances_per_head_satisfied = [\n min(e[\"instances_needed\"], len(e[\"satisfied_objects\"]))\n for e in cur_tail_obj_status[\"per_head_status\"]\n ]\n cur_tail_obj_status[\"num_heads_satisfied\"] = num_heads_satisfied\n cur_tail_obj_status[\"num_instances_satisfied\"] = sum(\n instances_per_head_satisfied\n )\n inserted = False\n for idx in range(len(sorted_candidates_tail_objs)):\n if (\n cur_tail_obj_status[\"num_heads_satisfied\"]\n > sorted_candidates_tail_objs[idx][\"num_heads_satisfied\"]\n or cur_tail_obj_status[\"num_instances_satisfied\"]\n > sorted_candidates_tail_objs[idx][\"num_instances_satisfied\"]\n ):\n sorted_candidates_tail_objs.insert(idx, cur_tail_obj_status)\n inserted = True\n break\n if not inserted:\n sorted_candidates_tail_objs.append(cur_tail_obj_status)\n num_tail_candidates_needed = max(\n 0, num_task_instances - len(satisfied_tail_objs)\n )\n if len(sorted_candidates_tail_objs) > num_tail_candidates_needed:\n sorted_candidates_tail_objs = sorted_candidates_tail_objs[\n :num_tail_candidates_needed\n ]\n\n if len(satisfied_tail_objs) == 0 and len(sorted_candidates_tail_objs) == 0:\n # This should ideally never happen - it means there are no tail candidates\n raise NotImplementedError(\n 'Not implemented - handling tail determiner \"the\" with no tail entity candidates'\n )\n else:\n output[\"success\"] = len(satisfied_tail_objs) >= num_task_instances\n output[\"satisfied_objects\"] = list()\n output[\"goal_conditions_satisfied\"] = 0\n for idx, tail_obj in enumerate(satisfied_tail_objs):\n for head_status in tail_obj[\"per_head_status\"]:\n if idx < num_task_instances:\n output[\"goal_conditions_satisfied\"] += head_status[\n \"goal_conditions_satisfied\"\n ]\n output[\"satisfied_objects\"] += head_status[\"satisfied_objects\"]\n\n output[\"steps\"] = list()\n output[\"problem_keys\"] = dict()\n num_tail_candidates_needed = max(\n 0, num_task_instances - len(satisfied_tail_objs)\n )\n if num_tail_candidates_needed > 0:\n tail_candidates = sorted_candidates_tail_objs[\n :num_tail_candidates_needed\n ]\n for tail_obj in tail_candidates:\n for head_details in tail_obj[\"per_head_status\"]:\n output[\"goal_conditions_satisfied\"] += head_details[\n \"goal_conditions_satisfied\"\n ]\n num_problem_instances = head_details[\n \"instances_needed\"\n ] - len(head_details[\"satisfied_objects\"])\n if num_problem_instances > 0:\n 
num_problem_instances_available = min(\n num_problem_instances,\n len(head_details[\"unsatisfied_objects\"]),\n )\n for obj in head_details[\"unsatisfied_objects\"][\n :num_problem_instances_available\n ]:\n output[\"steps\"].append(\n {\n \"success\": False,\n \"objectId\": obj[\"objectId\"],\n \"objectType\": obj[\"objectType\"],\n \"desc\": relation[\"failure_desc\"],\n }\n )\n if obj[\"objectId\"] not in output[\"problem_keys\"]:\n output[\"problem_keys\"][obj[\"objectId\"]] = list()\n output[\"problem_keys\"][obj[\"objectId\"]].append(\n {\n \"determiner\": head_details[\n \"head_determiner\"\n ],\n \"property_name\": relation[\"property\"],\n \"desired_property_value\": tail_obj[\n \"tail_obj\"\n ][\"objectType\"],\n \"objectType\": obj[\"objectType\"],\n }\n )\n return output\n else:\n raise NotImplementedError(\n \"No support for tail determiner: \"\n + str(tail_determiner)\n + \". Supported values: a. the\"\n )\n\n @staticmethod\n def flatten_list(input_list):\n return [item for sublist in input_list for item in sublist]\n\n @staticmethod\n def get_combined_problem_key_dict(list_of_dicts):\n output_dict = dict()\n for input_dict in list_of_dicts:\n for key in input_dict:\n if key in output_dict:\n output_dict[key] += input_dict[key]\n else:\n output_dict[key] = input_dict[key]\n return output_dict\n\n # Takes in output of check_episode progress and picks a reasonable object to show with this task's subgoal\n @staticmethod\n def __get_representative_obj_id(task_output):\n if (\n not task_output[\"success\"]\n and \"candidate_objects\" in task_output\n and task_output[\"candidate_objects\"] is not None\n and len(task_output[\"candidate_objects\"]) > 0\n ):\n return task_output[\"candidate_objects\"][0][\"objectId\"]\n elif (\n \"satisfied_objects\" in task_output\n and task_output[\"satisfied_objects\"] is not None\n and len(task_output[\"satisfied_objects\"]) > 0\n ):\n return task_output[\"satisfied_objects\"][0][\"objectId\"]\n elif (\n \"candidate_objects\" in task_output\n and task_output[\"candidate_objects\"] is not None\n and len(task_output[\"candidate_objects\"]) > 0\n ):\n return task_output[\"candidate_objects\"][0][\"objectId\"]\n else:\n last_obj_id = \"\"\n for subgoal in task_output[\"subgoals\"]:\n for step in subgoal[\"steps\"]:\n if step[\"success\"]:\n return step[\"objectId\"]\n last_obj_id = step[\"objectId\"]\n return last_obj_id\n\n def check_episode_progress(\n self,\n all_objects_cur_state,\n simulator=None,\n num_instances_needed=1,\n use_task_candidates_in_relations=False,\n ):\n \"\"\"\n :param all_objects_cur_state: List of dictionaries, each of which has key, value pairs corresponding to\n current properties of an object in the environment\n :param simulator: instance of Simulator_THOR or None. 
If set to None progress check output will not sort\n candidates by distance to agent\n :param num_instances_needed: Only relevant for tasks with task_anchor_object != None - Sets the number of anchor\n objects to be created\n :param use_task_candidates_in_relations: Set to True if relations should be checked using incomplete subtasks\n \"\"\"\n self.write_task_params()\n\n all_subgoals = list()\n task_satisfied_objects = None\n task_candidate_objects = None\n per_component_satisfied_objects = dict()\n per_component_candidate_objects = dict()\n\n for component_key, component in self.components.items():\n component_instances_needed = num_instances_needed\n\n if component[\"determiner\"] == \"0\":\n continue\n\n if component[\"determiner\"] == \"all\":\n component_output = self.check_component_all_instances(\n all_objects_cur_state, component\n )\n component_subgoal = dict()\n component_subgoal[\"success\"] = component_output[\"success\"]\n component_subgoal[\"description\"] = \"\"\n component_subgoal[\"steps\"] = component_output[\"steps\"]\n component_subgoal[\"problem_keys\"] = component_output[\"problem_keys\"]\n component_subgoal[\"goal_conditions_total\"] = component_output[\n \"goal_conditions_total\"\n ]\n component_subgoal[\"goal_conditions_satisfied\"] = component_output[\n \"goal_conditions_satisfied\"\n ]\n if (\n not component_output[\"success\"]\n or len(component_output[\"steps\"]) > 0\n ):\n all_subgoals.append(component_subgoal)\n\n per_component_satisfied_objects[component_key] = component_output[\n \"satisfied_objects\"\n ]\n per_component_candidate_objects[component_key] = component_output[\n \"candidate_objects\"\n ]\n if self.task_anchor_object == component_key:\n task_satisfied_objects = component_output[\"satisfied_objects\"]\n task_candidate_objects = component_output[\"candidate_objects\"]\n\n else:\n if component[\"determiner\"] != \"a\":\n number_determiner = int(component[\"determiner\"])\n component_instances_needed *= number_determiner\n\n if \"task_name\" in component:\n component_output = component[\"task\"].check_episode_progress(\n all_objects_cur_state,\n simulator,\n num_instances_needed=component_instances_needed,\n use_task_candidates_in_relations=use_task_candidates_in_relations,\n )\n\n component_subgoal = dict()\n component_subgoal[\n \"representative_obj_id\"\n ] = self.__get_representative_obj_id(component_output)\n component_subgoal[\"step_successes\"] = [\n subgoal[\"success\"] for subgoal in component_output[\"subgoals\"]\n ]\n component_subgoal[\"success\"] = np.all(\n component_subgoal[\"step_successes\"]\n )\n component_subgoal[\"description\"] = component[\"task\"].desc\n if component[\"determiner\"] not in [\"a\", \"the\", \"all\"]:\n component_subgoal[\"description\"] = (\n component[\"determiner\"]\n + \" x \"\n + component_subgoal[\"description\"]\n )\n component_subgoal[\"steps\"] = self.flatten_list(\n [\n [step for step in subgoal[\"steps\"] if not step[\"success\"]]\n for subgoal in component_output[\"subgoals\"]\n ]\n )\n component_subgoal[\n \"problem_keys\"\n ] = self.get_combined_problem_key_dict(\n [\n subgoal[\"problem_keys\"]\n for subgoal in component_output[\"subgoals\"]\n ]\n )\n component_subgoal[\"goal_conditions_total\"] = component_output[\n \"goal_conditions_total\"\n ]\n component_subgoal[\"goal_conditions_satisfied\"] = component_output[\n \"goal_conditions_satisfied\"\n ]\n all_subgoals.append(component_subgoal)\n\n per_component_satisfied_objects[component_key] = component_output[\n \"satisfied_objects\"\n ]\n if 
use_task_candidates_in_relations:\n per_component_candidate_objects[\n component_key\n ] = component_output[\"candidate_objects\"]\n if self.task_anchor_object == component_key:\n task_satisfied_objects = component_output[\"satisfied_objects\"]\n task_candidate_objects = component_output[\"candidate_objects\"]\n\n else:\n component_output = self.check_component_n_instances(\n all_objects_cur_state,\n component,\n num_instances=component_instances_needed,\n simulator=simulator,\n allow_state_change=False,\n )\n\n component_subgoal = dict()\n component_subgoal[\"success\"] = component_output[\"success\"]\n component_subgoal[\"description\"] = \"\"\n component_subgoal[\"steps\"] = component_output[\"steps\"]\n component_subgoal[\"problem_keys\"] = component_output[\"problem_keys\"]\n component_subgoal[\"goal_conditions_total\"] = component_output[\n \"goal_conditions_total\"\n ]\n component_subgoal[\"goal_conditions_satisfied\"] = component_output[\n \"goal_conditions_satisfied\"\n ]\n all_subgoals.append(component_subgoal)\n\n per_component_satisfied_objects[component_key] = component_output[\n \"satisfied_objects\"\n ]\n per_component_candidate_objects[component_key] = component_output[\n \"candidate_objects\"\n ]\n if self.task_anchor_object == component_key:\n task_satisfied_objects = component_output[\"satisfied_objects\"]\n task_candidate_objects = component_output[\"candidate_objects\"]\n\n for relation in self.relations:\n relation_output = self.check_relation(\n relation,\n per_component_satisfied_objects,\n per_component_candidate_objects,\n all_objects_cur_state,\n num_instances_needed,\n )\n relation_subgoal = dict()\n relation_subgoal[\"success\"] = relation_output[\"success\"]\n relation_subgoal[\"description\"] = \"\"\n relation_subgoal[\"steps\"] = relation_output[\"steps\"]\n relation_subgoal[\"problem_keys\"] = relation_output[\"problem_keys\"]\n relation_subgoal[\"goal_conditions_total\"] = relation_output[\n \"goal_conditions_total\"\n ]\n relation_subgoal[\"goal_conditions_satisfied\"] = relation_output[\n \"goal_conditions_satisfied\"\n ]\n all_subgoals.append(relation_subgoal)\n\n task_output = dict()\n task_output[\"description\"] = self.desc\n task_output[\"success\"] = np.all(\n [subgoal[\"success\"] for subgoal in all_subgoals]\n )\n task_output[\"satisfied_objects\"] = task_satisfied_objects\n task_output[\"candidate_objects\"] = task_candidate_objects\n task_output[\"subgoals\"] = all_subgoals\n task_output[\"goal_conditions_total\"] = sum(\n [subgoal[\"goal_conditions_total\"] for subgoal in all_subgoals]\n )\n task_output[\"goal_conditions_satisfied\"] = sum(\n [subgoal[\"goal_conditions_satisfied\"] for subgoal in all_subgoals]\n )\n\n return task_output" } ]
import json
import os
from collections import OrderedDict

from langsuite.envs.teach.libs.teach.dataset.definitions import Definitions
from langsuite.envs.teach.libs.teach.dataset.task import Task
from langsuite.envs.teach.libs.teach.dataset.task_THOR import Task_THOR
14,080
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0

from __future__ import annotations


class Dataset:
    def __init__(
        self, task_type=None, definitions=None, comments="", version=None, tasks=None
    ):
        self.version = "2.0" if version is None else version
        self.task_type = task_type
        self.comments = comments
        self.definitions = (
            Definitions(definitions=definitions, version=version)
            if definitions is None
            else definitions
        )
        self.tasks = tasks if tasks is not None else []

    def add_task(self, task):
        self.tasks.append(task)

    def to_dict(self):
        _dict = OrderedDict()
        _dict["version"] = self.version
        _dict["task_type"] = self.task_type
        _dict["comments"] = self.comments
        _dict["definitions"] = self.definitions.to_dict()
        _dict["tasks"] = [x.to_dict() for x in self.tasks]
        return _dict

    @classmethod
    def from_dict(
        cls, dataset_dict, process_init_state=True, version="2.0"
    ) -> "Dataset":
        definitions = Definitions(dataset_dict["definitions"])
        if version == "2.0":
            tasks = [
                Task_THOR.from_dict(task_dict, definitions, process_init_state)
                for task_dict in dataset_dict.get("tasks")
            ]
        else:
            tasks = [
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0

from __future__ import annotations


class Dataset:
    def __init__(
        self, task_type=None, definitions=None, comments="", version=None, tasks=None
    ):
        self.version = "2.0" if version is None else version
        self.task_type = task_type
        self.comments = comments
        self.definitions = (
            Definitions(definitions=definitions, version=version)
            if definitions is None
            else definitions
        )
        self.tasks = tasks if tasks is not None else []

    def add_task(self, task):
        self.tasks.append(task)

    def to_dict(self):
        _dict = OrderedDict()
        _dict["version"] = self.version
        _dict["task_type"] = self.task_type
        _dict["comments"] = self.comments
        _dict["definitions"] = self.definitions.to_dict()
        _dict["tasks"] = [x.to_dict() for x in self.tasks]
        return _dict

    @classmethod
    def from_dict(
        cls, dataset_dict, process_init_state=True, version="2.0"
    ) -> "Dataset":
        definitions = Definitions(dataset_dict["definitions"])
        if version == "2.0":
            tasks = [
                Task_THOR.from_dict(task_dict, definitions, process_init_state)
                for task_dict in dataset_dict.get("tasks")
            ]
        else:
            tasks = [
Task.from_dict(task_dict, definitions, process_init_state)
1
2023-11-01 01:47:00+00:00
16k
radekd91/inferno
inferno/datasets/VideoDatasetBase.py
[ { "identifier": "KeypointNormalization", "path": "inferno/transforms/keypoints.py", "snippet": "class KeypointNormalization(KeypointTransform):\n\n def __init__(self, scale_x=1., scale_y=1.):\n super().__init__(scale_x, scale_y)\n\n def forward(self, points):\n # normalization the way EMOCA uses it.\n # the keypoints are not used in image space but in normalized space\n # for loss computation\n # the normalization is as follows:\n if isinstance(points, torch.Tensor):\n points_ = points.clone()\n elif isinstance(points, np.ndarray):\n points_ = points.copy()\n else:\n raise ValueError(f\"Invalid type of points {str(type(points))}\")\n points_[..., 0] -= self.scale_x/2\n points_[..., 0] /= self.scale_x/2\n points_[..., 1] -= self.scale_y/2\n points_[..., 1] /= self.scale_y/2\n return points_\n\n def inv(self, points):\n if isinstance(points, torch.Tensor):\n points_ = points.clone()\n elif isinstance(points, np.ndarray):\n points_ = points.copy()\n else:\n raise ValueError(f\"Invalid type of points {str(type(points))}\")\n points_[..., 0] *= self.scale_x / 2\n points_[..., 0] += self.scale_x / 2\n points_[..., 1] *= self.scale_y / 2\n points_[..., 1] += self.scale_y / 2\n return points_" }, { "identifier": "KeypointScale", "path": "inferno/transforms/keypoints.py", "snippet": "class KeypointScale(KeypointTransform):\n\n def __init__(self, scale_x=1., scale_y=1.):\n super().__init__(scale_x, scale_y)\n\n def forward(self, points):\n points_ = points.clone()\n points_[..., 0] *= self.scale_x\n points_[..., 1] *= self.scale_y\n return points_" }, { "identifier": "MediaPipeFaceOccluder", "path": "inferno/utils/MediaPipeFaceOccluder.py", "snippet": "class MediaPipeFaceOccluder(object):\n\n def __init__(self) -> None:\n self.left_eye = left_eye_eyebrow_landmark_indices()\n self.right_eye = right_eye_eyebrow_landmark_indices()\n self.mouth = mouth_landmark_indices()\n self.face_oval = face_oval_landmark_indices()\n self.face_all = all_face_landmark_indices()\n\n def bounding_box(self, landmarks, region): \n landmarks = landmarks[:, :2]\n if region == \"all\":\n left = np.min(landmarks[:, 0])\n right = np.max(landmarks[:, 0])\n top = np.min(landmarks[:, 1])\n bottom = np.max(landmarks[:, 1])\n elif region == \"left_eye\": \n left = np.min(landmarks[self.left_eye, 0])\n right = np.max(landmarks[self.left_eye, 0])\n top = np.min(landmarks[self.left_eye, 1])\n bottom = np.max(landmarks[self.left_eye, 1])\n elif region == \"right_eye\": \n left = np.min(landmarks[self.right_eye, 0])\n right = np.max(landmarks[self.right_eye, 0])\n top = np.min(landmarks[self.right_eye, 1])\n bottom = np.max(landmarks[self.right_eye, 1])\n elif region == \"mouth\": \n left = np.min(landmarks[self.mouth, 0])\n right = np.max(landmarks[self.mouth, 0])\n top = np.min(landmarks[self.mouth, 1])\n bottom = np.max(landmarks[self.mouth, 1])\n else: \n raise ValueError(f\"Invalid region {region}\")\n\n width = right - left\n height = bottom - top\n center_x = left + width / 2\n center_y = top + height / 2\n \n center = np.stack([center_x, center_y], axis=1).round().astype(np.int32)\n size = np.stack([width, height], axis=1).round().astype(np.int32)\n\n bb = np.array([left, right, top, bottom], dtype = np.int32)\n sizes = np.concatenate([center, size])\n return bb, sizes\n \n def bounding_box_batch(self, landmarks, region): \n assert landmarks.ndim == 3\n landmarks = landmarks[:, :, :2]\n if region == \"all\":\n left = np.min(landmarks[:,:, 0], axis=1)\n right = np.max(landmarks[:,:, 0], axis=1)\n top = np.min(landmarks[:,:, 1], 
axis=1)\n bottom = np.max(landmarks[:,:, 1], axis=1)\n elif region == \"left_eye\": \n left = np.min(landmarks[:,self.left_eye, 0], axis=1)\n right = np.max(landmarks[:,self.left_eye, 0], axis=1)\n top = np.min(landmarks[:,self.left_eye, 1], axis=1)\n bottom = np.max(landmarks[:,self.left_eye, 1], axis=1)\n elif region == \"right_eye\": \n left = np.min(landmarks[:,self.right_eye, 0], axis=1)\n right = np.max(landmarks[:,self.right_eye, 0], axis=1)\n top = np.min(landmarks[:,self.right_eye, 1], axis=1)\n bottom = np.max(landmarks[:,self.right_eye, 1], axis=1)\n elif region == \"mouth\": \n left = np.min(landmarks[:,self.mouth, 0], axis=1)\n right = np.max(landmarks[:,self.mouth, 0], axis=1)\n top = np.min(landmarks[:,self.mouth, 1], axis=1)\n bottom = np.max(landmarks[:,self.mouth, 1], axis=1)\n else: \n raise ValueError(f\"Invalid region {region}\")\n \n width = right - left\n height = bottom - top\n centers_x = left + width / 2\n centers_y = top + height / 2\n bb = np.stack([left, right, top, bottom], axis=1).round().astype(np.int32)\n sizes = np.stack([centers_x, centers_y, width, height], axis=1).round().astype(np.int32)\n return bb, sizes\n\n def occlude(self, image, region, landmarks=None, bounding_box=None):\n assert landmarks is not None and bounding_box is not None, \"Specify either landmarks or bounding_box\"\n if landmarks is not None: \n bounding_box = self.bounding_box(landmarks, region) \n \n image[bounding_box[2]:bounding_box[3], bounding_box[0]:bounding_box[1], ...] = 0 \n return image\n\n def occlude_batch(self, image, region, landmarks=None, bounding_box_batch=None\n , start_frame=None, end_frame=None, validity=None): \n assert not(landmarks is not None and bounding_box_batch is not None), \"Specify either landmarks or bounding_box\"\n start_frame = start_frame or 0\n end_frame = end_frame or image.shape[0]\n assert end_frame <= image.shape[0]\n if landmarks is not None:\n bounding_box_batch, sizes_batch = self.bounding_box_batch(landmarks, region) \n for i in range(start_frame, end_frame): \n if validity is not None and not validity[i]: # if the bounding box is not valid, occlude nothing\n continue\n image[i, bounding_box_batch[i, 2]:bounding_box_batch[i, 3], bounding_box_batch[i, 0]:bounding_box_batch[i, 1], ...] = 0\n \n # # do the above without a for loop \n # image[:, bounding_box_batch[:, 2]:bounding_box_batch[:, 3], bounding_box_batch[:, 0]:bounding_box_batch[:, 1], ...] = 0\n return image" }, { "identifier": "sizes_to_bb_batch", "path": "inferno/utils/MediaPipeFaceOccluder.py", "snippet": "def sizes_to_bb_batch(sizes):\n left = sizes[:, 0] - sizes[:, 2]\n right = sizes[:, 0] + sizes[:, 2]\n top = sizes[:, 1] - sizes[:, 3]\n bottom = sizes[:, 1] + sizes[:, 3]\n return np.stack([left, right, top, bottom], axis=1)" }, { "identifier": "FaceDataModuleBase", "path": "inferno/datasets/FaceDataModuleBase.py", "snippet": "class FaceDataModuleBase(pl.LightningDataModule):\n \"\"\"\n A base data module for face datasets. This DM can be inherited by any face datasets, which just adapt things \n to the dataset's specificities (such as different GT or data storage structure). \n This class can take care of face detection, recognition, segmentation and landmark detection.\n \"\"\"\n\n def __init__(self, root_dir, output_dir, processed_subfolder, device=None,\n face_detector='fan',\n face_detector_threshold=0.9,\n image_size=224,\n scale=1.25,\n bb_center_shift_x=0., # in relative numbers\n bb_center_shift_y=0., # in relative numbers (i.e. 
-0.1 for 10% shift upwards, ...)\n processed_ext=\".png\", \n save_detection_images=True, \n save_landmarks_frame_by_frame=True, # default\n save_landmarks_one_file=False, # only use for large scale video datasets (that would produce too many files otherwise)\n save_segmentation_frame_by_frame=True, # default\n save_segmentation_one_file=False, # only use for large scale video datasets (that would produce too many files otherwise)\n return_mica_images=False,\n ):\n super().__init__()\n self.root_dir = root_dir\n self.output_dir = output_dir\n self.bb_center_shift_x = bb_center_shift_x\n self.bb_center_shift_y = bb_center_shift_y\n self.processed_ext = processed_ext\n self.save_detection_images=save_detection_images\n self.save_landmarks_frame_by_frame = save_landmarks_frame_by_frame\n self.save_landmarks_one_file = save_landmarks_one_file\n assert not (save_landmarks_one_file and save_landmarks_frame_by_frame) # only one of them can be true\n self.save_segmentation_frame_by_frame = save_segmentation_frame_by_frame\n self.save_segmentation_one_file = save_segmentation_one_file\n assert not (save_segmentation_one_file and save_segmentation_frame_by_frame) # only one of them can be true\n\n if processed_subfolder is None:\n import datetime\n date = datetime.datetime.now()\n processed_folder = os.path.join(output_dir, \"processed_%s\" % date.strftime(\"%Y_%b_%d_%H-%M-%S\"))\n else:\n processed_folder = os.path.join(output_dir, processed_subfolder)\n self.output_dir = processed_folder\n\n self.device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n self.face_detector_type = face_detector\n self.face_detector_threshold = face_detector_threshold\n\n self.image_size = image_size\n self.scale = scale\n self.return_mica_images = return_mica_images\n\n def _get_max_faces_per_image(self): \n return 1\n \n def _is_video_dataset(self): \n return False\n\n # @profile\n def _instantiate_detector(self, overwrite = False, face_detector=None):\n face_detector = face_detector or self.face_detector_type\n if hasattr(self, 'face_detector'):\n if not overwrite:\n return\n del self.face_detector\n if self.face_detector_type == 'fan':\n self.face_detector = FAN(self.device, threshold=self.face_detector_threshold, mode='2D')\n elif self.face_detector_type == 'fan3d':\n self.face_detector = FAN(self.device, threshold=self.face_detector_threshold, mode='3D')\n elif self.face_detector_type == 'mtcnn':\n self.face_detector = MTCNN(self.device)\n elif self.face_detector_type == '3fabrec': \n from inferno.utils.TFabRecLandmarkDetector import TFabRec\n self.face_detector = TFabRec(instantiate_detector='sfd', threshold=self.face_detector_threshold)\n elif self.face_detector_type == 'mediapipe': \n from inferno.utils.MediaPipeLandmarkDetector import MediaPipeLandmarkDetector\n self.face_detector = MediaPipeLandmarkDetector(threshold=self.face_detector_threshold, \n video_based=self._is_video_dataset(), max_faces=self._get_max_faces_per_image())\n elif self.face_detector_type == 'deep3dface': \n from inferno.utils.Deep3DFaceLandmarkDetector import Deep3DFaceLandmarkDetector\n self.face_detector = Deep3DFaceLandmarkDetector(instantiate_detector='mtcnn')\n else:\n raise ValueError(\"Invalid face detector specifier '%s'\" % self.face_detector)\n\n # @profile\n def _detect_faces_in_image(self, image_or_path, detected_faces=None):\n # imagepath = self.imagepath_list[index]\n # imagename = imagepath.split('/')[-1].split('.')[0]\n if isinstance(image_or_path, (str, Path)):\n image = 
np.array(imread(image_or_path))\n elif isinstance(image_or_path, np.ndarray):\n image = image_or_path\n else: \n raise ValueError(\"Invalid image type '%s'\" % type(image_or_path)) \n \n if len(image.shape) == 2:\n image = np.tile(image[:, :, None], (1, 1, 3))\n if len(image.shape) == 3 and image.shape[2] > 3:\n image = image[:, :, :3]\n\n h, w, _ = image.shape\n self._instantiate_detector()\n bounding_boxes, bbox_type, landmarks = self.face_detector.run(image,\n with_landmarks=True,\n detected_faces=detected_faces)\n image = image / 255.\n detection_images = []\n detection_centers = []\n detection_sizes = []\n detection_landmarks = [] # landmarks wrt the detection image\n # original_landmarks = [] # landmarks wrt the original image\n original_landmarks = landmarks # landmarks wrt the original image\n # detection_embeddings = []\n if len(bounding_boxes) == 0:\n # print('no face detected! run original image')\n return detection_images, detection_centers, detection_images, \\\n bbox_type, detection_landmarks, original_landmarks\n # left = 0\n # right = h - 1\n # top = 0\n # bottom = w - 1\n # bounding_boxes += [[left, right, top, bottom]]\n\n for bi, bbox in enumerate(bounding_boxes):\n left = bbox[0]\n right = bbox[2]\n top = bbox[1]\n bottom = bbox[3]\n old_size, center = bbox2point(left, right, top, bottom, type=bbox_type)\n\n center[0] += abs(right-left)*self.bb_center_shift_x\n center[1] += abs(bottom-top)*self.bb_center_shift_y\n\n size = int(old_size * self.scale)\n\n dst_image, dts_landmark = bbpoint_warp(image, center, size, self.image_size, landmarks=landmarks[bi])\n\n # dst_image = dst_image.transpose(2, 0, 1)\n #\n detection_images += [(dst_image*255).astype(np.uint8)]\n detection_centers += [center]\n detection_sizes += [size]\n\n # imsave(os.path.join(\"detection_%d.png\" % bi), dst_image)\n\n # to be checked\n detection_landmarks += [dts_landmark]\n\n del image\n return detection_images, detection_centers, detection_sizes, bbox_type, detection_landmarks, original_landmarks\n\n # @profile\n def _detect_faces_in_image_wrapper(self, frame_list, fid, out_detection_folder, out_landmark_folder, bb_outfile,\n centers_all, sizes_all, detection_fnames_all, landmark_fnames_all, \n out_landmarks_all=None, out_landmarks_orig_all=None, out_bbox_type_all=None):\n\n if isinstance(frame_list, (str, Path, list)):\\\n # if frame list is a list of image paths\n frame_fname = frame_list[fid]\n # detect faces in each frames\n detection_ims, centers, sizes, bbox_type, landmarks, orig_landmarks = self._detect_faces_in_image(Path(self.output_dir) / frame_fname)\n elif isinstance(frame_list, (np.ndarray, types.GeneratorType)): \n # frame_list is an array of many images, or a generator (like a video reader)\n frame_fname =Path(f\"{fid:05d}.png\")\n if isinstance(frame_list, np.ndarray):\n frame = frame_list[fid]\n else: \n frame = next(frame_list)\n detection_ims, centers, sizes, bbox_type, landmarks, orig_landmarks = self._detect_faces_in_image(frame)\n # if len(detection_ims) > 0: # debug visualization\n # imsave(frame_fname, detection_ims[0])\n \n # self.detection_lists[sequence_id][fid] += [detections]\n # import plotly.graph_objects as go\n # fig = go.Figure(data=go.Image(z=frame,))\n # fig.show()\n\n \n centers_all += [centers]\n sizes_all += [sizes]\n if out_landmarks_all is not None:\n out_landmarks_all += [landmarks]\n if out_landmarks_orig_all is not None:\n out_landmarks_orig_all += [orig_landmarks]\n if out_bbox_type_all is not None:\n out_bbox_type_all += [[bbox_type]*len(landmarks)]\n\n # 
save detections\n detection_fnames = []\n landmark_fnames = []\n for di, detection in enumerate(detection_ims):\n # save detection\n stem = frame_fname.stem + \"_%.03d\" % di\n if self.save_detection_images:\n out_detection_fname = out_detection_folder / (stem + self.processed_ext)\n detection_fnames += [out_detection_fname.relative_to(self.output_dir)]\n if self.processed_ext in ['.JPG', '.jpg', \".jpeg\", \".JPEG\"]:\n imsave(out_detection_fname, detection, quality=100)\n else:\n imsave(out_detection_fname, detection)\n # save landmarks\n if self.save_landmarks_frame_by_frame:\n if self.save_detection_images:\n out_landmark_fname = out_landmark_folder / (stem + \".pkl\")\n landmark_fnames += [out_landmark_fname.relative_to(self.output_dir)]\n save_landmark(out_landmark_fname, landmarks[di], bbox_type)\n else: \n out_landmark_fname = out_landmark_folder / (stem + \".pkl\")\n landmark_fnames += [out_landmark_fname.relative_to(self.output_dir)]\n save_landmark(out_landmark_fname, orig_landmarks[di], bbox_type)\n\n detection_fnames_all += [detection_fnames]\n landmark_fnames_all += [landmark_fnames]\n\n torch.cuda.empty_cache()\n checkpoint_frequency = 100\n if fid % checkpoint_frequency == 0:\n FaceDataModuleBase.save_detections(bb_outfile, detection_fnames_all, landmark_fnames_all,\n centers_all, sizes_all, fid)\n\n\n def _get_segmentation_method(self): \n return \"focus\"\n # return \"bisenet\"\n\n\n def _segment_images(self, detection_fnames_or_ims, out_segmentation_folder, path_depth = 0, landmarks=None, segmentation_net=None):\n import time\n # segmentation_net = segmentation_net or \"bisenet\"\n segmentation_net = segmentation_net or self._get_segmentation_method()\n if self.save_landmarks_one_file: \n overwrite = False \n # single_out_file = out_segmentation_folder / \"segmentations.pkl\"\n single_out_file = out_segmentation_folder / \"segmentations.hdf5\"\n if single_out_file.is_file() and not overwrite:\n print(f\"Segmentation already found in {single_out_file}, skipping\")\n return\n\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n print(device)\n net, seg_type, batch_size = self._get_segmentation_net(device, segmentation_net)\n\n # if self.save_detection_images:\n # ref_im = imread(detection_fnames_or_ims[0])\n # else: \n # ref_im = detection_fnames_or_ims[0]\n # ref_size = Resize((ref_im.shape[0], ref_im.shape[1]), interpolation=Image.NEAREST)\n ref_size = None\n\n # transforms = Compose([\n # Resize((512, 512)),\n # Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n # ])\n transforms=None\n # batch_size = 16\n\n if isinstance(detection_fnames_or_ims, types.GeneratorType): \n im_read = \"skvreader\"\n elif isinstance(detection_fnames_or_ims, (FFmpegReader)):\n im_read = \"skvffmpeg\"\n else:\n im_read = 'pil' if not isinstance(detection_fnames_or_ims[0], np.ndarray) else None\n\n dataset = UnsupervisedImageDataset(detection_fnames_or_ims, image_transforms=transforms,\n landmark_list = landmarks,\n im_read=im_read)\n loader = DataLoader(dataset, batch_size=batch_size, num_workers=4 if im_read not in [\"skvreader\", \"skvffmpeg\"] else 1, \n shuffle=False)\n\n # import matplotlib.pyplot as plt\n\n if self.save_segmentation_one_file: \n out_segmentation_names = []\n out_segmentations = []\n out_segmentation_types = []\n\n for i, batch in enumerate(tqdm(loader)):\n # facenet_pytorch expects this stanadrization for the input to the net\n # images = fixed_image_standardization(batch['image'].to(device))\n images = batch['image'].cuda()\n # start = 
time.time()\n with torch.no_grad():\n segmentation = net(images)\n # end = time.time()\n\n if ref_size is None:\n ref_size = Resize((images.shape[2], images.shape[3]), interpolation=Image.NEAREST)\n\n segmentation = ref_size(segmentation)\n segmentation = segmentation.cpu().numpy()\n\n if self.save_segmentation_frame_by_frame:\n start = time.time()\n for j in range(segmentation.shape[0]):\n image_path = batch['path'][j]\n # if isinstance(out_segmentation_folder, list):\n if path_depth > 0:\n rel_path = Path(image_path).parent.relative_to(Path(image_path).parents[path_depth])\n segmentation_path = out_segmentation_folder / rel_path / (Path(image_path).stem + \".pkl\")\n else:\n segmentation_path = out_segmentation_folder / (Path(image_path).stem + \".pkl\")\n segmentation_path.parent.mkdir(exist_ok=True, parents=True)\n # im = images[j]\n # im = im.permute(1,2,0).cpu().numpy()\n # from inferno.datasets.IO import process_segmentation \n # import matplotlib.pyplot as plt\n # from inferno.datasets.FaceVideoDataModule import FaceDataModuleBase\n # seg = process_segmentation(segmentation[j], seg_type)\n # imsave(\"seg.png\", seg)\n # imsave(\"im.png\", im)\n # FaceDataModuleBase.vis_parsing_maps(im, segmentation[j], stride=1, save_im=True,\n # save_path='overlay.png')\n # plt.figure()\n # plt.imshow(im)\n # plt.show()\n # plt.figure()\n # plt.imshow(seg[0])\n # plt.show()\n save_segmentation(segmentation_path, segmentation[j], seg_type)\n print(f\" Saving batch {i} took: {end - start}\")\n end = time.time()\n if self.save_segmentation_one_file: \n segmentation_names = []\n segmentations = []\n for j in range(segmentation.shape[0]):\n image_path = batch['path'][j]\n if path_depth > 0:\n rel_path = Path(image_path).parent.relative_to(Path(image_path).parents[path_depth])\n segmentation_path = rel_path / (Path(image_path).stem + \".pkl\")\n else:\n segmentation_path = Path(image_path).stem \n segmentation_names += [segmentation_path]\n segmentations += [segmentation[j]]\n out_segmentation_names += segmentation_names\n out_segmentations += segmentations\n out_segmentation_types += [seg_type] * len(segmentation_names)\n\n if self.save_landmarks_one_file: \n if single_out_file.suffix == \".pkl\":\n save_segmentation_list(single_out_file, out_segmentations, out_segmentation_types, out_segmentation_names)\n elif single_out_file.suffix == \".hdf5\":\n save_segmentation_list_v2(single_out_file, out_segmentations, out_segmentation_types, out_segmentation_names)\n print(\"Segmentation saved to %s\" % single_out_file)\n\n\n def _get_segmentation_net(self, device, method='bisenet'):\n if method == 'bisenet':\n seg_type = 'face_parsing'\n if hasattr(self, \"_bisenet\" ): \n net = self._bisenet\n else:\n from inferno.models.external.BiSeNetFaceParsing import BiSeNetFaceParsing\n net = BiSeNetFaceParsing()\n self._bisenet = net\n batch_size = 64\n elif method == \"gpen\": \n seg_type = 'face_parsing_gpen'\n if hasattr(self, \"_gpen\" ): \n net = self._gpen\n else:\n from inferno.models.external.GPENFaceParsing import GPENFaceParsing\n net = GPENFaceParsing()\n self._gpen = net\n batch_size = 16\n elif method == \"focus\": \n seg_type = 'face_segmentation_focus'\n if hasattr(self, \"_focus\" ): \n net = self._focus\n else:\n from inferno.models.external.FocusSegmentation import FocusSegmentation\n net = FocusSegmentation()\n self._focus = net\n batch_size = 16\n # batch_size = 16\n else: \n raise ValueError(f\"Unknown segmentation type: {method}\" )\n\n # from inferno.utils.other import get_path_to_externals\n # 
path_to_segnet = get_path_to_externals() / \"face-parsing.PyTorch\"\n # if not(str(path_to_segnet) in sys.path or str(path_to_segnet.absolute()) in sys.path):\n # sys.path += [str(path_to_segnet)]\n\n # from model import BiSeNet\n # n_classes = 19\n # net = BiSeNet(n_classes=n_classes)\n # # net.cuda()\n # save_pth = path_to_segnet / 'res' / 'cp' / '79999_iter.pth'\n # net.load_state_dict(torch.load(save_pth))\n # # net.eval()\n # net.eval().to(device)\n\n # labels = {\n # 0: 'background',\n # 1: 'skin',\n # 2: 'nose',\n # 3: 'eye_g',\n # 4: 'l_eye',\n # 5: 'r_eye',\n # 6: 'l_brow',\n # 7: 'r_brow',\n # 8: 'l_ear',\n # 9: 'r_ear',\n # 10: 'mouth',\n # 11: 'u_lip',\n # 12: 'l_lip',\n # 13: 'hair',\n # 14: 'hat',\n # 15: 'ear_r',\n # 16: 'neck_l',\n # 17: 'neck',\n # 18: 'cloth'\n # }\n\n return net, seg_type , batch_size\n\n\n @staticmethod\n def save_landmark_list(fname, landmarks):\n with open(fname, \"wb\" ) as f:\n pkl.dump(landmarks, f)\n\n @staticmethod\n def load_landmark_list(fname):\n with open(fname, \"rb\" ) as f:\n landmarks = pkl.load(f)\n return landmarks\n\n\n @staticmethod\n def save_landmark_list_v2(fname, landmarks, landmark_confidences, landmark_types):\n with open(fname, \"wb\" ) as f:\n pkl.dump(landmarks, f)\n pkl.dump(landmark_confidences, f)\n pkl.dump(landmark_types, f)\n\n @staticmethod\n def load_landmark_list_v2(fname):\n with open(fname, \"rb\" ) as f:\n landmarks = pkl.load(f)\n landmark_confidences = pkl.load(f)\n landmark_types = pkl.load(f)\n return landmarks, landmark_confidences, landmark_types\n\n\n @staticmethod\n def save_detections(fname, detection_fnames, landmark_fnames, centers, sizes, last_frame_id):\n with open(fname, \"wb\" ) as f:\n pkl.dump(detection_fnames, f)\n pkl.dump(centers, f)\n pkl.dump(sizes, f)\n pkl.dump(last_frame_id, f)\n pkl.dump(landmark_fnames, f)\n\n @staticmethod\n def load_detections(fname):\n with open(fname, \"rb\" ) as f:\n detection_fnames = pkl.load(f)\n centers = pkl.load(f)\n sizes = pkl.load(f)\n try:\n last_frame_id = pkl.load(f)\n except:\n last_frame_id = -1\n try:\n landmark_fnames = pkl.load(f)\n except:\n landmark_fnames = [None]*len(detection_fnames)\n\n return detection_fnames, landmark_fnames, centers, sizes, last_frame_id" }, { "identifier": "load_and_process_segmentation", "path": "inferno/datasets/IO.py", "snippet": "def load_and_process_segmentation(path):\n seg_image, seg_type = load_segmentation(path)\n seg_image = seg_image[np.newaxis, :, :, np.newaxis]\n # end = timer()\n # print(f\"Segmentation reading took {end - start} s.\")\n\n # start = timer()\n seg_image = process_segmentation(\n seg_image, seg_type).astype(np.uint8)\n return seg_image" }, { "identifier": "process_segmentation", "path": "inferno/datasets/IO.py", "snippet": "def process_segmentation(segmentation, seg_type, discarded_labels=None):\n if seg_type == \"face_parsing\":\n discarded_labels = discarded_labels or default_discarded_labels\n # start = timer()\n # segmentation_proc = np.ones_like(segmentation, dtype=np.float32)\n # for label in discarded_labels:\n # segmentation_proc[segmentation == label] = 0.\n segmentation_proc = np.isin(segmentation, discarded_labels)\n segmentation_proc = np.logical_not(segmentation_proc)\n segmentation_proc = segmentation_proc.astype(np.float32)\n # end = timer()\n # print(f\"Segmentation label discarding took {end - start}s\")\n return segmentation_proc\n elif seg_type == \"face_segmentation_focus\":\n segmentation = segmentation > 0.5 \n segmentation = segmentation.astype(np.float32)\n return 
segmentation\n else:\n raise ValueError(f\"Invalid segmentation type '{seg_type}'\")" }, { "identifier": "load_segmentation", "path": "inferno/datasets/IO.py", "snippet": "def load_segmentation(filename):\n with open(filename, \"rb\") as f:\n seg = cpkl.load(f, compression='gzip')\n seg_type = seg[0]\n seg_image = seg[1]\n # seg_type = pkl.load(f)\n # seg_image = pkl.load(f)\n return seg_image, seg_type" }, { "identifier": "load_segmentation_list", "path": "inferno/datasets/IO.py", "snippet": "def load_segmentation_list(filename):\n try:\n with open(filename, \"rb\") as f:\n seg = cpkl.load(f, compression='gzip')\n seg_types = seg[0]\n seg_images = seg[1]\n seg_names = seg[2]\n except EOFError as e: \n print(f\"Error loading segmentation list: {filename}\")\n raise e\n return seg_images, seg_types, seg_names" }, { "identifier": "load_segmentation_list_v2", "path": "inferno/datasets/IO.py", "snippet": "def load_segmentation_list_v2(filename, start_frame=None, end_frame=None):\n with h5py.File(filename, 'r') as f:\n dset = f[\"frames\"]\n dset_types = f[\"frame_types\"]\n dset_names = f[\"frame_names\"]\n if start_frame is None:\n start_frame = 0\n if end_frame is None:\n end_frame = dset.shape[0]\n seg_images = dset[start_frame:end_frame]\n seg_types = dset_types[start_frame:end_frame]\n seg_names = dset_names[start_frame:end_frame]\n return seg_images, seg_types, seg_names" }, { "identifier": "load_reconstruction_list", "path": "inferno/datasets/IO.py", "snippet": "def load_reconstruction_list(filename, start_frame=None, end_frame=None):\n reconstructions = _load_hickle_file(filename, start_frame, end_frame)\n return reconstructions" }, { "identifier": "load_emotion_list", "path": "inferno/datasets/IO.py", "snippet": "def load_emotion_list(filename, start_frame=None, end_frame=None):\n emotions = _load_hickle_file(filename, start_frame, end_frame)\n return emotions" }, { "identifier": "load_reconstruction_list_v2", "path": "inferno/datasets/IO.py", "snippet": "def load_reconstruction_list_v2(filename, start_frame=None, end_frame=None):\n return _load_hdf5_dict(filename, start_frame, end_frame)" }, { "identifier": "load_emotion_list_v2", "path": "inferno/datasets/IO.py", "snippet": "def load_emotion_list_v2(filename, start_frame=None, end_frame=None):\n return _load_hdf5_dict(filename, start_frame, end_frame)" }, { "identifier": "bbox2point", "path": "inferno/datasets/ImageDatasetHelpers.py", "snippet": "def bbox2point(left, right, top, bottom, type='bbox'):\n ''' bbox from detector and landmarks are different\n '''\n if type == 'kpt68':\n old_size = (right - left + bottom - top) / 2 * 1.1\n center_x = right - (right - left) / 2.0\n center_y = bottom - (bottom - top) / 2.0\n # center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])\n elif type == 'bbox':\n old_size = (right - left + bottom - top) / 2\n center_x = right - (right - left) / 2.0 \n center_y = bottom - (bottom - top) / 2.0 + old_size * 0.12\n # center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 + old_size * 0.12])\n elif type == \"mediapipe\":\n old_size = (right - left + bottom - top) / 2 * 1.1\n center_x = right - (right - left) / 2.0 \n center_y = bottom - (bottom - top) / 2.0\n # center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])\n else:\n raise NotImplementedError(f\" bbox2point not implemented for {type} \")\n if isinstance(center_x, np.ndarray):\n center = np.stack([center_x, center_y], axis=1)\n else: \n center = np.array([center_x, 
center_y])\n return old_size, center" }, { "identifier": "bbpoint_warp", "path": "inferno/datasets/ImageDatasetHelpers.py", "snippet": "def bbpoint_warp(image, center, size, target_size_height, target_size_width=None, output_shape=None, inv=True, landmarks=None, \n order=3 # order of interpolation, bicubic by default\n ):\n target_size_width = target_size_width or target_size_height\n tform = point2transform(center, size, target_size_height, target_size_width)\n tf = tform.inverse if inv else tform\n output_shape = output_shape or (target_size_height, target_size_width)\n dst_image = warp(image, tf, output_shape=output_shape, order=order)\n if landmarks is None:\n return dst_image\n # points need the matrix\n if isinstance(landmarks, np.ndarray):\n assert isinstance(landmarks, np.ndarray)\n tf_lmk = tform if inv else tform.inverse\n dst_landmarks = tf_lmk(landmarks[:, :2])\n elif isinstance(landmarks, list): \n tf_lmk = tform if inv else tform.inverse\n dst_landmarks = [] \n for i in range(len(landmarks)):\n dst_landmarks += [tf_lmk(landmarks[i][:, :2])]\n elif isinstance(landmarks, dict): \n tf_lmk = tform if inv else tform.inverse\n dst_landmarks = {}\n for key, value in landmarks.items():\n dst_landmarks[key] = tf_lmk(landmarks[key][:, :2])\n else: \n raise ValueError(\"landmarks must be np.ndarray, list or dict\")\n return dst_image, dst_landmarks" }, { "identifier": "load_landmark", "path": "inferno/utils/FaceDetector.py", "snippet": "def load_landmark(fname):\n with open(fname, \"rb\") as f:\n landmark_type = pkl.load(f)\n landmark = pkl.load(f)\n return landmark_type, landmark" }, { "identifier": "MEDIAPIPE_LANDMARK_NUMBER", "path": "inferno/layers/losses/MediaPipeLandmarkLosses.py", "snippet": "MEDIAPIPE_LANDMARK_NUMBER = 478" } ]
import torch
import numpy as np
import imgaug
import pandas as pd
import torch.nn.functional as F
import subprocess
import traceback
import cv2
import timeit
import cv2
import librosa
import mediapipe as mp
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
from inferno.transforms.keypoints import KeypointNormalization, KeypointScale
from inferno.utils.MediaPipeFaceOccluder import MediaPipeFaceOccluder, sizes_to_bb_batch
from pathlib import Path
from scipy.io import wavfile
from python_speech_features import logfbank
from inferno.datasets.FaceDataModuleBase import FaceDataModuleBase
from inferno.datasets.IO import (load_and_process_segmentation, process_segmentation, load_segmentation,
                                 load_segmentation_list, load_segmentation_list_v2,
                                 load_reconstruction_list, load_emotion_list,
                                 load_reconstruction_list_v2, load_emotion_list_v2, )
from inferno.datasets.ImageDatasetHelpers import bbox2point, bbpoint_warp
from inferno.utils.FaceDetector import load_landmark
from skvideo.io import vread, vreader, FFmpegReader
from inferno.layers.losses.MediaPipeLandmarkLosses import MEDIAPIPE_LANDMARK_NUMBER
from decord import VideoReader, cpu
from inferno.models.mica.MicaInputProcessing import MicaInputProcessor
from munch import Munch
from inferno.models.temporal.Preprocessors import FlamePreprocessor
from tqdm import auto
from inferno.utils.MediaPipeLandmarkDetector import np2mediapipe
from inferno.utils.DecaUtils import tensor_vis_landmarks
13,194
else: # landmarks are saved per frame landmark_validity = np.ones((len(landmarks), 1), dtype=np.float32) for i in range(start_frame, sequence_length + start_frame): landmark_path = landmarks_dir / f"{i:05d}_000.pkl" landmark_type, landmark = load_landmark(landmark_path) landmarks += [landmark] if len(landmark) == 0: # dropped detection landmark = [0, 0] landmark_validity[li] = 0. elif len(landmark) > 1: # multiple faces detected landmarks[li] = landmarks[li][0] # just take the first one for now else: landmark[li] = landmarks[li][0] landmarks = np.stack(landmarks, axis=0) # if landmark_type == "mediapipe" and self.align_images: # # # [WARNING] mediapipe landmarks coordinates are saved in the scale [0.0-1.0] (for absolute they need to be multiplied by img size) # # # landmarks -= 0.5 # # landmarks -= 1. # landmarks *= 2 # # # landmarks *= 2 # landmarks -= 1 # pad landmarks with zeros if necessary to match the desired video length if landmarks.shape[0] < sequence_length: landmarks = np.concatenate([landmarks, np.zeros( (sequence_length - landmarks.shape[0], *landmarks.shape[1:]), dtype=landmarks.dtype)], axis=0) landmark_validity = np.concatenate([landmark_validity, np.zeros((sequence_length - landmark_validity.shape[0], 1), dtype=landmark_validity.dtype)], axis=0) landmark_dict[landmark_type] = landmarks.astype(np.float32) landmark_validity_dict[landmark_type] = landmark_validity sample["landmarks"] = landmark_dict sample["landmarks_validity"] = landmark_validity_dict return sample def _path_to_segmentations(self, index): return (Path(self.output_dir) / f"segmentations_{self.segmentation_source}" / self.segmentation_type / self.video_list[self.video_indices[index]]).with_suffix("") def _read_segmentations(self, index, start_frame=None, end_frame=None): segmentations_dir = self._path_to_segmentations(index) if (segmentations_dir / "segmentations.hdf5").exists(): # if random access hdf5 exists (newest), let's use it segmentations, seg_types, seg_names = load_segmentation_list_v2(segmentations_dir / "segmentations.hdf5", start_frame, end_frame) elif (segmentations_dir / "segmentations.pkl").exists(): # segmentations are saved in a single pickle (no random access) segmentations, seg_types, seg_names = load_segmentation_list(segmentations_dir / "segmentations.pkl") if start_frame is not None and end_frame is not None: segmentations = segmentations[start_frame: end_frame] seg_types = seg_types[start_frame: end_frame] seg_names = seg_names[start_frame: end_frame] if isinstance(segmentations, list): segmentations = np.stack(segmentations, axis=0) if segmentations.ndim == 4: # T, C=1, W, H segmentations = segmentations[:,0,...] 
if isinstance(seg_types[0], bytes): seg_types = [seg_type.decode("utf-8") for seg_type in seg_types] if isinstance(seg_names[0], bytes): seg_names = [seg_name.decode("utf-8") for seg_name in seg_names] return segmentations, seg_types, seg_names def _retrieve_segmentations(self, index, start_frame, end_frame): if not self.preload_videos: # segmentations_dir = self._path_to_segmentations(index) # if (segmentations_dir / "segmentations.hdf5").exists(): # random access hdf5 exists, let's use it # segmentations, seg_types, seg_names = load_segmentation_list_v2(segmentations_dir / "segmentations.hdf5", start_frame, end_frame) # elif (segmentations_dir / "segmentations.pkl").exists(): # segmentations are saved in a single pickle # seg_images, seg_types, seg_names = load_segmentation_list(segmentations_dir / "segmentations.pkl") # segmentations = seg_images[start_frame: end_frame] # if isinstance(seg_images, list): # segmentations = np.stack(seg_images, axis=0) # if seg_images.ndim == 4: # T, C=1, W, H # segmentations = segmentations[:,0,...] segmentations, seg_types, seg_names = self._read_segmentations(index, start_frame, end_frame) return segmentations, seg_types, seg_names else: video_path = str(self._get_video_path(index)) segmentations, seg_types, seg_names = self.seg_cache[video_path] segmentations = segmentations[start_frame: end_frame] seg_types = seg_types[start_frame: end_frame] seg_names = seg_names[start_frame: end_frame] return segmentations, seg_types, seg_names def _load_reconstructions(self, index, rec_type, appearance=False, start_frame=None, end_frame=None): reconstructions_dir = self._path_to_reconstructions(index, rec_type) if (reconstructions_dir / "shape_pose_cam.hdf5").exists(): # random access hdf5 exists, let's use it shape_pose_cam = load_reconstruction_list_v2(reconstructions_dir / "shape_pose_cam.hdf5", start_frame=start_frame, end_frame=end_frame) if appearance: appearance = load_reconstruction_list_v2(reconstructions_dir / "appearance.hdf5", start_frame=start_frame, end_frame=end_frame) else: appearance = None elif (reconstructions_dir / "shape_pose_cam.pkl").exists(): # reconstructions are saved in a single pickle shape_pose_cam = load_reconstruction_list(reconstructions_dir / "shape_pose_cam.pkl", start_frame=start_frame, end_frame=end_frame) if appearance: appearance = load_reconstruction_list(reconstructions_dir / "appearance.pkl", start_frame=start_frame, end_frame=end_frame) else: appearance = None ## should no longer be necessary as the start/end frame is now handled in the load_reconstruction_list function # if start_frame is not None and end_frame is not None: # shape_pose_cam = {key: shape_pose_cam[key][:, start_frame: end_frame] for key in shape_pose_cam.keys()} # if appearance is not None: # appearance = {key: appearance[key][:, start_frame: end_frame] for key in appearance.keys()} else: raise RuntimeError(f"Reconstruction file not found in {reconstructions_dir}") # for key in shape_pose_cam.keys(): # shape_pose_cam[key] = np.copy(shape_pose_cam[key]) # for key in appearance.keys(): # appearance[key] = np.copy(appearance[key]) return shape_pose_cam, appearance def _load_emotions(self, index, features=False, start_frame=None, end_frame=None): emotions_dir = self._path_to_emotions(index) if (emotions_dir / "emotions.hdf5").exists(): # random access hdf5 exists, let's use it
""" Author: Radek Danecek Copyright (c) 2023, Radek Danecek All rights reserved. # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is # holder of all proprietary rights on this computer program. # Using this computer program means that you agree to the terms # in the LICENSE file included with this software distribution. # Any use not explicitly granted by the LICENSE is prohibited. # # Copyright©2022 Max-Planck-Gesellschaft zur Förderung # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute # for Intelligent Systems. All rights reserved. # # For comments or questions, please email us at [email protected] # For commercial licensing contact, please contact [email protected] """ class AbstractVideoDataset(torch.utils.data.Dataset): def __init__(self) -> None: super().__init__() def _augment_sequence_sample(self, index, sample): raise NotImplementedError() def visualize_sample(self, sample): raise NotImplementedError() class VideoDatasetBase(AbstractVideoDataset): def __init__(self, root_path, output_dir, video_list, video_metas, video_indices, # audio_paths, audio_metas, sequence_length, audio_noise_prob=0.0, stack_order_audio=4, audio_normalization="layer_norm", landmark_types="mediapipe", segmentation_type = "bisenet", landmark_source = "original", segmentation_source = "original", occlusion_length=0, occlusion_probability_mouth = 0.0, occlusion_probability_left_eye = 0.0, occlusion_probability_right_eye = 0.0, occlusion_probability_face = 0.0, image_size=None, ## the image size that the dataset will output transforms : imgaug.augmenters.Augmenter = None, hack_length=False, use_original_video=True, include_processed_audio = True, include_raw_audio = True, temporal_split_start=None, # if temporally splitting the video (train, val, test), this is the start of the split temporal_split_end=None, # if temporally splitting the video (train, val, test), this is the end of the split preload_videos=False, # cache all videos in memory (recommended for smaller datasets) inflate_by_video_size=False, include_filename=False, # if True includes the filename of the video in the sample align_images = True, use_audio=True, #if True, includes audio in the sample reconstruction_type = None, return_global_pose = False, return_appearance = False, average_shape_decode = False, emotion_type = None, return_emotion_feature=False, read_video=True, read_audio=True, original_image_size=None, ## the processed videos may be different in size and if they are, the landmarks will be, too. This is to remember return_mica_images=False, ) -> None: super().__init__() self.root_path = root_path self.output_dir = output_dir self.video_list = video_list self.video_indices = video_indices self.video_metas = video_metas self.sequence_length = sequence_length or 1 # self.audio_paths = audio_paths self.audio_metas = audio_metas self.audio_noise_prob = audio_noise_prob self.image_size = image_size self.original_image_size = original_image_size or image_size self.scale = 1.25 # if the video is 25 fps and the audio is 16 kHz, stack_order_audio corresponds to 4 # (i.e. 
4 consecutive filterbanks will be concatenated to sync with the visual frame) self.stack_order_audio = stack_order_audio self.audio_normalization = audio_normalization self.landmark_types = landmark_types if isinstance(self.landmark_types, str): self.landmark_types = [self.landmark_types] self.landmark_source = landmark_source if isinstance(self.landmark_source, str): self.landmark_source = [self.landmark_source] * len(self.landmark_types) assert len(self.landmark_types) == len(self.landmark_source), "landmark_types and landmark_source must have the same length" self.segmentation_type = segmentation_type self.segmentation_source = segmentation_source self.landmark_normalizer = KeypointNormalization() # postprocesses final landmarks to be in [-1, 1] self.occluder = MediaPipeFaceOccluder() self.occlusion_probability_mouth = occlusion_probability_mouth self.occlusion_probability_left_eye = occlusion_probability_left_eye self.occlusion_probability_right_eye = occlusion_probability_right_eye self.occlusion_probability_face = occlusion_probability_face self.occlusion_length = occlusion_length if isinstance(self.occlusion_length, int): self.occlusion_length = [self.occlusion_length, self.occlusion_length+1] # self.occlusion_length = [20, 30] self.occlusion_length = sorted(self.occlusion_length) self.include_processed_audio = include_processed_audio self.include_raw_audio = include_raw_audio # self.align_images = True # self.align_images = False self.align_images = align_images self.use_audio = use_audio self.use_original_video = use_original_video self.transforms = transforms or imgaug.augmenters.Resize((image_size, image_size)) self.hack_length = hack_length if self.hack_length == "auto": if self._true_len() < 64: # hacks the length for supersmall test datasets self.hack_length = (64 // self._true_len()) if 64 % self._true_len() != 0: self.hack_length += 1 self.hack_length = float(self.hack_length) # useful hack to repeat the elements in the dataset for really small datasets else: self.hack_length = False assert self.occlusion_length[0] >= 0 # assert self.occlusion_length[1] <= self.sequence_length + 1 self.temporal_split_start = temporal_split_start self.temporal_split_end = temporal_split_end self.preload_videos = preload_videos self.inflate_by_video_size = inflate_by_video_size # self.read_video = True self.read_video = read_video self.read_audio = read_audio self.reconstruction_type = reconstruction_type if self.reconstruction_type is not None: if isinstance(self.reconstruction_type, str): self.reconstruction_type = [self.reconstruction_type] assert isinstance(self.reconstruction_type, list), "reconstruction_type must be a list or None" self.return_global_pose = return_global_pose self.return_appearance = return_appearance self.average_shape_decode = average_shape_decode self.emotion_type = emotion_type self.return_emotion_feature = return_emotion_feature self.video_cache = {} self.audio_cache = {} self.seg_cache = {} self.lmk_cache = {} self.rec_cache = {} self.emo_cache = {} if self.preload_videos: self._preload_videos() self.video_sample_indices = None if self.inflate_by_video_size: self._inflate_by_video_size() self.include_filename = include_filename # if True, face alignment will not crash if invalid. 
By default this should be False to avoid silent data errors self._allow_alignment_fail = False self.return_mica_image = return_mica_images if not self.read_video: assert not bool(self.return_mica_image), "return_mica_image is only supported when read_video is True" if bool(self.return_mica_image): if self.return_mica_image is True: self.return_mica_image = "fan" self.mica_preprocessor = MicaInputProcessor(self.return_mica_image) @property def invalid_cutoff(self): max_cutoff = 0 for rec_type in self.reconstruction_type: if rec_type == "spectre": max_cutoff = max(max_cutoff, 2 ) elif rec_type in ["emoca", "deca"] or "emoca" in rec_type.lower() or "emica" in rec_type.lower() or "edeca" in rec_type.lower(): max_cutoff = max(max_cutoff, 0 ) else: raise ValueError(f"Invalid reconstruction type: '{rec_type}'") return max_cutoff def _load_flame(self): if self.reconstruction_type is not None: flame_cfg = Munch() flame_cfg.type = "flame" flame_cfg.flame = Munch({ "flame_model_path": "/ps/scratch/rdanecek/data/FLAME/geometry/generic_model.pkl" , "n_shape": 100 , # n_exp: 100 "n_exp": 50, "flame_lmk_embedding_path": "/ps/scratch/rdanecek/data/FLAME/geometry/landmark_embedding.npy" }) flame_cfg.use_texture = False self.flame = FlamePreprocessor(flame_cfg) # prep = prep.to("cuda") def _preload_videos(self): # indices = np.unique(self.video_indices) for i in auto.tqdm( range(len(self.video_indices)), desc="Preloading videos" ): video_path = str(self._get_video_path(i)) if self.read_video: if video_path not in self.video_cache: self.video_cache[video_path] = vread(video_path) self.seg_cache[video_path] = self._read_segmentations(i) if self.read_audio: if video_path not in self.audio_cache: self.audio_cache[i] = self._read_audio(i) for lmk_type, lmk_source in zip(self.landmark_types, self.landmark_source): if i not in self.lmk_cache: self.lmk_cache[i] = {} if lmk_type not in self.lmk_cache[i]: self.lmk_cache[i][lmk_type] = {} # if lmk_source not in self.lmk_cache[i][lmk_type]: self.lmk_cache[i][lmk_type][lmk_source] = self._read_landmarks(i, lmk_type, lmk_source) if self.reconstruction_type is not None: for rec_type in self.reconstruction_type: shape_pose_cam, appearance = self._load_reconstructions(i, rec_type, self.return_appearance) if i not in self.rec_cache: self.rec_cache[i] = {} video_dict = self.rec_cache[i] if rec_type not in video_dict: video_dict[rec_type] = {} self.rec_cache[i][rec_type]["shape_pose_cam"] = shape_pose_cam self.rec_cache[i][rec_type]["appearance"] = appearance if self.emotion_type is not None: emotions, features = self._load_emotions(i, features=self.return_emotion_feature) if i not in self.emo_cache: self.emo_cache[i] = {} self.emo_cache[i]["emotions"] = emotions self.emo_cache[i]["features"] = features print("Video cache loaded") def _inflate_by_video_size(self): assert isinstance( self.sequence_length, int), "'sequence_length' must be an integer when inflating by video size" inflated_video_indices = [] video_sample_indices = [] for i in range(len(self.video_indices)): # for i in self.video_indices: idx = self.video_indices[i] num_frames = self._get_num_frames(i) if self.temporal_split_start is not None and self.temporal_split_end is not None: num_frames = int((self.temporal_split_end - self.temporal_split_start) * num_frames) num_samples_in_video = num_frames // self.sequence_length if num_frames % self.sequence_length != 0: num_samples_in_video += 1 num_samples_in_video = max(1, num_samples_in_video) inflated_video_indices += [idx] * num_samples_in_video 
video_sample_indices += list(range(num_samples_in_video)) self.video_indices = np.array(inflated_video_indices, dtype=np.int32) self.video_sample_indices = np.array(video_sample_indices, dtype=np.int32) def __getitem__(self, index): # max_attempts = 10 max_attempts = 50 for i in range(max_attempts): try: return self._getitem(index) except AssertionError as e: if not hasattr(self, "num_total_failed_attempts"): self.num_total_failed_attempts = 0 old_index = index index = np.random.randint(0, self.__len__()) tb = traceback.format_exc() if self.num_total_failed_attempts % 50 == 0: print(f"[ERROR] AssertionError in {self.__class__.__name__} dataset while retrieving sample {old_index}, retrying with new index {index}") print(f"In total, there has been {self.num_total_failed_attempts} failed attempts. This number should be very small. If it's not, check the data.") print("See the exception message for more details.") print(tb) self.num_total_failed_attempts += 1 print("[ERROR] Failed to retrieve sample after {} attempts".format(max_attempts)) raise RuntimeError("Failed to retrieve sample after {} attempts".format(max_attempts)) def _getitem(self, index): time = False if time: start_time = timeit.default_timer() if self.hack_length: index = index % self._true_len() # 1) VIDEO # load the video sample, start_frame, num_read_frames, video_fps, num_frames, num_available_frames = self._get_video(index) if time: video_read_time = timeit.default_timer() - start_time # 2) AUDIO if self.read_audio: sample = self._get_audio(index, start_frame, num_read_frames, video_fps, num_frames, sample) if time: audio_read_time = timeit.default_timer() - start_time - video_read_time # 3) LANDMARKS sample = self._get_landmarks(index, start_frame, num_read_frames, video_fps, num_frames, sample) if time: lmk_read_time = timeit.default_timer() - start_time - video_read_time - audio_read_time # 4) SEGMENTATIONS if self.read_video: sample = self._get_segmentations(index, start_frame, num_read_frames, video_fps, num_frames, sample) if time: seg_read_time = timeit.default_timer() - start_time - video_read_time - audio_read_time - lmk_read_time # 5) FACE ALIGNMENT IF ANY if self.read_video: sample = self._align_faces(index, sample) if time: face_align_time = timeit.default_timer() - start_time - video_read_time - audio_read_time - lmk_read_time - seg_read_time # 6) GEOMETRY if self.reconstruction_type is not None: sample = self._get_reconstructions(index, start_frame, num_read_frames, video_fps, num_frames, sample) if time: geom_read_time = timeit.default_timer() - start_time - video_read_time - audio_read_time - lmk_read_time - seg_read_time - face_align_time # 7) EMOTION if self.emotion_type is not None: sample = self._get_emotions(index, start_frame, num_read_frames, video_fps, num_frames, sample) if time: emo_read_time = timeit.default_timer() - start_time - video_read_time - audio_read_time - lmk_read_time - seg_read_time - face_align_time - geom_read_time # 8) AUGMENTATION if self.read_video: sample = self._augment_sequence_sample(index, sample) if time: aug_time = timeit.default_timer() - start_time - video_read_time - audio_read_time - lmk_read_time - seg_read_time - face_align_time - geom_read_time - emo_read_time # TO TORCH sample = to_torch(sample) # AUDIO NORMALIZATION (if any), this is a remnant from av-hubert and is not being used anywhere, will be removed in the future if self.read_audio: if self.include_processed_audio: if self.audio_normalization is not None: if self.audio_normalization == "layer_norm": 
sample["audio"] = F.layer_norm(sample["audio"], sample["audio"].shape[1:]) else: raise ValueError(f"Unsupported audio normalization {self.audio_normalization}") # audio_process_time = timeit.default_timer() - start_time - video_read_time - audio_read_time - lmk_read_time - seg_read_time - face_align_time - geom_read_time - emo_read_time - aug_time if self.read_video: # T,H,W,C to T,C,H,W sample["video"] = sample["video"].permute(0, 3, 1, 2) if "video_masked" in sample.keys(): sample["video_masked"] = sample["video_masked"].permute(0, 3, 1, 2) # sample["segmenation"] = sample["segmenation"].permute(0, 2, 1) # sample["segmentation_masked"] = sample["segmentation_masked"].permute(0, 2, 1) if self.return_mica_image: fan_landmarks = None landmarks_validity = None if "landmarks" in sample.keys(): if isinstance(sample["landmarks"], dict): if "fan3d" in sample["landmarks"].keys(): fan_landmarks = sample["landmarks"]["fan3d"] landmarks_validity = sample["landmarks_validity"]["fan3d"] elif "fan" in sample["landmarks"].keys(): fan_landmarks = sample["landmarks"]["fan"] landmarks_validity = sample["landmarks_validity"]["fan"] elif isinstance(sample["landmarks"], (np.ndarray, torch.Tensor)): if sample["landmarks"].shape[1] == 68: fan_landmarks = sample["landmarks"] landmarks_validity = sample["landmarks_validity"] sample["mica_video"] = self.mica_preprocessor(sample["video"], fan_landmarks, landmarks_validity=landmarks_validity) sample["mica_video_masked"] = self.mica_preprocessor(sample["video_masked"], fan_landmarks, landmarks_validity=landmarks_validity) # # normalize landmarks # if self.landmark_normalizer is not None: # if isinstance(self.landmark_normalizer, KeypointScale): # raise NotImplementedError("Landmark normalization is deprecated") # self.landmark_normalizer.set_scale( # img.shape[0] / input_img_shape[0], # img.shape[1] / input_img_shape[1]) # elif isinstance(self.landmark_normalizer, KeypointNormalization): # self.landmark_normalizer.set_scale(sample["video"].shape[2], sample["video"].shape[3]) # else: # raise ValueError(f"Unsupported landmark normalizer type: {type(self.landmark_normalizer)}") # for key in sample["landmarks"].keys(): # sample["landmarks"][key] = self.landmark_normalizer(sample["landmarks"][key]) if time: print(f"Video read time: {video_read_time:.2f} s") print(f"Audio read time: {audio_read_time:.2f} s") print(f"Landmark read time: {lmk_read_time:.2f} s") print(f"Segmentation read time: {seg_read_time:.2f} s") print(f"Face alignment time: {face_align_time:.2f} s") print(f"Geometry read time: {geom_read_time:.2f} s") print(f"Emotion read time: {emo_read_time:.2f} s") print(f"Augmentation time: {aug_time:.2f} s") # print(f"Audio process time: {audio_process_time:.2f} s") print(f"Total read time: {timeit.default_timer() - start_time:.2f} s") return sample def _get_video_path(self, index): if self.use_original_video: video_path = self.root_path / self.video_list[self.video_indices[index]] else: video_path = Path(self.output_dir) / "videos_aligned" / self.video_list[self.video_indices[index]] return video_path def _get_audio_path(self, index): audio_path = (Path(self.output_dir) / "audio" / self.video_list[self.video_indices[index]]).with_suffix(".wav") return audio_path def _get_num_frames(self, index): video_meta = self.video_metas[self.video_indices[index]] # print("Video path: ", video_path) # num video frames num_frames = video_meta["num_frames"] video_path = self._get_video_path(index) if num_frames == 0: # use ffprobe to get the number of frames num_frames = 
int(subprocess.check_output(["ffprobe", "-v", "error", "-select_streams", "v:0", "-count_packets", "-show_entries", "stream=nb_read_packets", "-of", "csv=p=0", str(video_path)])) if num_frames == 0: _vr = FFmpegReader(str(video_path)) num_frames = _vr.getShape()[0] del _vr return num_frames def _get_sample_length(self, index): if isinstance(self.sequence_length, int): # if sequence length set, use it return self.sequence_length elif isinstance(self.sequence_length, str): # otherwise use the one from the metadata if self.sequence_length == "all": if self.temporal_split_start is not None and self.temporal_split_end is not None: num_frames = self._get_num_frames(index) temporal_split_start_frame = int(self.temporal_split_start * num_frames) temporal_split_end_frame = int(self.temporal_split_end * num_frames) return temporal_split_end_frame - temporal_split_start_frame else: num_frames = self._get_num_frames(index) else: raise ValueError(f"Unsupported sequence length value: '{self.sequence_length}'") return num_frames raise def _get_video(self, index): video_path = self._get_video_path(index) video_meta = self.video_metas[self.video_indices[index]] # print("Video path: ", video_path) # num video frames num_frames = self._get_num_frames(index) assert num_frames > 0, "Number of frames is 0 for video {}".format(video_path) video_fps = video_meta["fps"] n1, n2 = video_fps.split("/") n1 = int(n1) n2 = int(n2) assert n1 % n2 == 0 video_fps = n1 // n2 # assert num_frames >= self.sequence_length, f"Video {video_path} has only {num_frames} frames, but sequence length is {self.sequence_length}" # TODO: handle the case when sequence length is longer than the video length sequence_length = self._get_sample_length(index) # pick the starting video frame if self.temporal_split_start is not None and self.temporal_split_end is not None: temporal_split_start = int(self.temporal_split_start * num_frames) temporal_split_end = int(self.temporal_split_end * num_frames) num_available_frames = temporal_split_end - temporal_split_start # start_frame = np.random.randint(temporal_split_start, temporal_split_end - sequence_length) else: temporal_split_start = 0 temporal_split_end = num_frames num_available_frames = num_frames if num_available_frames <= sequence_length: start_frame = temporal_split_start else: if self.video_sample_indices is None: # one video is one sample start_frame = np.random.randint(temporal_split_start, temporal_split_end - sequence_length) else: # one video is multiple samples (as many as the sequence length allows without repetition) start_frame = temporal_split_start + (self.video_sample_indices[index] * sequence_length) # start_frame = np.random.randint(0, num_frames - sequence_length) sample = {} if self.include_filename: sample["filename"] = str(video_path) sample["fps"] = video_fps # include the fps in the sample # TODO: picking the starting frame should probably be done a bit more robustly # (e.g. by ensuring the sequence has at least some valid landmarks) ... 
# maybe the video should be skipped altogether if it can't provide that # load the frames # frames = [] # for i in range(start_frame, start_frame + sequence_length): # frame_path = video_path / f"frame_{i:04d}.jpg" # frame = imread(str(frame_path)) # frames.append(frame) assert video_path.is_file(), f"Video {video_path} does not exist" num_read_frames = self._get_sample_length(index) num_read_frames_ = self._get_sample_length(index) if self.read_video: num_read_frames = 0 try: if not self.preload_videos: # import timeit # start_time = timeit.default_timer() # frames = vread(video_path.as_posix()) # end_time = timeit.default_timer() # print(f"Video read time: {end_time - start_time:.2f} s") # from decord import VideoReader # from decord import cpu, gpu # start_time = timeit.default_timer() vr = VideoReader(video_path.as_posix(), ctx=cpu(0), width=self.image_size, height=self.image_size) if len(vr) < sequence_length: sequence_length_ = len(vr) else: sequence_length_ = sequence_length frames = vr.get_batch(range(start_frame,(start_frame + sequence_length_))) frames = frames.asnumpy() if sequence_length_ < sequence_length: # pad with zeros if video shorter than sequence length frames = np.concatenate([frames, np.zeros((sequence_length - frames.shape[0], frames.shape[1], frames.shape[2], frames.shape[3]), dtype=frames.dtype)]) # end_time = timeit.default_timer() # print(f"Video read time: {end_time - start_time:.2f} s") else: frames = self.video_cache[video_path.as_posix()] assert len(frames) == num_frames, f"Video {video_path} has {len(frames)} frames, but meta says it has {num_frames}" frames = frames[start_frame:(start_frame + sequence_length)] num_read_frames = frames.shape[0] # # plot frames # import matplotlib.pyplot as plt # frame_idx = 0 # plt.figure() # plt.imshow(frames[frame_idx]) # plt.show() if frames.shape[0] < sequence_length: # pad with zeros if video shorter than sequence length frames = np.concatenate([frames, np.zeros((sequence_length - frames.shape[0], frames.shape[1], frames.shape[2]), dtype=frames.dtype)]) except ValueError: # reader = vreader(video_path.as_posix()) # create an opencv video reader reader = cv2.VideoCapture(video_path.as_posix()) fi = 0 frames = [] while fi < start_frame: fi += 1 # _ = next(reader) _, frame = reader.read() for i in range(sequence_length): # frames.append(next(reader)) if reader.isOpened(): _, frame = reader.read() if frame is None: # frame = np.zeros((self.image_size, self.image_size, 3), dtype=np.uint8) frame = np.zeros_like(frames[0]) frames.append(frame) continue num_read_frames += 1 # bgr to rgb frame = frame[:, :, ::-1] else: # if we ran out of frames, pad with black frame = np.zeros_like(frames[0]) frames.append(frame) reader.release() frames = np.stack(frames, axis=0) frames = frames.astype(np.float32) / 255.0 # sample = { sample["video"] = frames sample["frame_indices"] = np.arange(start_frame, start_frame + sequence_length, dtype=np.int32) if num_read_frames_ != num_read_frames: print(f"[Warning]: read {num_read_frames} frames instead of {num_read_frames_} for video {video_path}") return sample, start_frame, num_read_frames, video_fps, num_frames, num_available_frames def _read_audio(self, index): # audio_path = (Path(self.output_dir) / "audio" / self.video_list[self.video_indices[index]]).with_suffix(".wav") audio_path = self._get_audio_path(index) # audio_meta = self.audio_metas[self.video_indices[index]] # load the audio # if self.include_raw_audio: sampling_rate = 16000 wavdata, sampling_rate = librosa.load(audio_path, 
sr=sampling_rate) # wavdata, sampling_rate = librosa.load(audio_path, sr=sampling_rate) if wavdata.ndim > 1: wavdata = librosa.to_mono(wavdata) wavdata = (wavdata.astype(np.float64) * 32768.0).astype(np.int16) return wavdata, sampling_rate def _get_audio(self, index, start_frame, num_read_frames, video_fps, num_frames, sample): if self.preload_videos: wavdata, sampling_rate = self.audio_cache[index] else: wavdata, sampling_rate = self._read_audio(index) sequence_length = self._get_sample_length(index) # audio augmentation if np.random.rand() < self.audio_noise_prob: wavdata = self.add_noise(wavdata) if self.include_processed_audio: # sampling_rate, wavdata = wavfile.read(audio_path.as_posix()) # assert samplerate == 16000 and len(wavdata.shape) == 1 audio_feats = logfbank(wavdata, samplerate=sampling_rate).astype(np.float32) # [T (num audio frames), F (num filters)] # the audio feats frequency (and therefore num frames) is too high, so we stack them together to match num visual frames audio_feats = stacker(audio_feats, self.stack_order_audio) # audio_feats = audio_feats[start_frame:(start_frame + sequence_length)] audio_feats = audio_feats[start_frame:(start_frame + num_read_frames)] # temporal pad with zeros if necessary to match the desired video length if audio_feats.shape[0] < sequence_length: # concatente with zeros audio_feats = np.concatenate([audio_feats, np.zeros((sequence_length - audio_feats.shape[0], audio_feats.shape[1]), dtype=audio_feats.dtype)], axis=0) # stack the frames and audio feats together sample["audio"] = audio_feats if self.include_raw_audio: assert sampling_rate % video_fps == 0 wav_per_frame = sampling_rate // video_fps wavdata_ = np.zeros((num_frames, wav_per_frame), dtype=wavdata.dtype) wavdata_ = wavdata_.reshape(-1) if wavdata.size > wavdata_.size: wavdata_[...] 
= wavdata[:wavdata_.size] else: wavdata_[:wavdata.size] = wavdata wavdata_ = wavdata_.reshape((num_frames, wav_per_frame)) wavdata_ = wavdata_[start_frame:(start_frame + num_read_frames)] if wavdata_.shape[0] < sequence_length: # concatente with zeros wavdata_ = np.concatenate([wavdata_, np.zeros((sequence_length - wavdata_.shape[0], wavdata_.shape[1]), dtype=wavdata_.dtype)], axis=0) wavdata_ = wavdata_.astype(np.float64) / np.int16(np.iinfo(np.int16).max) # wavdata_ = np.zeros((sequence_length, samplerate // video_fps), dtype=wavdata.dtype) # wavdata_ = np.zeros((n * frames.shape[0]), dtype=wavdata.dtype) # wavdata_[:wavdata.shape[0]] = wavdata # wavdata_ = wavdata_.reshape((frames.shape[0], -1)) sample["raw_audio"] = wavdata_ sample["samplerate"] = sampling_rate return sample def _path_to_landmarks(self, index, landmark_type, landmark_source): return (Path(self.output_dir) / f"landmarks_{landmark_source}" / landmark_type / self.video_list[self.video_indices[index]]).with_suffix("") def _read_landmarks(self, index, landmark_type, landmark_source): landmarks_dir = self._path_to_landmarks(index, landmark_type, landmark_source) landmark_list = FaceDataModuleBase.load_landmark_list(landmarks_dir / f"landmarks_{landmark_source}.pkl") return landmark_list def _get_landmarks(self, index, start_frame, num_read_frames, video_fps, num_frames, sample): sequence_length = self._get_sample_length(index) landmark_dict = {} landmark_validity_dict = {} for lti, landmark_type in enumerate(self.landmark_types): landmark_source = self.landmark_source[lti] landmarks_dir = self._path_to_landmarks(index, landmark_type, landmark_source) landmarks = [] if (landmarks_dir / "landmarks.pkl").exists(): # landmarks are saved per video in a single file # landmark_list = FaceDataModuleBase.load_landmark_list(landmarks_dir / "landmarks.pkl") # landmark_list = FaceDataModuleBase.load_landmark_list(landmarks_dir / "landmarks_original.pkl") if not self.preload_videos: # landmark_list = FaceDataModuleBase.load_landmark_list(landm?arks_dir / f"landmarks_{landmark_source}.pkl") landmark_list = self._read_landmarks(index, landmark_type, landmark_source) # landmark_types = FaceDataModuleBase.load_landmark_list(landmarks_dir / "landmark_types.pkl") else: landmark_list = self.lmk_cache[index][landmark_type][landmark_source] # landmark_types = self.lmk_cache[index]["landmark_types"] landmarks = landmark_list[start_frame: sequence_length + start_frame] landmark_validity = np.ones((len(landmarks), 1), dtype=np.float32) for li in range(len(landmarks)): if len(landmarks[li]) == 0: # dropped detection if landmark_type == "mediapipe": # [WARNING] mediapipe landmarks coordinates are saved in the scale [0.0-1.0] (for absolute they need to be multiplied by img size) landmarks[li] = np.zeros((MEDIAPIPE_LANDMARK_NUMBER, 3)) elif landmark_type in ["fan", "kpt68"]: landmarks[li] = np.zeros((68, 2)) else: raise ValueError(f"Unknown landmark type '{landmark_type}'") landmark_validity[li] = 0. 
elif len(landmarks[li]) > 1: # multiple faces detected landmarks[li] = landmarks[li][0] # just take the first one for now else: \ landmarks[li] = landmarks[li][0] # # pad landmarks with zeros if necessary to match the desired video length # # if landmarks.shape[0] < sequence_length: # if len(landmarks) < sequence_length: # # concatente with zeros # landmarks += [np.zeros((landmarks.shape[1]))] * (sequence_length - len(landmarks)) # landmarks = np.concatenate([landmarks, np.zeros((sequence_length - landmarks.shape[0], landmarks.shape[1]))], axis=0) # landmark_validity = np.concatenate([landmark_validity, np.zeros((sequence_length - landmark_validity.shape[0]), dtype=np.bool)], axis=0) else: # landmarks are saved per frame landmark_validity = np.ones((len(landmarks), 1), dtype=np.float32) for i in range(start_frame, sequence_length + start_frame): landmark_path = landmarks_dir / f"{i:05d}_000.pkl" landmark_type, landmark = load_landmark(landmark_path) landmarks += [landmark] if len(landmark) == 0: # dropped detection landmark = [0, 0] landmark_validity[li] = 0. elif len(landmark) > 1: # multiple faces detected landmarks[li] = landmarks[li][0] # just take the first one for now else: landmark[li] = landmarks[li][0] landmarks = np.stack(landmarks, axis=0) # if landmark_type == "mediapipe" and self.align_images: # # # [WARNING] mediapipe landmarks coordinates are saved in the scale [0.0-1.0] (for absolute they need to be multiplied by img size) # # # landmarks -= 0.5 # # landmarks -= 1. # landmarks *= 2 # # # landmarks *= 2 # landmarks -= 1 # pad landmarks with zeros if necessary to match the desired video length if landmarks.shape[0] < sequence_length: landmarks = np.concatenate([landmarks, np.zeros( (sequence_length - landmarks.shape[0], *landmarks.shape[1:]), dtype=landmarks.dtype)], axis=0) landmark_validity = np.concatenate([landmark_validity, np.zeros((sequence_length - landmark_validity.shape[0], 1), dtype=landmark_validity.dtype)], axis=0) landmark_dict[landmark_type] = landmarks.astype(np.float32) landmark_validity_dict[landmark_type] = landmark_validity sample["landmarks"] = landmark_dict sample["landmarks_validity"] = landmark_validity_dict return sample def _path_to_segmentations(self, index): return (Path(self.output_dir) / f"segmentations_{self.segmentation_source}" / self.segmentation_type / self.video_list[self.video_indices[index]]).with_suffix("") def _read_segmentations(self, index, start_frame=None, end_frame=None): segmentations_dir = self._path_to_segmentations(index) if (segmentations_dir / "segmentations.hdf5").exists(): # if random access hdf5 exists (newest), let's use it segmentations, seg_types, seg_names = load_segmentation_list_v2(segmentations_dir / "segmentations.hdf5", start_frame, end_frame) elif (segmentations_dir / "segmentations.pkl").exists(): # segmentations are saved in a single pickle (no random access) segmentations, seg_types, seg_names = load_segmentation_list(segmentations_dir / "segmentations.pkl") if start_frame is not None and end_frame is not None: segmentations = segmentations[start_frame: end_frame] seg_types = seg_types[start_frame: end_frame] seg_names = seg_names[start_frame: end_frame] if isinstance(segmentations, list): segmentations = np.stack(segmentations, axis=0) if segmentations.ndim == 4: # T, C=1, W, H segmentations = segmentations[:,0,...] 
if isinstance(seg_types[0], bytes): seg_types = [seg_type.decode("utf-8") for seg_type in seg_types] if isinstance(seg_names[0], bytes): seg_names = [seg_name.decode("utf-8") for seg_name in seg_names] return segmentations, seg_types, seg_names def _retrieve_segmentations(self, index, start_frame, end_frame): if not self.preload_videos: # segmentations_dir = self._path_to_segmentations(index) # if (segmentations_dir / "segmentations.hdf5").exists(): # random access hdf5 exists, let's use it # segmentations, seg_types, seg_names = load_segmentation_list_v2(segmentations_dir / "segmentations.hdf5", start_frame, end_frame) # elif (segmentations_dir / "segmentations.pkl").exists(): # segmentations are saved in a single pickle # seg_images, seg_types, seg_names = load_segmentation_list(segmentations_dir / "segmentations.pkl") # segmentations = seg_images[start_frame: end_frame] # if isinstance(seg_images, list): # segmentations = np.stack(seg_images, axis=0) # if seg_images.ndim == 4: # T, C=1, W, H # segmentations = segmentations[:,0,...] segmentations, seg_types, seg_names = self._read_segmentations(index, start_frame, end_frame) return segmentations, seg_types, seg_names else: video_path = str(self._get_video_path(index)) segmentations, seg_types, seg_names = self.seg_cache[video_path] segmentations = segmentations[start_frame: end_frame] seg_types = seg_types[start_frame: end_frame] seg_names = seg_names[start_frame: end_frame] return segmentations, seg_types, seg_names def _load_reconstructions(self, index, rec_type, appearance=False, start_frame=None, end_frame=None): reconstructions_dir = self._path_to_reconstructions(index, rec_type) if (reconstructions_dir / "shape_pose_cam.hdf5").exists(): # random access hdf5 exists, let's use it shape_pose_cam = load_reconstruction_list_v2(reconstructions_dir / "shape_pose_cam.hdf5", start_frame=start_frame, end_frame=end_frame) if appearance: appearance = load_reconstruction_list_v2(reconstructions_dir / "appearance.hdf5", start_frame=start_frame, end_frame=end_frame) else: appearance = None elif (reconstructions_dir / "shape_pose_cam.pkl").exists(): # reconstructions are saved in a single pickle shape_pose_cam = load_reconstruction_list(reconstructions_dir / "shape_pose_cam.pkl", start_frame=start_frame, end_frame=end_frame) if appearance: appearance = load_reconstruction_list(reconstructions_dir / "appearance.pkl", start_frame=start_frame, end_frame=end_frame) else: appearance = None ## should no longer be necessary as the start/end frame is now handled in the load_reconstruction_list function # if start_frame is not None and end_frame is not None: # shape_pose_cam = {key: shape_pose_cam[key][:, start_frame: end_frame] for key in shape_pose_cam.keys()} # if appearance is not None: # appearance = {key: appearance[key][:, start_frame: end_frame] for key in appearance.keys()} else: raise RuntimeError(f"Reconstruction file not found in {reconstructions_dir}") # for key in shape_pose_cam.keys(): # shape_pose_cam[key] = np.copy(shape_pose_cam[key]) # for key in appearance.keys(): # appearance[key] = np.copy(appearance[key]) return shape_pose_cam, appearance def _load_emotions(self, index, features=False, start_frame=None, end_frame=None): emotions_dir = self._path_to_emotions(index) if (emotions_dir / "emotions.hdf5").exists(): # random access hdf5 exists, let's use it
emotions = load_emotion_list_v2(emotions_dir / "emotions.hdf5", start_frame, end_frame)
13
2023-11-07 20:13:32+00:00
16k
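The `_get_audio` logic in the sample above packs the raw waveform into one fixed-size chunk per video frame (`sampling_rate // video_fps` samples each), then crops the chunks to the sampled window and zero-pads up to the sequence length. Below is a minimal NumPy sketch of just that packing step, assuming a mono waveform and an integer samples-per-frame ratio; the helper name `pack_raw_audio_per_frame` and the demo values are hypothetical, not part of the original code.

```python
import numpy as np

def pack_raw_audio_per_frame(wavdata: np.ndarray, sampling_rate: int, video_fps: int,
                             num_frames: int, start_frame: int, sequence_length: int) -> np.ndarray:
    """Split a mono waveform into one chunk per video frame, mirroring _get_audio."""
    assert sampling_rate % video_fps == 0, "expects an integer number of samples per frame"
    wav_per_frame = sampling_rate // video_fps
    # Allocate one audio chunk per video frame and copy (or truncate) the waveform into it.
    packed = np.zeros(num_frames * wav_per_frame, dtype=wavdata.dtype)
    n = min(wavdata.size, packed.size)
    packed[:n] = wavdata[:n]
    packed = packed.reshape(num_frames, wav_per_frame)
    # Crop to the sampled window and zero-pad up to the requested sequence length.
    window = packed[start_frame:start_frame + sequence_length]
    if window.shape[0] < sequence_length:
        pad = np.zeros((sequence_length - window.shape[0], wav_per_frame), dtype=window.dtype)
        window = np.concatenate([window, pad], axis=0)
    return window

if __name__ == "__main__":
    wav = np.random.randint(-2**15, 2**15, size=16000 * 3, dtype=np.int16)  # 3 s of fake int16 audio
    chunks = pack_raw_audio_per_frame(wav, sampling_rate=16000, video_fps=25,
                                      num_frames=75, start_frame=10, sequence_length=32)
    print(chunks.shape)  # (32, 640)
```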
hxz393/ConfigCenterComparer
ui/action_compare.py
[ { "identifier": "get_resource_path", "path": "lib/get_resource_path.py", "snippet": "def get_resource_path(relative_path: Union[str, os.PathLike]) -> Optional[str]:\n \"\"\"\n 获取资源的绝对路径。这个函数适用于 PyInstaller 打包后的可执行文件。\n\n :type relative_path: Union[str, os.PathLike]\n :param relative_path: 相对路径,可以是字符串或 os.PathLike 对象。\n :rtype: Optional[str]\n :return: 资源的绝对路径,如果发生错误则返回 None。\n \"\"\"\n\n try:\n base_path = getattr(sys, '_MEIPASS', os.path.abspath(\".\"))\n return os.path.join(base_path, os.path.normpath(relative_path))\n except Exception:\n logger.exception(\"An error occurred while retrieving resource path\")\n return None" }, { "identifier": "log_time", "path": "lib/log_time.py", "snippet": "def log_time(func: Callable) -> Callable:\n \"\"\"\n 一个装饰器,用于记录被装饰函数的运行时间。\n\n 此装饰器在函数执行前后记录时间,计算并记录函数的运行时间。如果函数执行期间出现异常,将记录异常并返回 None。\n\n :param func: 被装饰的函数。\n :type func: Callable\n :return: 包装后的函数。\n :rtype: Callable\n\n :example:\n >>> @log_time\n ... def test_function():\n ... time.sleep(1)\n ...\n >>> test_function() # 这将记录 test_function 的运行时间\n \"\"\"\n\n @wraps(func)\n def wrapper(*args, **kwargs) -> Any:\n \"\"\"\n 包装函数,用于实际执行被装饰的函数并计算其运行时间。\n\n 此函数首先记录开始时间,然后尝试执行原始函数,最后记录结束时间并计算运行时长。如果在执行过程中出现异常,会记录异常信息。\n\n :param args: 原始函数的位置参数。\n :param kwargs: 原始函数的关键字参数。\n :return: 原始函数的返回值,如果出现异常则返回 None。\n :rtype: Any\n \"\"\"\n start_time = time.time()\n try:\n result = func(*args, **kwargs)\n except Exception as e:\n logger.exception(f\"Exception occurred in {func.__name__}: {e}\")\n return None\n else:\n end_time = time.time()\n logger.debug(f\"{func.__name__} executed in {end_time - start_time:.2f} seconds.\")\n return result\n\n return wrapper" }, { "identifier": "ConfigManager", "path": "ui/config_manager.py", "snippet": "class ConfigManager(QObject):\n \"\"\"\n 配置管理器类,负责管理和更新应用程序的配置信息。\n\n 该类包括获取和设置主配置、连接配置和跳过列表的方法,同时提供信号以通知配置更新。\n\n :ivar config_main_updated: 当主配置更新时发出的信号。\n :ivar config_connection_updated: 当连接配置更新时发出的信号。\n :ivar skip_list_updated: 当跳过列表更新时发出的信号。\n \"\"\"\n config_main_updated = pyqtSignal()\n config_connection_updated = pyqtSignal()\n skip_list_updated = pyqtSignal()\n\n def __init__(self):\n super().__init__()\n self._config_main, self._config_apollo, self._config_nacos = read_config_all()\n self._skip_list = read_file_to_list(CONFIG_SKIP_PATH) or []\n\n def get_config_main(self) -> Optional[Dict[str, str]]:\n \"\"\"\n 获取主配置的副本。\n\n :return: 包含主配置的字典,如果出现错误则返回 None。\n :rtype: Optional[Dict[str, str]]\n \"\"\"\n try:\n return copy.deepcopy(self._config_main)\n except Exception:\n logger.exception(\"Failed to get config_main.\")\n return None\n\n def get_config_connection(self) -> Optional[Dict[str, Dict[str, Union[Dict[str, str], bool]]]]:\n \"\"\"\n 根据当前配置中心获取连接配置的副本。\n\n :return: 包含连接配置的字典,如果出现错误则返回 None。\n :rtype: Optional[Dict[str, Dict[str, Union[Dict[str, str], bool]]]]\n \"\"\"\n try:\n if self._config_main['config_center'] == 'Apollo':\n return copy.deepcopy(self._config_apollo)\n else:\n return copy.deepcopy(self._config_nacos)\n except Exception:\n logger.exception(\"Failed to get config_connection.\")\n return None\n\n def get_skip_list(self) -> Optional[List[str]]:\n \"\"\"\n 获取忽略列表的副本。\n\n :return: 包含跳过项的列表,如果出现错误则返回 None。\n :rtype: Optional[List[str]]\n \"\"\"\n try:\n return copy.deepcopy(self._skip_list)\n except Exception:\n logger.exception(\"Failed to get skip_list.\")\n return None\n\n def update_config_main(self, new_config: Dict[str, str]) -> None:\n \"\"\"\n 更新主配置。\n\n :param new_config: 新的主配置。\n :type new_config: Dict[str, str]\n \"\"\"\n try:\n 
self._config_main = new_config\n self.config_main_updated.emit()\n write_dict_to_json(CONFIG_MAIN_PATH, new_config)\n logger.info(\"Config updated: config_main\")\n except Exception:\n logger.exception(\"Failed to update config: config_main\")\n\n def update_config_connection(self, new_config: Dict[str, Dict[str, Union[Dict[str, str], bool]]]) -> None:\n \"\"\"\n 更新连接配置。\n\n :param new_config: 新的连接配置。\n :type new_config: Dict[str, Dict[str, Union[Dict[str, str], bool]]]\n \"\"\"\n try:\n if self._config_main['config_center'] == 'Apollo':\n self._config_apollo = new_config\n write_dict_to_json(CONFIG_APOLLO_PATH, new_config)\n else:\n self._config_nacos = new_config\n write_dict_to_json(CONFIG_NACOS_PATH, new_config)\n self.config_connection_updated.emit()\n logger.info(\"Config updated: config_connection\")\n except Exception:\n logger.exception(\"Failed to update config: config_connection\")\n\n def update_skip_list(self, new_config: List[str]) -> None:\n \"\"\"\n 更新忽略列表。\n\n :param new_config: 新忽略列表。\n :type new_config: List[str]\n \"\"\"\n try:\n self._skip_list = new_config\n # 写入到配置文件\n self.skip_list_updated.emit()\n write_list_to_file(CONFIG_SKIP_PATH, new_config)\n logger.info(\"Config updated: skip_list\")\n except Exception:\n logger.exception(\"Failed to update config: skip_list\")" }, { "identifier": "DialogComparison", "path": "ui/dialog_comparison.py", "snippet": "class DialogComparison(QDialog):\n \"\"\"\n 对话框类,用于展示不同环境下配置的自我比较结果。\n\n :param lang_manager: 语言管理器实例,用于处理语言相关设置。\n :type lang_manager: LangManager\n :param config_manager: 配置管理器实例,用于管理配置。\n :type config_manager: ConfigManager\n :param data: 包含环境配置比较结果的字典。\n :type data: Dict[str, Dict[str, List[Dict[str, str]]]]\n \"\"\"\n status_updated = pyqtSignal(str)\n\n def __init__(self,\n lang_manager: LangManager,\n config_manager: ConfigManager,\n data: Dict[str, Dict[str, List[Dict[str, str]]]]):\n super().__init__(flags=Qt.Dialog | Qt.WindowCloseButtonHint)\n self.lang_manager = lang_manager\n self.lang_manager.lang_updated.connect(self.update_lang)\n self.config_manager = config_manager\n self.lang = self.lang_manager.get_lang()\n self.data = data\n\n self.initUI()\n\n def initUI(self) -> None:\n \"\"\"\n 初始化用户界面组件。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n # 运行语言配置和设置窗口\n self.setWindowIcon(QIcon(get_resource_path('media/icons8-diff-files-26.png')))\n self.setMinimumSize(1000, 480)\n self.setStyleSheet(\"font-size: 14px;\")\n # 设置主布局\n self.layout = QVBoxLayout(self)\n self.layout.setContentsMargins(0, 0, 0, 0)\n # 创建过滤栏\n filter_bar = self._create_filter_bar()\n self.layout.addWidget(filter_bar)\n # 加入横向分割线\n separator = QFrame()\n separator.setFrameShape(QFrame.HLine)\n separator.setFrameShadow(QFrame.Sunken)\n self.layout.addWidget(separator)\n # 运行语言配置,创建表格要用到\n self.update_lang()\n # 创建标签页\n tab_widget = QTabWidget()\n self.layout.addWidget(tab_widget)\n for env in self.env_keys:\n tab_widget.addTab(self._create_tab(env), env)\n except Exception:\n logger.exception(\"Failed to initialize DialogComparison UI components\")\n self.status_updated.emit(self.lang['label_status_error'])\n\n def _create_filter_bar(self) -> QWidget:\n \"\"\"\n 创建过滤栏组件。包含公共配置标记和搜索功能。\n\n :rtype: QWidget\n :return: 返回过滤栏组件。\n \"\"\"\n # 建立横向过滤器布局\n filter_bar = QWidget()\n layout = QHBoxLayout(filter_bar)\n layout.setContentsMargins(10, 10, 10, 0)\n filter_bar.setLayout(layout)\n\n # 建立标签,加入布局\n self.public_label = QLabel()\n layout.addWidget(self.public_label)\n # 设置输入框\n self.public_box = QLineEdit()\n 
self.public_box.returnPressed.connect(self.set_public)\n self.public_box.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)\n self.public_box.setMinimumWidth(100)\n self.public_box.setMaximumWidth(200)\n layout.addWidget(self.public_box)\n # 设置按钮\n self.public_button = QPushButton()\n self.public_button.clicked.connect(self.set_public)\n layout.addWidget(self.public_button)\n\n # 加入分割线\n separator = QFrame()\n separator.setFrameShape(QFrame.VLine)\n separator.setFrameShadow(QFrame.Raised)\n layout.addWidget(separator)\n\n # 建立标签,加入布局\n self.search_label = QLabel()\n layout.addWidget(self.search_label)\n # 设置输入框\n self.search_box = QLineEdit()\n self.search_box.returnPressed.connect(self.search_value)\n self.search_box.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)\n self.search_box.setMinimumWidth(100)\n layout.addWidget(self.search_box)\n # 设置按钮\n self.search_button = QPushButton()\n self.search_button.clicked.connect(self.search_value)\n layout.addWidget(self.search_button)\n\n return filter_bar\n\n def _create_tab(self, env: str) -> QWidget:\n \"\"\"\n 创建一个标签页。\n\n :param env: 环境名。\n :type env: str\n\n :rtype: QWidget\n :return: 返回标签页。\n \"\"\"\n tab = QWidget()\n tab_layout = QVBoxLayout(tab)\n table = self._create_table(self.data.get(env, {}))\n tab_layout.addWidget(table)\n\n # 为每个 table 实例化 ActionCopy 和 ActionSave\n table.actionCopy = ActionCopy(self.lang_manager, table)\n table.actionCopy.status_updated.connect(self.forward_status)\n table.actionSave = ActionSave(self.lang_manager, table)\n table.actionSave.status_updated.connect(self.forward_status)\n # 为每个 table 创建右键菜单\n table.setContextMenuPolicy(Qt.CustomContextMenu)\n table.customContextMenuRequested.connect(self._cell_context_menu)\n\n return tab\n\n def _cell_context_menu(self, pos: QPoint) -> None:\n \"\"\"\n 实现表格单元格的右键菜单功能。\n\n :param pos: 右键点击的位置。\n :type pos: QPoint\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n sender = self.sender()\n # 确定sender是QTableWidget,且拥有actionCopy和actionSave属性\n if isinstance(sender, QTableWidget):\n if hasattr(sender, 'actionCopy') and hasattr(sender, 'actionSave'):\n copy = getattr(sender, 'actionCopy')\n save = getattr(sender, 'actionSave')\n menu = QMenu(sender)\n menu.addAction(copy.action_copy)\n menu.addAction(save.action_save)\n menu.exec_(sender.viewport().mapToGlobal(pos))\n\n def update_lang(self) -> None:\n \"\"\"\n 更新界面语言设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.lang = self.lang_manager.get_lang()\n self.setWindowTitle(self.lang['ui.dialog_comparison_1'])\n # 更新标签页\n self.env_keys = [\n self.lang['ui.dialog_settings_connection_2'],\n self.lang['ui.dialog_settings_connection_3'],\n self.lang['ui.dialog_settings_connection_4'],\n self.lang['ui.dialog_settings_connection_5']\n ]\n self._update_tab_titles(self.env_keys)\n # 更新表头\n self.column_headers = [\n self.lang['ui.table_main_1'],\n self.lang['ui.table_main_2'],\n self.lang['ui.table_main_3'],\n self.lang['ui.action_compare_3'],\n ]\n self._update_all_table_headers(self.column_headers)\n # 更新其他文字\n self.public_label.setText(self.lang['ui.dialog_comparison_2'])\n self.public_button.setText(self.lang['ui.dialog_comparison_3'])\n self.public_box.setToolTip(self.lang['ui.dialog_comparison_5'])\n self.search_label.setText(self.lang['ui.dialog_comparison_4'])\n self.search_button.setText(self.lang['ui.filter_bar_9'])\n self.search_box.setToolTip(self.lang['ui.dialog_comparison_6'])\n\n def _update_tab_titles(self, new_titles: List[str]) -> None:\n \"\"\"\n 更新标签页标题。\n\n :param new_titles: 包含新标题的列表。\n :type 
new_titles: List[str]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n tab_widget = self.findChild(QTabWidget)\n # 确定标签页存在,且标签数量与新标题数量相等\n if tab_widget is not None and len(new_titles) == tab_widget.count():\n for index, title in enumerate(new_titles):\n tab_widget.setTabText(index, title)\n\n def _update_all_table_headers(self, new_headers: List[str]) -> None:\n \"\"\"\n 更新所有标签页中表格的表头。\n\n :param new_headers: 包含新表头的列表。\n :type new_headers: List[str]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n tab_widget = self.findChild(QTabWidget)\n if tab_widget is None:\n return\n # 循环设置每个标签页中的表格表头\n for i in range(tab_widget.count()):\n table = tab_widget.widget(i).findChild(QTableWidget)\n for j, header in enumerate(new_headers):\n table.setHorizontalHeaderItem(j, QTableWidgetItem(header))\n\n def _create_table(self, items: Dict[str, List[Dict[str, str]]]) -> QTableWidget:\n \"\"\"\n 建立表格并插入数据。\n\n :param items: 包含单个环境配置比较结果。\n :type items: List[Dict[str, str]]]\n\n :rtype: QTableWidget\n :return: 返回建好的表格组件。\n \"\"\"\n table = QTableWidget()\n # 配置表格基本属性\n table.setColumnCount(len(self.column_headers))\n table.setHorizontalHeaderLabels(self.column_headers)\n table.setEditTriggers(QTableWidget.NoEditTriggers)\n table.setSelectionBehavior(QTableWidget.SelectItems)\n table.setTextElideMode(Qt.ElideNone)\n table.horizontalHeader().setMinimumSectionSize(220)\n\n # 向表格插入数据。先计算总行数,禁用更新,优化性能。\n table.setUpdatesEnabled(False)\n table.setRowCount(sum(len(group) for group in items.values()))\n self._insert_data_to_table(table, items)\n table.setUpdatesEnabled(True)\n\n return table\n\n def _insert_data_to_table(self,\n table: QTableWidget,\n items: Dict[str, List[Dict[str, str]]]) -> None:\n \"\"\"\n 向表格插入特定格式的数据。\n\n :param table: 展示结果表格。\n :type table: QTableWidget\n :param items: 包含单个环境配置比较结果。\n :type items: Dict[str, List[Dict[str, str]]]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n # 如果没数据,直接返回\n if not items:\n return\n\n # 两种颜色:白色和灰色\n color_palette = [Qt.white, QColor(COLOR_SKIP)]\n # 开始行数\n row_count = 0\n\n # 索引键不需要,直接获取对结果分好组的列表\n for group_number, item_group in enumerate(items.values(), start=1):\n # 要单元格设置的背景颜色\n group_color = color_palette[group_number % len(color_palette)]\n # 对包含多组配置字典的列表进行处理\n for item_index, item in enumerate(item_group, start=1):\n # 为每行设置组号\n table.setVerticalHeaderItem(row_count, QTableWidgetItem(f\"{group_number}.{item_index}\"))\n # 对表头处理,col_index为列号,key为列标题\n for col_index, key in enumerate(self.column_headers):\n # 避免 KeyError\n value = item.get(key, \"\")\n # 设置单元格数据\n table_item = QTableWidgetItem(str(value))\n # 为单元格设置背景颜色\n table_item.setBackground(group_color)\n # 通过行号、列号和数据信息插入到表格\n table.setItem(row_count, col_index, table_item)\n # 插入完一行后,行号加一\n row_count += 1\n except Exception:\n logger.exception(\"Failed to insert data into table\")\n self.status_updated.emit(self.lang['label_status_error'])\n\n def forward_status(self, message: str) -> None:\n \"\"\"\n 用于转发状态信号。\n\n :param message: 要转发的消息。\n :type message: str\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.status_updated.emit(message)\n\n def _get_current_table(self) -> Optional[QTableWidget]:\n \"\"\"\n 获取当前选中标签页中的表格。\n\n :rtype: Optional[QTableWidget]\n :return: 返回当前选中标签页中的 QTableWidget 实例。没获取到则返回 None。\n \"\"\"\n tab_widget = self.findChild(QTabWidget)\n if tab_widget is None:\n return None\n\n current_tab = tab_widget.currentWidget()\n if current_tab is None:\n return None\n\n table = current_tab.findChild(QTableWidget)\n return table\n\n def set_public(self) -> None:\n \"\"\"\n 
根据用户输入公共配置的名称,设置表格中对应行的字体颜色。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 如果关闭颜色设置,直接返回。\n color_switch = self.config_manager.get_config_main().get('color_set', 'ON')\n if color_switch == 'OFF':\n return\n\n # 获取输入值和表格。表格为空则返回。\n public_value = self.public_box.text().strip()\n table = self._get_current_table()\n if table is None:\n return\n\n # 无论输入值是否为空,都先重置表格字体颜色\n self._reset_table_font_color(table)\n\n # 输入值为空,直接返回。\n if not public_value:\n return\n\n # 遍历表格设置匹配行的字体颜色\n for row in range(table.rowCount()):\n cell_item = table.item(row, 0)\n if cell_item and public_value == cell_item.text():\n self._set_row_font_color(table, row, Qt.red)\n\n @staticmethod\n def _set_row_font_color(table: QTableWidget,\n row: int,\n color: str) -> None:\n \"\"\"\n 设置特定行的字体颜色。\n\n :param table: 要操作的表格对象。\n :type table: QTableWidget\n :param row: 行号。\n :type row: int\n :param color: 字体颜色。\n :type color: str\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n for column in range(table.columnCount()):\n cell_item = table.item(row, column)\n if cell_item:\n cell_item.setForeground(QColor(color))\n\n @staticmethod\n def _reset_table_font_color(table: QTableWidget) -> None:\n \"\"\"\n 重置表格所有单元格的字体颜色为黑色。\n\n :param table: 要操作的表格对象。\n :type table: QTableWidget\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n for row in range(table.rowCount()):\n for column in range(table.columnCount()):\n cell_item = table.item(row, column)\n if cell_item:\n cell_item.setForeground(Qt.black)\n\n def search_value(self) -> None:\n \"\"\"\n 根据用户输入的搜索字段,去表格中所有配置键和配置值中去搜索匹配。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 获取用户输入的搜索文本和表格。\n search_text = self.search_box.text().strip().lower()\n table = self._get_current_table()\n\n # 如果没有找到表格,直接返回\n if table is None:\n return\n\n # 如果输入框为空,重置所有行为可见\n if not search_text:\n self._reset_row_hidden_status(table)\n return\n\n # 逐行匹配搜索值\n for row in range(table.rowCount()):\n self._search_process(table, row, search_text)\n\n @staticmethod\n def _search_process(table: QTableWidget,\n row: int,\n search_text: str) -> None:\n \"\"\"\n 作用于单行,根据搜索文本设置可见性。\n\n :param table: 表格对象。\n :type table: QTableWidget\n :param row: 当前行号。\n :type row: int\n :param search_text: 搜索文本。\n :type search_text: str\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 变量和实际显示匹配。先设为False为不显示\n row_contains_search_text = False\n # 只搜索键和值列\n for column in [2, 3]:\n # 获取单元格文本,并小写化,匹配搜索文本,让搜索不区分大小写\n cell_text = table.item(row, column).text().lower()\n # 找到匹配项,跳出内层循环\n if search_text in cell_text:\n row_contains_search_text = True\n break\n\n table.setRowHidden(row, not row_contains_search_text)\n\n @staticmethod\n def _reset_row_hidden_status(table: QTableWidget) -> None:\n \"\"\"\n 重置表格行的隐藏状态。\n\n :param table: 表格对象。\n :type table: QTableWidget\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n if table is not None:\n for row in range(table.rowCount()):\n table.setRowHidden(row, False)" }, { "identifier": "global_signals", "path": "ui/global_signals.py", "snippet": "class GlobalSignals(QObject):" }, { "identifier": "LangManager", "path": "ui/lang_manager.py", "snippet": "class LangManager(QObject):\n \"\"\"\n 语言管理类,用于管理和更新应用程序的语言字典。\n\n 此类继承自 QObject,可发出语言更新的信号。它通过 `get_lang_dict` 函数获取当前语言字典,并提供了更新语言的功能。\n\n :ivar _lang_dict: 当前使用的语言字典。\n :vartype _lang_dict: dict\n \"\"\"\n lang_updated = pyqtSignal()\n\n def __init__(self):\n super().__init__()\n self._lang_dict = get_lang_dict()\n\n def get_lang(self) -> Optional[Dict[str, str]]:\n \"\"\"\n 获取当前使用的语言字典的副本。\n\n :return: 当前语言字典的深拷贝。\n :rtype: Optional[Dict[str, str]]\n \"\"\"\n try:\n return 
copy.deepcopy(self._lang_dict)\n except Exception:\n logger.exception(\"Failed to retrieve language dictionary.\")\n return None\n\n def update_lang(self, new_lang: str) -> None:\n \"\"\"\n 更新当前使用的语言字典。\n\n :param new_lang: 新语言的标识符。\n :type new_lang: str\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n self._lang_dict = LANG_DICTS.get(new_lang, \"English\")\n self.lang_updated.emit()\n logger.info(f\"Language changed to {new_lang}\")\n except Exception:\n logger.exception(f\"Failed to changed language to {new_lang}\")" }, { "identifier": "message_show", "path": "ui/message_show.py", "snippet": "def message_show(message_type: str,\n text: str) -> None:\n \"\"\"\n 显示指定类型的消息框。\n\n 根据提供的消息类型和文本内容,显示相应的消息框。支持的消息类型包括 'Critical'、'Warning' 和 'Information'。\n\n :param message_type: 消息类型,支持 'Critical'、'Warning' 和 'Information'。\n :type message_type: str\n :param text: 消息框中显示的文本内容。\n :type text: str\n :return: 无返回值。\n :rtype: None\n \"\"\"\n try:\n msg_box = QMessageBox()\n msg_box.setText(text)\n msg_box.setStandardButtons(QMessageBox.Ok)\n msg_box.setWindowTitle(message_type)\n\n if message_type == 'Critical':\n msg_box.setIcon(QMessageBox.Critical)\n msg_box.setWindowIcon(QIcon(get_resource_path('media/icons8-error-26')))\n elif message_type == 'Warning':\n msg_box.setIcon(QMessageBox.Warning)\n msg_box.setWindowIcon(QIcon(get_resource_path('media/icons8-do-not-disturb-26')))\n elif message_type == 'Information':\n msg_box.setIcon(QMessageBox.Information)\n msg_box.setWindowIcon(QIcon(get_resource_path('media/icons8-about-26')))\n else:\n logger.warning(\"Invalid message type provided.\")\n\n msg_box.exec_()\n except Exception:\n logger.exception(\"An error occurred while displaying the message box\")" }, { "identifier": "TableMain", "path": "ui/table_main.py", "snippet": "class TableMain(QTableWidget):\n \"\"\"\n 主表格类,用于展示和管理数据行。\n\n 此类继承自 PyQt5 的 QTableWidget,提供了丰富的数据展示和管理功能。包括但不限于数据的展示、行的颜色标记、右键菜单功能以及快捷键支持。\n 通过与 LangManager 和 ConfigManager 的集成,支持动态语言切换和配置管理。\n\n :param lang_manager: 用于管理界面语言的 LangManager 实例。\n :type lang_manager: LangManager\n :param config_manager: 用于管理配置的 ConfigManager 实例。\n :type config_manager: ConfigManager\n\n :author: assassing\n :contact: https://github.com/hxz393\n :copyright: Copyright 2023, hxz393. 
保留所有权利。\n \"\"\"\n status_updated = pyqtSignal(str)\n filter_updated = pyqtSignal(list)\n\n def __init__(self,\n lang_manager: LangManager,\n config_manager: ConfigManager):\n super().__init__()\n self.lang_manager = lang_manager\n self.lang_manager.lang_updated.connect(self.update_lang)\n self.config_manager = config_manager\n # 实例化用到的组件\n self.actionCopy = ActionCopy(self.lang_manager, self)\n self.actionSave = ActionSave(self.lang_manager, self)\n self.actionSkip = ActionSkip(self.lang_manager, self.config_manager, self)\n self.actionUnskip = ActionUnskip(self.lang_manager, self.config_manager, self)\n # 手动连接实例化的组件信号到转发函数\n self.actionCopy.status_updated.connect(self.forward_status)\n self.actionSave.status_updated.connect(self.forward_status)\n self.actionSkip.status_updated.connect(self.forward_status)\n self.actionSkip.filter_updated.connect(self.forward_filter)\n self.actionUnskip.status_updated.connect(self.forward_status)\n self.actionUnskip.filter_updated.connect(self.forward_filter)\n self.initUI()\n\n def initUI(self) -> None:\n \"\"\"\n 初始化用户界面。\n\n 此方法负责设置表格的基本属性,如列数、表头标签、选择行为等。还包括对特定列的隐藏和宽度调整策略的设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 先运行语言更新,里面有表头定义\n self.update_lang()\n self.hidden_cols = [\"pro_time\", \"pre_time\", \"test_time\", \"dev_time\"]\n self.resize_cols = [\"name\", \"group\", \"consistency\", \"skip\"]\n # 配置表格基本属性\n self.setColumnCount(len(self.column_headers))\n self.setHorizontalHeaderLabels(self.column_headers)\n self.setEditTriggers(QTableWidget.NoEditTriggers)\n self.setSelectionBehavior(QTableWidget.SelectItems)\n # 隐藏垂直表头\n self.verticalHeader().setVisible(False)\n # 启用自动换行,没生效\n self.setWordWrap(True)\n self.setTextElideMode(Qt.ElideNone)\n # 为表头视图设置上下文菜单事件\n self.horizontalHeader().setContextMenuPolicy(Qt.CustomContextMenu)\n self.horizontalHeader().customContextMenuRequested.connect(self._header_context_menu)\n # 为表单设置上下文菜单事件\n self.setContextMenuPolicy(Qt.CustomContextMenu)\n self.customContextMenuRequested.connect(self._cell_context_menu)\n # 隐藏指定列\n [self.hideColumn(COL_INFO[i]['col']) for i in self.hidden_cols]\n # 设置表宽度策略\n self.set_header_resize()\n\n def set_header_resize(self):\n \"\"\"\n 设置表头的列宽度和调整策略。\n\n 此方法负责定义表头列的宽度调整策略和其他相关属性。它设置了表头列的默认宽度、是否可拖动以及列的自动调整策略。\n 例如,某些列被设置为根据内容自动调整宽度,而其他列则被设置为可伸缩以适应表格的大小。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 设置默认列宽度,列宽调整策略,列可拖动\n self.horizontalHeader().setSectionsMovable(True)\n self.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.horizontalHeader().setMinimumSectionSize(100)\n # 设置要自动调整宽度的列\n [self.horizontalHeader().setSectionResizeMode(COL_INFO[i]['col'], QHeaderView.ResizeToContents) for i in self.resize_cols]\n\n def update_lang(self) -> None:\n \"\"\"\n 更新界面语言设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.lang = self.lang_manager.get_lang()\n self.column_headers = [\n self.lang['ui.table_main_1'],\n self.lang['ui.table_main_2'],\n self.lang['ui.table_main_3'],\n self.lang['ui.dialog_settings_connection_2'],\n f\"{self.lang['ui.dialog_settings_connection_2']} {self.lang['ui.table_main_4']}\",\n self.lang['ui.dialog_settings_connection_3'],\n f\"{self.lang['ui.dialog_settings_connection_3']} {self.lang['ui.table_main_4']}\",\n self.lang['ui.dialog_settings_connection_4'],\n f\"{self.lang['ui.dialog_settings_connection_4']} {self.lang['ui.table_main_4']}\",\n self.lang['ui.dialog_settings_connection_5'],\n f\"{self.lang['ui.dialog_settings_connection_5']} {self.lang['ui.table_main_4']}\",\n self.lang['ui.table_main_5'],\n self.lang['ui.table_main_6'],\n ]\n 
# 重新应用到表头\n self.setHorizontalHeaderLabels(self.column_headers)\n # 定义数据和显示映射的字典\n consistency_status_mapping = {\n \"inconsistent\": self.lang['ui.action_start_8'],\n \"fully\": self.lang['ui.action_start_9'],\n \"partially\": self.lang['ui.action_start_10'],\n \"unknown\": self.lang['ui.action_start_13'],\n }\n skip_status_mapping = {\n \"no\": self.lang['ui.action_start_11'],\n \"yes\": self.lang['ui.action_start_12'],\n \"unknown\": self.lang['ui.action_start_13'],\n }\n for row in range(self.rowCount()):\n # 更新忽略状态文字\n self._update_item_text(row, \"skip\", skip_status_mapping)\n # 更新一致性状态文字\n self._update_item_text(row, \"consistency\", consistency_status_mapping)\n\n def _update_item_text(self,\n row: int,\n user_data_key: str,\n text_mapping: Dict[str, str]) -> None:\n \"\"\"\n 根据提供的文本映射更新指定行的项文本。\n\n 此方法用于更新表格或列表中特定行的文本。它根据用户数据键(user_data_key)获取对应行的项,然后根据提供的文本映射(text_mapping)更新该项的文本。\n\n :param row: 要更新的行索引。\n :type row: int\n :param user_data_key: 用于获取项的用户数据键。\n :type user_data_key: str\n :param text_mapping: 用户数据到文本的映射字典。\n :type text_mapping: Dict[str, str]\n\n :return: 无返回值。\n :rtype: None\n \"\"\"\n item = self.item(row, COL_INFO[user_data_key]['col'])\n if item is not None:\n user_data = item.data(Qt.UserRole)\n if user_data in text_mapping:\n item.setText(text_mapping[user_data])\n\n def keyPressEvent(self, event: QKeyEvent) -> None:\n \"\"\"\n 处理键盘事件。\n\n 此方法用于处理键盘事件,特别是复制功能的快捷键。如果按下 Ctrl+C,则复制选中的单元格内容。\n\n :param event: 键盘事件对象。\n :type event: QKeyEvent\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n if event.key() == Qt.Key_C and (event.modifiers() & Qt.ControlModifier):\n self.actionCopy.action_copy()\n else:\n super().keyPressEvent(event)\n\n def _cell_context_menu(self, pos: QPoint) -> None:\n \"\"\"\n 实现表格单元格的右键菜单功能。\n\n :param pos: 右键点击的位置。\n :type pos: QPoint\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n menu = QMenu(self)\n menu.addAction(self.actionCopy.action_copy)\n separator = QAction(menu)\n separator.setSeparator(True)\n menu.addAction(separator)\n menu.addAction(self.actionSkip.action_skip)\n menu.addAction(self.actionUnskip.action_unskip)\n sep = QAction(menu)\n sep.setSeparator(True)\n menu.addAction(sep)\n menu.addAction(self.actionSave.action_save)\n menu.exec_(self.viewport().mapToGlobal(pos))\n\n def _header_context_menu(self, pos: QPoint) -> None:\n \"\"\"\n 实现表头的右键菜单功能。\n\n :param pos: 右键点击的位置。\n :type pos: QPoint\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n menu = QMenu(self)\n # 动态创建一个菜单项,用于隐藏/显示列\n for index in range(self.columnCount()):\n column_name = self.horizontalHeaderItem(index).text()\n action = menu.addAction(f\"{column_name}\")\n action.setCheckable(True)\n action.setChecked(not self.isColumnHidden(index))\n action.setData(index)\n action.triggered.connect(self._toggle_column_visibility)\n # 在鼠标右键点击位置显示菜单\n menu.exec_(self.horizontalHeader().viewport().mapToGlobal(pos))\n\n def _toggle_column_visibility(self) -> None:\n \"\"\"\n 根据用户选择,切换列的可见性。\n\n 此方法用于根据用户在上下文菜单中的选择,显示或隐藏特定的列。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n action = self.sender()\n if isinstance(action, QAction):\n column_index = action.data()\n if action.isChecked():\n self.showColumn(column_index)\n else:\n self.hideColumn(column_index)\n\n def add_row(self, data: List[List[str]]) -> None:\n \"\"\"\n 向表格中添加一行数据。\n\n :param data: 要添加的数据列表,每个元素是一个列表,第一个元素代表显示的字符串,第二个元素代表附加数据。\n :type data: List[List[str]]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n row_position = 0\n try:\n # 获取最后行数\n row_position = self.rowCount()\n # 插入最后一行\n self.insertRow(row_position)\n # 插入单元格数据\n 
self._fill_row_data(row_position, data)\n except Exception:\n logger.exception(f\"Error occurred while adding a new row at position {row_position}\")\n self.removeRow(row_position)\n\n def _fill_row_data(self,\n row_position: int,\n data: List[List[str]]) -> None:\n \"\"\"\n 填充指定行的数据。\n\n :param row_position: 行位置\n :param data: 行数据\n :type row_position: int\n :type data: List[List[str]]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n for column, (display_text, user_data) in enumerate(data):\n # 默认设置显示字符串,也叫 Qt.DisplayRole。获取方法item.text() 或 item.data(Qt.DisplayRole)\n item = QTableWidgetItem(str(display_text))\n # 设置实际数据,也叫 Qt.UserRole。获取方法 item.data(Qt.UserRole)\n item.setData(Qt.UserRole, user_data)\n # 设置单元格不可编辑状态\n item.setFlags(item.flags() & ~Qt.ItemIsEditable)\n # 正常表格插入方法\n self.setItem(row_position, column, item)\n\n @log_time\n def apply_color_to_table(self, rows: List[int] = None) -> None:\n \"\"\"\n 对整个表格进行着色。通常只有初始化时才不带rows参数,以应用到整表。\n\n :param rows: 可选,要应用颜色的行号列表。\n :type rows: List[int], optional\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n color_switch = self.config_manager.get_config_main().get('color_set', 'ON')\n if color_switch == 'OFF':\n return\n\n if rows is None or not isinstance(rows, list):\n rows = range(self.rowCount())\n\n try:\n for row in rows:\n # 不给隐藏行设置颜色\n if self.isRowHidden(row):\n continue\n\n self._process_row_for_color(row)\n except Exception:\n logger.exception(\"Exception in apply_color_to_table method\")\n self.status_updated.emit(self.lang['label_status_error'])\n\n def _process_row_for_color(self, row: int) -> None:\n \"\"\"\n 根据一致性、跳过状态和是否为空值给单行应用颜色。\n\n :param row: 行号,对每行进行颜色处理。\n :type row: int\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n consistency_data = self.item(row, COL_INFO['consistency']['col']).data(Qt.UserRole)\n skip_data = self.item(row, COL_INFO['skip']['col']).data(Qt.UserRole)\n # 忽略状态为是时设置颜色\n if skip_data == 'yes':\n self.apply_color(row, COLOR_SKIP)\n return\n\n # 根据一致性值设置颜色\n if consistency_data == 'fully':\n self.apply_color(row, COLOR_CONSISTENCY_FULLY)\n elif consistency_data == 'partially':\n self.apply_color(row, COLOR_CONSISTENCY_PARTIALLY)\n else:\n self.apply_color(row, COLOR_DEFAULT)\n\n # 遍历指定列检查空值,并赋予颜色\n for column in range(self.columnCount()):\n # 不给隐藏列设置颜色\n if not self.isColumnHidden(column):\n if self.item(row, column).text() == 'None':\n self.apply_color(row, COLOR_EMPTY, column)\n\n def apply_color(self,\n row: int,\n color: str,\n column: Optional[int] = None) -> None:\n \"\"\"\n 为指定的行或单元格应用颜色。\n\n :param row: 要着色的行索引。\n :type row: int\n :param color: 要应用的颜色。\n :type color: str\n :param column: 可选,指定要着色的列索引,如果未指定,则对整行应用颜色。\n :type column: int, optional\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n color_brush = QBrush(QColor(color))\n if column is not None:\n self.item(row, column).setBackground(color_brush)\n else:\n for col in range(self.columnCount()):\n # 不给隐藏列设置颜色\n if not self.isColumnHidden(col):\n self.item(row, col).setBackground(color_brush)\n except Exception:\n logger.exception(\"Error occurred while applying color to a cell\")\n self.status_updated.emit(self.lang['label_status_error'])\n\n def clear(self) -> None:\n \"\"\"\n 清空表格中的所有行。\n\n 此方法用于清除表格中的所有数据,通常在数据更新或重置时使用。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n # 禁用更新以提高性能\n self.setUpdatesEnabled(False)\n # 首先清除所有单元格的内容\n self.clearContents()\n # 将行数设置为0,从而删除所有行\n self.setRowCount(0)\n except Exception:\n logger.exception(\"Error occurred while clearing the table.\")\n self.status_updated.emit(self.lang['label_status_error'])\n 
finally:\n # 确保即使发生错误也要重新启用更新\n self.setUpdatesEnabled(True)\n\n def forward_status(self, message: str) -> None:\n \"\"\"\n 用于转发状态信号。\n\n :param message: 要转发的消息。\n :type message: str\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.status_updated.emit(message)\n\n def forward_filter(self, rows: List[int]) -> None:\n \"\"\"\n 用于转发过滤信号。\n\n :param rows: 要转发的行列表。\n :type rows: List[int]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.filter_updated.emit(rows)\n\n def get_table_data(self) -> Dict[int, Dict[str, str]]:\n \"\"\"\n 用于获取表格所有数据。\n\n :rtype: Dict[int, Dict[str, str]]\n :return: 返回嵌套字典。键为行号,值为字典,字典中键为列标题,值为内容。类似于:{882: {'服务': 'web', '分组': 'application'}, 883: {'服务': 'web', '分组': 'application'}}\n \"\"\"\n return {row: {self.horizontalHeaderItem(col).text(): self.item(row, col).data(Qt.UserRole)\n for col in range(self.columnCount())}\n for row in range(self.rowCount())}" } ]
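The `DialogComparison._insert_data_to_table` snippet in the context above numbers each table row as `<group>.<item>` and alternates a two-colour palette per group. The following plain-Python sketch reproduces just that grouping scheme without any Qt dependency, assuming the same input shape (a dict mapping a group key to a list of row dicts); the function name `assign_group_rows` and the demo data are made up for illustration.

```python
from typing import Dict, List, Tuple

def assign_group_rows(items: Dict[str, List[dict]]) -> List[Tuple[str, int, dict]]:
    """Label rows "<group>.<item>" and pick a palette slot per group,
    mirroring DialogComparison._insert_data_to_table (0 = white, 1 = grey there)."""
    palette_size = 2
    rows = []
    for group_number, group in enumerate(items.values(), start=1):
        color_slot = group_number % palette_size  # alternate colours between groups
        for item_index, item in enumerate(group, start=1):
            rows.append((f"{group_number}.{item_index}", color_slot, item))
    return rows

if __name__ == "__main__":
    # Hypothetical comparison result: two groups of config entries.
    demo = {
        "svc-a": [{"key": "timeout", "value": "30"}, {"key": "timeout", "value": "60"}],
        "svc-b": [{"key": "retries", "value": "3"}],
    }
    for label, slot, item in assign_group_rows(demo):
        print(label, slot, item)
```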
import logging
from typing import Dict, Optional, List

from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QAction

from lib.get_resource_path import get_resource_path
from lib.log_time import log_time
from ui.config_manager import ConfigManager
from ui.dialog_comparison import DialogComparison
from ui.global_signals import global_signals
from ui.lang_manager import LangManager
from ui.message_show import message_show
from ui.table_main import TableMain
12,167
""" 本文件包含用于处理和比较配置数据的类和函数。 该模块主要包含 `ActionCompare` 类,用于在用户界面中处理数据比较的逻辑。该类提供了对比配置数据、更新界面语言、重组数据等功能,方便用户进行环境配置的对比分析。 :author: assassing :contact: https://github.com/hxz393 :copyright: Copyright 2023, hxz393. 保留所有权利。 """ logger = logging.getLogger(__name__) class ActionCompare(QObject): """ 提供数据比较功能的类。 该类负责处理用户界面中的数据对比逻辑,包括初始化UI组件、更新语言设置、执行数据对比等功能。它还负责处理各种事件和信号,并更新用户界面状态。 :param lang_manager: 语言管理器实例,用于处理界面语言更新。 :type lang_manager: LangManager :param config_manager: 配置管理器,用于获取网络测试相关配置。 :type config_manager: ConfigManager :param table: 主界面表格实例,提供数据获取和显示功能。 :type table: TableMain """ status_updated = pyqtSignal(str) def __init__(self, lang_manager: LangManager, config_manager: ConfigManager, table: TableMain): super().__init__() self.lang_manager = lang_manager self.lang_manager.lang_updated.connect(self.update_lang) self.config_manager = config_manager self.table = table self.initUI() def initUI(self) -> None: """ 初始化用户界面组件。 :rtype: None :return: 无返回值。 """ self.action_compare = QAction(QIcon(get_resource_path('media/icons8-diff-files-26')), 'Compare') self.action_compare.setShortcut('F8') # 为了记录运行时间,使用匿名函数 self.action_compare.triggered.connect(lambda checked=False: self.compare()) self.update_lang() def update_lang(self) -> None: """ 更新界面语言设置。 :rtype: None :return: 无返回值。 """ self.lang = self.lang_manager.get_lang() self.action_compare.setText(self.lang['ui.action_compare_1']) self.action_compare.setStatusTip(self.lang['ui.action_compare_2'])
""" 本文件包含用于处理和比较配置数据的类和函数。 该模块主要包含 `ActionCompare` 类,用于在用户界面中处理数据比较的逻辑。该类提供了对比配置数据、更新界面语言、重组数据等功能,方便用户进行环境配置的对比分析。 :author: assassing :contact: https://github.com/hxz393 :copyright: Copyright 2023, hxz393. 保留所有权利。 """ logger = logging.getLogger(__name__) class ActionCompare(QObject): """ 提供数据比较功能的类。 该类负责处理用户界面中的数据对比逻辑,包括初始化UI组件、更新语言设置、执行数据对比等功能。它还负责处理各种事件和信号,并更新用户界面状态。 :param lang_manager: 语言管理器实例,用于处理界面语言更新。 :type lang_manager: LangManager :param config_manager: 配置管理器,用于获取网络测试相关配置。 :type config_manager: ConfigManager :param table: 主界面表格实例,提供数据获取和显示功能。 :type table: TableMain """ status_updated = pyqtSignal(str) def __init__(self, lang_manager: LangManager, config_manager: ConfigManager, table: TableMain): super().__init__() self.lang_manager = lang_manager self.lang_manager.lang_updated.connect(self.update_lang) self.config_manager = config_manager self.table = table self.initUI() def initUI(self) -> None: """ 初始化用户界面组件。 :rtype: None :return: 无返回值。 """ self.action_compare = QAction(QIcon(get_resource_path('media/icons8-diff-files-26')), 'Compare') self.action_compare.setShortcut('F8') # 为了记录运行时间,使用匿名函数 self.action_compare.triggered.connect(lambda checked=False: self.compare()) self.update_lang() def update_lang(self) -> None: """ 更新界面语言设置。 :rtype: None :return: 无返回值。 """ self.lang = self.lang_manager.get_lang() self.action_compare.setText(self.lang['ui.action_compare_1']) self.action_compare.setStatusTip(self.lang['ui.action_compare_2'])
@log_time
1
2023-11-07 01:02:38+00:00
16k
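`ActionCompare.initUI` in the sample above creates a `QAction`, binds it to the F8 shortcut, and routes `triggered` through a lambda so the boolean `checked` argument is discarded before `compare()` runs. A minimal, self-contained PyQt5 sketch of that wiring is shown below; the window class, status-tip text, and placeholder `compare` body are hypothetical and only illustrate the pattern.

```python
import sys
from PyQt5.QtWidgets import QAction, QApplication, QMainWindow

class DemoWindow(QMainWindow):
    """Hypothetical window that wires a toolbar action the way ActionCompare.initUI does."""

    def __init__(self):
        super().__init__()
        # Create the action, attach a shortcut, and connect `triggered` via a lambda
        # so the `checked` argument emitted by QAction is dropped.
        self.action_compare = QAction('Compare', self)
        self.action_compare.setShortcut('F8')
        self.action_compare.setStatusTip('Compare configurations across environments')
        self.action_compare.triggered.connect(lambda checked=False: self.compare())
        self.addToolBar('Main').addAction(self.action_compare)
        self.statusBar().showMessage('Ready')

    def compare(self) -> None:
        # Placeholder for the real comparison logic.
        self.statusBar().showMessage('Comparison triggered')

if __name__ == '__main__':
    app = QApplication(sys.argv)
    win = DemoWindow()
    win.show()
    sys.exit(app.exec_())
```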
pytorch-labs/ao
test/test.py
[ { "identifier": "DynamicallyPerAxisQuantizedLinear", "path": "torchao/quantization/dynamic_quant.py", "snippet": "class DynamicallyPerAxisQuantizedLinear(torch.nn.Linear):\n \"\"\"\n This class is a replacement for `torch.nn.Linear`. It implements a\n quantized matmul using int8 dynamic symmetric per-token activation,\n and int8 symmetric per-channel weight quantization\n \"\"\"\n\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = True,\n ) -> None:\n super().__init__(in_features, out_features, bias)\n\n def forward(self, X: torch.Tensor, *args, **kwargs) -> torch.Tensor:\n \"\"\"\n Performs the forward pass of the quantized linear layer which consists\n of int8 dynamic symmetric per-token activation and int8 symmetric per-channel weight\n quantization\n\n Args:\n X (torch.Tensor): The input floating point tensor to the quantized linear layer.\n\n Returns:\n torch.Tensor: The output floating point tensor after the quantized matmul and rescale.\n\n \"\"\"\n\n Y = quant_int8_dynamic_per_token_linear(\n X, self.W_int_repr_t, self.W_scales, self.bias, X.dtype\n )\n return Y\n\n @classmethod\n def from_float(\n cls, mod: torch.nn.Linear\n ) -> \"DynamicallyPerAxisQuantizedLinear\":\n \"\"\"\n Converts a `mod` of class `torch.nn.Linear` to the\n `DynamicallyPerAxisQuantizedLinear` class\n\n Args:\n mod (torch.nn.Linear): The original `torch.nn.Linear` module to convert.\n\n Returns:\n DynamicallyPerAxisQuantizedLinear: The converted quantized linear module.\n\n \"\"\"\n\n # create the new module with a toy size to ensure initialization is fast\n fake_in_features, fake_out_features = 8, 8\n new_mod = cls(\n fake_in_features,\n fake_out_features,\n bias=mod.bias is not None,\n )\n new_mod.in_features = mod.in_features\n new_mod.out_features = mod.out_features\n W_int_repr, W_scales, _W_zps = dynamically_quantize_per_channel(\n mod.weight, -128, 127, torch.int8\n )\n new_mod.register_buffer(\"W_int_repr_t\", W_int_repr.contiguous().t())\n new_mod.W_scales = nn.Parameter(W_scales)\n new_mod.bias = mod.bias\n del new_mod.weight\n\n device_to_use = next(mod.parameters()).device\n new_mod.to(device_to_use)\n return new_mod" }, { "identifier": "apply_dynamic_quant", "path": "torchao/quantization/quant_api.py", "snippet": "def apply_dynamic_quant(model, filter_fn=None):\n \"\"\"\n Applies dynamic symmetric per-token activation and per-channel weight\n quantization to all linear layers in the given model using\n module swaps.\n \"\"\"\n _replace_with_custom_fn_if_matches_filter(\n model,\n lambda mod: DynamicallyPerAxisQuantizedLinear.from_float(mod),\n _is_linear if filter_fn is None else filter_fn,\n )" }, { "identifier": "apply_weight_only_int8_quant", "path": "torchao/quantization/quant_api.py", "snippet": "def apply_weight_only_int8_quant(model, filter_fn=None):\n \"\"\"\n Applies weight-only symmetric per-channel int8 quantization to all linear layers\n in the given model using module swaps.\n \"\"\"\n _replace_with_custom_fn_if_matches_filter(\n model,\n WeightOnlyInt8QuantLinear.from_float,\n _is_linear if filter_fn is None else filter_fn,\n )" }, { "identifier": "change_linear_weights_to_int8_dqtensors", "path": "torchao/quantization/quant_api.py", "snippet": "def change_linear_weights_to_int8_dqtensors(model, filter_fn=None):\n \"\"\"\n Converts all linear weight tensors to the `Int8DynamicallyQuantizedLinearWeight`\n Tensor subclass, effectively applying the same form of quantization\n as apply_dynamic_quant while not modifying the linear modules.\n \"\"\"\n if 
filter_fn is None:\n filter_fn = (\n lambda *args:\n _is_linear(*args) and\n _in_features_greater_than_16(*args)\n )\n\n _replace_with_custom_fn_if_matches_filter(\n model,\n _get_subclass_inserter(Int8DynamicallyQuantizedLinearWeight),\n filter_fn\n )" }, { "identifier": "change_linear_weights_to_int8_woqtensors", "path": "torchao/quantization/quant_api.py", "snippet": "def change_linear_weights_to_int8_woqtensors(model, filter_fn=None):\n \"\"\"\n Converts all linear weight tensors to the\n `Int8WeightOnlyQuantizedLinearWeight` tensor subclass,\n effectively applying the same form of quantization\n as apply_dynamic_quant while not modifying the linear modules.\n \"\"\"\n _replace_with_custom_fn_if_matches_filter(\n model,\n _get_subclass_inserter(Int8WeightOnlyQuantizedLinearWeight),\n _is_linear if filter_fn is None else filter_fn,\n )" }, { "identifier": "change_linear_weights_to_int4_woqtensors", "path": "torchao/quantization/quant_api.py", "snippet": "def change_linear_weights_to_int4_woqtensors(model, **kwargs):\n \"\"\"\n Converts all linear weight tensors to the\n `Int4WeightOnlyQuantizedLinearWeight` tensor subclass,\n effectively applying the same form of quantization\n as apply_dynamic_quant while not modifying the linear modules.\n \"\"\"\n filter_fn = kwargs.pop(\"filter_fn\", _is_linear)\n\n _replace_with_custom_fn_if_matches_filter(\n model,\n _get_subclass_inserter(Int4WeightOnlyQuantizedLinearWeight, **kwargs),\n filter_fn,\n )" }, { "identifier": "_replace_with_custom_fn_if_matches_filter", "path": "torchao/quantization/quant_api.py", "snippet": "def _replace_with_custom_fn_if_matches_filter(\n model, replacement_fn, filter_fn, cur_fqn=\"\"\n) -> None:\n \"\"\"\n For each `child` in `model`, replaces it with `replacement_fn(child)`\n if `filter_fn(child)` is `True`\n \"\"\"\n if filter_fn(model, cur_fqn[:-1]):\n model = replacement_fn(model)\n return model\n else:\n for name, child in model.named_children():\n new_child = _replace_with_custom_fn_if_matches_filter(\n child, replacement_fn, filter_fn, f\"{cur_fqn}{name}.\"\n )\n if new_child is not child:\n setattr(model, name, new_child)\n return model" }, { "identifier": "dequantize_per_channel", "path": "torchao/quantization/quant_primitives.py", "snippet": "def dequantize_per_channel(int_repr, scales, zero_points, out_dtype=torch.float32):\n # assumes axis is 0\n y = int_repr.transpose(0, 1)\n y = y.to(out_dtype)\n y = y - zero_points\n y = y * scales\n y = y.transpose(0, 1)\n return y" }, { "identifier": "dequantize_per_tensor", "path": "torchao/quantization/quant_primitives.py", "snippet": "def dequantize_per_tensor(int_repr, scale, zero_point, out_dtype=torch.float32):\n y = int_repr.to(out_dtype)\n if zero_point is not None:\n y -= zero_point\n return y * scale" }, { "identifier": "dynamically_quantize_per_channel", "path": "torchao/quantization/quant_primitives.py", "snippet": "def dynamically_quantize_per_channel(x, quant_min, quant_max, target_dtype):\n # assumes symmetric quantization\n # assumes axis == 0\n # assumes dense memory format\n # TODO(future): relax ^ as needed\n\n # default setup for affine quantization of activations\n eps = torch.finfo(torch.float32).eps\n\n # get min and max\n min_val, max_val = torch.aminmax(x, dim=1)\n\n # calculate scale and zero point based on min and max\n # reference: https://fburl.com/code/srbiybme\n min_val_neg = torch.min(min_val, torch.zeros_like(min_val))\n max_val_pos = torch.max(max_val, torch.zeros_like(max_val))\n device = min_val_neg.device\n\n # reference: 
https://fburl.com/code/4wll53rk\n max_val_pos = torch.max(-min_val_neg, max_val_pos)\n scale = max_val_pos / (float(quant_max - quant_min) / 2)\n # ensure scale is the same dtype as the original tensor\n scale = torch.clamp(scale, min=eps).to(x.dtype)\n zero_point = torch.zeros(min_val_neg.size(), dtype=torch.int64, device=device)\n\n # quantize based on qmin/qmax/scale/zp\n # reference: torch/ao/quantization/fx/_decomposed.py?lines=63\n x_div = x.transpose(0, 1) / scale\n x_round = torch.round(x_div)\n x_zp = x_round + zero_point\n x_zp = x_zp.transpose(0, 1)\n quant = torch.clamp(x_zp, quant_min, quant_max).to(target_dtype)\n\n return quant, scale, zero_point" }, { "identifier": "dynamically_quantize_per_tensor", "path": "torchao/quantization/quant_primitives.py", "snippet": "def dynamically_quantize_per_tensor(\n x,\n quant_min,\n quant_max,\n target_dtype,\n qscheme=torch.per_tensor_affine, # for now, reuse existing qscheme enum\n):\n # assumes affine quantization\n\n # default setup for affine quantization of activations\n eps = torch.finfo(torch.float32).eps\n\n if qscheme == torch.per_tensor_affine:\n # get min and max\n # TODO(future): make torch.aminmax work on cpu-half\n # min_val, max_val = torch.aminmax(x)\n min_val = torch.min(x)\n max_val = torch.max(x)\n\n # calculate scale and zero point based on min and max\n # reference: https://fburl.com/code/srbiybme\n min_val_neg = torch.min(min_val, torch.zeros_like(min_val))\n max_val_pos = torch.max(max_val, torch.zeros_like(max_val))\n device = min_val_neg.device\n\n scale = (max_val_pos - min_val_neg) / float(quant_max - quant_min)\n # TODO(future): make torch.clamp with scalar work on cpu-half\n scale = torch.clamp(scale, min=eps).reshape(1)\n zero_point = quant_min - torch.round(min_val_neg / scale).to(torch.int)\n zero_point = torch.clamp(zero_point, quant_min, quant_max)\n\n # quantize based on qmin/qmax/scale/zp\n # reference: torch/ao/quantization/fx/_decomposed.py?lines=63\n quant = torch.clamp(\n torch.round(x / scale) + zero_point, quant_min, quant_max\n ).to(target_dtype)\n\n else:\n assert qscheme == torch.per_tensor_symmetric, f\"unsupported qscheme {qscheme}\"\n # assert quant_min == -1 * quant_max, \"unsupported quant_min/quant_max\"\n amax = torch.max(torch.abs(x))\n scale = amax / (float(quant_max - quant_min) / 2)\n scale = torch.clamp(scale, min=eps).reshape(1)\n quant = torch.clamp(torch.round(x / scale), quant_min, quant_max).to(\n target_dtype\n )\n # do not create a tensor for zero_point as this is expensive\n zero_point = None\n\n return quant, scale, zero_point" }, { "identifier": "quant_int8_dynamic_linear", "path": "torchao/quantization/quant_primitives.py", "snippet": "def quant_int8_dynamic_linear(\n x,\n x_quant_min,\n x_quant_max,\n x_q_dtype,\n w_vals_int8_t,\n w_scales,\n w_vals_int8_t_sums_int64,\n bias,\n out_dtype=torch.float32,\n):\n # like F.linear, but with int8 dynamic quantization of activation,\n # and a quantized weight\n x_vals_int8, x_scale, x_zp = dynamically_quantize_per_tensor(\n x, x_quant_min, x_quant_max, x_q_dtype\n )\n # w_vals_int8_t_sums_int64 = w_vals_int8_t.sum(dim=0)\n mm_out = quant_int8_matmul(\n x_vals_int8,\n x_scale,\n x_zp,\n w_vals_int8_t,\n w_vals_int8_t_sums_int64,\n w_scales,\n out_dtype,\n )\n if bias is not None:\n mm_out += bias\n return mm_out" }, { "identifier": "quant_int8_dynamic_per_token_linear", "path": "torchao/quantization/quant_primitives.py", "snippet": "def quant_int8_dynamic_per_token_linear(\n x,\n w_vals_int8_t,\n w_scales,\n bias,\n 
out_dtype,\n):\n # like F.linear, but with int8 dynamic quantization of activation,\n # and a quantized weight\n x_vals_int8, x_scales = quantize_activation_per_token_absmax(x)\n mm_out = quant_int8_per_token_matmul(\n x_vals_int8, x_scales, w_vals_int8_t, w_scales, out_dtype\n )\n if bias is not None:\n mm_out += bias\n return mm_out" }, { "identifier": "quantize_activation_per_token_absmax", "path": "torchao/quantization/quant_primitives.py", "snippet": "def quantize_activation_per_token_absmax(t):\n n_bits = 8\n # if the shape of t is [B, N, K], the shape of scales will be [B, N, 1]\n\n scales = t.abs().amax(dim=-1, keepdim=True)\n if scales.dtype == torch.float16:\n scales = (\n scales.float()\n ) # want float scales to avoid overflows for fp16, (bf16 has wide enough range)\n q_max = 2 ** (n_bits - 1) - 1\n scales = scales.clamp(min=1e-5).div(q_max)\n # Note: the original smoothquant does not clamp to qmin/qmax here,\n # but some of the tests with bfloat16 ended up with a flipped sign\n # if we don't clamp. TODO(future) look into this further.\n t = torch.round(t / scales).clamp(-127, 127).to(torch.int8)\n return t, scales" }, { "identifier": "safe_int_mm", "path": "torchao/quantization/quant_primitives.py", "snippet": "def safe_int_mm(input: torch.Tensor, mat2: torch.Tensor) -> torch.Tensor:\n r\"\"\"\n This function wraps torch._int_mm and avoids several undesirable behaviors of the function for certain inputs while still\n returning correct results and being torch.compiled in a performant way.\n\n Assumes both tensors have dimension of 2.\n\n Note: no error checking for torch.compiled path, if input.shape = [i, j] and j<=16 then the triton kernel\n will error.\n\n Args:\n input (Tensor, int8): the first tensor to be multiplied\n mat2 (Tensor, int8): the second tensor to be multiplied\n\n Return:\n out (Tensor, int32): the result of the matmul with device matching that of the inputs\n \"\"\"\n\n # torch.compile path\n if dynamo_is_compiling() or \"FakeTensor\" in input.__repr__():\n return out_dtype(torch.ops.aten.mm.default, torch.int32, input, mat2)\n\n # error checking for cublas path\n assert (\n mat2.device == input.device\n ), f\"need both tensors to be on the same device but got {mat2.device} and {input.device}\"\n device_cpu = \"cpu\" in [mat2.device.type, input.device.type]\n # with input.shape = [i,j] and mat2.shape = [j,k]\n i_is_strictly_greater_than_16 = input.shape[0] > 16\n j_is_nonzero_multiple_of_8 = (input.shape[1] % 8 == 0) and (input.shape[1] > 0)\n k_is_nonzero_multiple_of_8 = (mat2.shape[1] % 8 == 0) and (mat2.shape[1] > 0)\n bad_dimensions_for_cublas = not (\n i_is_strictly_greater_than_16\n and j_is_nonzero_multiple_of_8\n and k_is_nonzero_multiple_of_8\n )\n\n if device_cpu or bad_dimensions_for_cublas:\n # fallback path\n return torch.matmul(input.cpu().to(torch.int32), mat2.cpu().to(torch.int32)).to(\n input.device.type\n )\n\n # cublas paths\n if not mat2.is_contiguous(): # silently gives incorrect result without this\n mat2 = mat2.contiguous()\n if (not input.is_contiguous()) and (\n input.shape[0] % 8 != 0\n ): # gives cryptic error without this\n input = (\n input.contiguous()\n ) # (it seems the transpose makes cublas check the above j constraint on i)\n return out_dtype(torch.ops.aten.mm.default, torch.int32, input, mat2)" }, { "identifier": "get_scale", "path": "torchao/quantization/smoothquant.py", "snippet": "def get_scale(X_absmax, W_absmax, alpha=0.5):\n \"\"\"\n Calculate the scale based on abs(max(X)), abs(max(W)) and alpha\n If X is of 
dimension `b*n*k` and W is dimension `k*m`, the returned\n scale is of dimension `k`.\n Note: X_absmax is calculated outside of this function because we\n need to keep a running version of it during calibration. W_absmax\n is calculated outside of this function for consistency with X_absmax.\n \"\"\"\n X_pow = torch.pow(X_absmax, alpha)\n W_pow = torch.pow(W_absmax, 1.0 - alpha)\n div = X_pow / W_pow\n return div.reshape(-1)" }, { "identifier": "smooth_fq_linear_to_inference", "path": "torchao/quantization/smoothquant.py", "snippet": "def smooth_fq_linear_to_inference(model, debug_skip_calibration=False) -> None:\n for _, mod in model.named_modules():\n if isinstance(mod, tuple(source_cls_to_target_cls.values())):\n if debug_skip_calibration:\n mod.set_debug_x_absmax()\n mod.to_inference()" }, { "identifier": "SmoothFakeDynamicallyQuantizedLinear", "path": "torchao/quantization/smoothquant.py", "snippet": "class SmoothFakeDynamicallyQuantizedLinear(SmoothFakeDynQuantMixin, torch.nn.Linear):\n \"\"\"\n This is a replacement for `torch.nn.Linear` which implements dynamic per-token\n activation quantization and dynamic per-channel weight quantization based on\n Smoothquant scaling.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n alpha = kwargs.pop(\"alpha\")\n super().__init__(*args, **kwargs)\n self.init_smoothquant_variables(alpha)\n\n def forward(self, X, *args, **kwargs):\n if self.calibrating:\n self.update_x_running_abs_max(X)\n Y = F.linear(X, self.weight, self.bias)\n else:\n if not self.debug_skip_scaling:\n # Ideally this would be fused into preceding layers\n # but in practice torch.compile fuses it with other\n # ops so the slowdown is minimal\n X = X / self.smooth_scale\n W_int_repr_t = (\n self.W_int_repr if self.store_w_int_repr_t else self.W_int_repr.t()\n )\n Y = quant_int8_dynamic_per_token_linear(\n X, W_int_repr_t, self.W_scales, self.bias, X.dtype\n )\n return Y\n\n @classmethod\n def from_float(cls, mod, alpha=0.5):\n \"\"\"\n Converts a `mod` of class `torch.nn.Linear` to the smooth fake quantized\n version of it. 
Note: requires calibration.\n \"\"\"\n # create the new module with a toy size to ensure initialization is fast\n fake_in_features, fake_out_features = 8, 8\n new_mod = cls(\n fake_in_features, fake_out_features, bias=mod.bias is not None, alpha=alpha\n )\n new_mod.in_features = mod.in_features\n new_mod.out_features = mod.out_features\n new_mod.weight = mod.weight\n new_mod.bias = mod.bias\n # TODO: test when creation is on cuda\n device_to_use = next(mod.parameters()).device\n new_mod.to(device_to_use)\n return new_mod\n\n def to_inference(self):\n \"\"\"\n Calculates the smoothquant scale based on calibration\n in preparation for inference\n \"\"\"\n assert self.x_running_abs_max is not None, \"no calibration data found\"\n self.calibrating = False\n self.smooth_scale = get_scale(\n self.x_running_abs_max,\n torch.max(torch.abs(self.weight.transpose(0, 1)), dim=1).values,\n alpha=self.alpha,\n )\n self.fold_weight()\n\n def set_debug_x_absmax(self):\n w_absmax = torch.max(torch.abs(self.weight.transpose(0, 1)), dim=1).values\n self.x_running_abs_max = w_absmax" }, { "identifier": "swap_linear_with_smooth_fq_linear", "path": "torchao/quantization/smoothquant.py", "snippet": "def swap_linear_with_smooth_fq_linear(\n model, skip_fqn_list=None, cur_fqn=\"\", alpha=0.5\n) -> None:\n\n name_to_child = dict(model.named_children())\n for name, child in name_to_child.items():\n if cur_fqn == \"\":\n new_fqn = name\n else:\n new_fqn = f\"{cur_fqn}.{name}\"\n if ((skip_fqn_list is None) or (new_fqn not in skip_fqn_list)) and (\n type(child) in source_cls_to_target_cls.keys()\n ):\n target_cls = source_cls_to_target_cls[type(child)]\n new_child = target_cls.from_float(child, alpha=alpha)\n setattr(model, name, new_child)\n else:\n swap_linear_with_smooth_fq_linear(child, skip_fqn_list, new_fqn, alpha)" }, { "identifier": "Int8DynamicallyQuantizedLinearWeight", "path": "torchao/quantization/subclass.py", "snippet": "class Int8DynamicallyQuantizedLinearWeight(QuantizedLinearWeightBase):\n \"\"\"\n A Tensor subclass that when applied to a weight used in a linear op/module, changes the\n linear op to a dynamically quantized linear op with symmetric per-token and per-channel\n quantization on the activation and weight respectively.\n \"\"\"\n\n @staticmethod\n def __new__(cls, int_data, q_scales, transposed, shape, **kwargs):\n kwargs[\"dtype\"] = kwargs.get(\"dtype\", q_scales.dtype)\n return super().__new__(cls, int_data, transposed, shape, **kwargs) # type: ignore[attr-defined]\n\n def __init__(self, int_data, q_scales, transposed, shape, **kwargs):\n self.q_scales = q_scales\n super().__init__(int_data, transposed)\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n return quant_int8_dynamic_per_token_linear(\n act_mat, w_qtensor.int_data, w_qtensor.q_scales, bias, act_mat.dtype\n )\n\n def dequantize(self, dtype=None):\n \"\"\"\n Obtain the dequantized version of the quantized tensor subclass\n \"\"\"\n dq_t = dequantize_per_channel(\n self.int_data.t(), self.q_scales, 0, self.dtype if dtype is None else dtype\n ).to(self.dtype)\n # data was transposed to dequantize so make sure shape is correct\n return dq_t if not self.transposed else dq_t.t()\n\n def int_repr(self):\n \"\"\"\n Get the internal integer representation of the quantized tensor\n \"\"\"\n return self.int_data if self.transposed else self.int_data.t()\n\n def q_params(self):\n \"\"\"\n Get the quantization scales for the quantized tensor\n \"\"\"\n return {\"q_scales\": self.q_scales}\n\n def to(self, *args, 
**kwargs):\n kwargs = self._get_to_kwargs(*args, **kwargs)\n return self.__class__(\n self.int_data.to(kwargs[\"device\"]),\n self.q_scales.to(kwargs[\"device\"]),\n self.transposed,\n self.shape,\n **kwargs,\n )\n\n def _apply_fn_to_data(self, fn):\n return self.__class__(\n fn(self.int_data), fn(self.q_scales), self.transposed, self.shape, dtype=self.dtype\n )\n\n def _change_shape(self, shape):\n return self.__class__(\n self.int_data, self.q_scales, self.transposed, shape, dtype=self.dtype\n )\n\n def __tensor_flatten__(self):\n return [\"int_data\", \"q_scales\"], [self.transposed, self.dtype, self.shape]\n\n @classmethod\n def __tensor_unflatten__(cls, tensor_data_dict, tensor_attributes, outer_size=None, outer_stride=None):\n int_data, q_scales = tensor_data_dict[\"int_data\"], tensor_data_dict[\"q_scales\"]\n transposed, dtype, shape = tensor_attributes\n return cls(int_data, q_scales, transposed, shape if outer_size is None else outer_size, dtype=dtype, strides=outer_stride)\n\n @classmethod\n def from_float(cls, input_float, qmin=-128, qmax=127):\n \"\"\"\n Method used to convert a linear weight tensor to an instance of the\n Int8DynamicallyQuantizedLinearWeight subclass.\n\n Example usage::\n\n model.lin_mod.weight = (\n Int8DynamicallyQuantizedLinearWeight.from_float(model.lin_mod.weight)\n )\n \"\"\"\n w_int_repr, w_scales, _ = dynamically_quantize_per_channel(\n input_float, qmin, qmax, torch.int8\n )\n # the desired representation shape for fast quantized matmul is\n # transposed compared to how it's stored as a linear weight,\n # i.e. we want in_channels as dim=0 and out_channels (and quantized axis) as dim=1\n # however the external representation of our tensor will maintain the correct\n # shape attribute which needs to be tracked directly.\n int_data = w_int_repr.contiguous().t()\n if cls is not Int8DynamicallyQuantizedLinearWeight:\n int_data = int_data.contiguous()\n return cls(\n int_data, w_scales, False, input_float.shape, dtype=input_float.dtype\n )" }, { "identifier": "Int8WeightOnlyQuantizedLinearWeight", "path": "torchao/quantization/subclass.py", "snippet": "class Int8WeightOnlyQuantizedLinearWeight(Int8DynamicallyQuantizedLinearWeight):\n \"\"\"\n A Tensor subclass that when applied to a weight used in a linear op/module,\n changes the linear op to a weight-only quantized linear op with symmetric\n per-channel quantization on the weight.\n \"\"\"\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n orig_dtype = act_mat.dtype\n y = torch.mm(act_mat.reshape(-1, act_mat.shape[-1]), w_qtensor.int_data.to(act_mat.dtype)) * w_qtensor.q_scales\n y = y.reshape(*act_mat.shape[:-1], y.shape[-1])\n if bias is not None:\n y += bias\n return y.to(orig_dtype)" }, { "identifier": "Int4WeightOnlyQuantizedLinearWeight", "path": "torchao/quantization/subclass.py", "snippet": "class Int4WeightOnlyQuantizedLinearWeight(QuantizedLinearWeightBase):\n \"\"\"\n A Tensor subclass that when applied to a weight used in a linear op/module,\n changes that linear op to a weight-only int4 quantized linear op with groupwise\n affine quantization on the weight.\n \"\"\"\n\n @staticmethod\n def __new__(\n cls,\n int_data,\n scales_and_zeros,\n transposed,\n shape,\n groupsize=128,\n inner_k_tiles=8,\n **kwargs,\n ):\n kwargs[\"dtype\"] = kwargs.get(\"dtype\", scales_and_zeros.dtype)\n return super().__new__(cls, int_data, transposed, shape, **kwargs) # type: ignore[attr-defined]\n\n def __init__(\n self,\n int_data,\n scales_and_zeros,\n transposed,\n shape,\n groupsize,\n 
inner_k_tiles,\n **kwargs,\n ):\n # the transposed flag tracks whether the tensor subclass has been transposed relative\n # to how a weight is normally stored in a linear i.e. [out_features, in_features].\n # tracking both transposed and shape is slightly redundant but corner cases like\n # square matrices can cause issues otherwise\n self.scales_and_zeros = scales_and_zeros\n self.groupsize = groupsize\n self.inner_k_tiles = inner_k_tiles\n super().__init__(int_data, transposed)\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n orig_act_size = act_mat.size()\n orig_dtype = act_mat.dtype\n\n # reshape and pad activation\n act_mat = act_mat.reshape(-1, act_mat.shape[-1]).to(torch.bfloat16)\n pad_size = find_multiple(act_mat.shape[-1], 1024)\n act_mat = torch.nn.functional.pad(act_mat, (0, pad_size - act_mat.shape[-1]))\n\n # matmul\n y = aten._weight_int4pack_mm(\n act_mat.contiguous(), w_qtensor.int_data, w_qtensor.groupsize, w_qtensor.scales_and_zeros\n )\n\n # remove out_feature padding\n orig_out_features = w_qtensor.shape[-1] if w_qtensor.transposed else w_qtensor.shape[-2]\n y = y[:, :orig_out_features]\n\n y = y.reshape(*orig_act_size[:-1], orig_out_features)\n if bias is not None:\n y += bias\n return y.to(orig_dtype)\n\n def dequantize(self):\n eye_shape = self.shape[1] if not self.transposed else self.shape[0]\n w_dq = self._quantized_op(\n torch.eye(eye_shape, device=self.device, dtype=self.dtype), self, None\n )\n # we dequantized using linear with the identity matrix, output has shape [in_channels, out_channels]\n # so we need to transpose back to get the original shape unless self.transposed is set.\n w_dq = w_dq if self.transposed else w_dq.t()\n return w_dq.to(self.dtype)\n\n def int_repr(self):\n return self.int_data\n\n def q_params(self):\n scales, zero_points = unpack_tinygemm_scales_and_zeros(\n self.scales_and_zeros,\n )\n return {\"q_scales\": scales, \"q_zero_points\": zero_points}\n\n def to(self, *args, **kwargs):\n kwargs = self._get_to_kwargs(*args, **kwargs)\n return self.__class__(\n self.int_data.to(kwargs[\"device\"]),\n self.scales_and_zeros.to(kwargs[\"device\"]),\n self.transposed,\n self.shape,\n self.groupsize,\n self.inner_k_tiles,\n **kwargs,\n )\n\n def _apply_fn_to_data(self, fn):\n return self.__class__(\n fn(self.int_data),\n fn(self.scales_and_zeros),\n self.transposed,\n self.shape,\n self.groupsize,\n self.inner_k_tiles,\n dtype=self.dtype,\n )\n\n def _change_shape(self, shape):\n return self.__class__(\n self.int_data,\n self.scales_and_zeros,\n self.transposed,\n shape,\n self.groupsize,\n self.inner_k_tiles,\n dtype=self.dtype\n )\n\n def __tensor_flatten__(self):\n return [\"int_data\", \"scales_and_zeros\"], (\n self.transposed,\n self.groupsize,\n self.inner_k_tiles,\n self.dtype,\n self.shape\n )\n\n @classmethod\n def __tensor_unflatten__(cls, tensor_data_dict, attributes, outer_size=None, outer_stride=None):\n int_data, scales_and_zeros = (\n tensor_data_dict[\"int_data\"],\n tensor_data_dict[\"scales_and_zeros\"],\n )\n transposed, groupsize, inner_k_tiles, dtype, shape = attributes\n return cls(\n int_data,\n scales_and_zeros,\n transposed,\n shape if outer_size is None else outer_size,\n groupsize,\n inner_k_tiles,\n dtype=dtype,\n strides=outer_stride,\n )\n\n @classmethod\n def from_float(cls, input_float, groupsize=128, inner_k_tiles=8):\n \"\"\"\n Method used to convert a linear weight tensor to an instance of the\n Int4WeightOnlyQuantizedLinearWeight subclass.\n\n Example usage::\n\n model.lin_mod.weight = (\n 
Int4WeightOnlyQuantizedLinearWeight.from_float(model.lin_mod.weight)\n )\n \"\"\"\n assert groupsize in [256, 128, 64, 32]\n assert inner_k_tiles in [8, 4, 2]\n orig_shape = input_float.shape\n orig_out_features, orig_in_features = input_float.shape\n\n # padding\n in_features = find_multiple(orig_in_features, 1024)\n out_features = find_multiple(orig_out_features, 8)\n input_float = torch.nn.functional.pad(\n input_float, (0, in_features - orig_in_features, 0, out_features - orig_out_features)\n )\n\n # quantization and packing\n input_int4x8, scales_and_zeros = groupwise_affine_quantize_tensor(\n input_float, 4, groupsize\n )\n int_data = aten._convert_weight_to_int4pack(\n input_int4x8, inner_k_tiles\n )\n\n return cls(\n int_data,\n scales_and_zeros,\n False,\n orig_shape,\n groupsize,\n inner_k_tiles,\n dtype=input_float.dtype,\n )" }, { "identifier": "_apply_logging_hook", "path": "torchao/quantization/utils.py", "snippet": "def find_multiple(n: int, k: int) -> int:\ndef compute_error(x, y):\ndef _get_logging_hook(fqn):\n def forward_hook(module, input):\ndef _apply_logging_hook(model):\n def __torch_dispatch__(self, func, types, args=(), kwargs=None):\ndef get_model_size_in_bytes(model):\nclass LoggingTensorMode(TorchDispatchMode):" } ]
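The context snippets above spell out the full int8 dynamic-quantization recipe for this record: per-token symmetric activation scales (quantize_activation_per_token_absmax), per-channel symmetric weight scales (dynamically_quantize_per_channel), an integer matmul, and a float rescale (quant_int8_dynamic_per_token_linear). Below is a minimal plain-PyTorch sketch of that recipe for orientation; the helper name and constants are illustrative, not torchao functions.

import torch

def int8_dynamic_per_token_linear_sketch(x, w, bias=None):
    # per-token symmetric activation quantization (scale per row of the last dim)
    x_scales = x.abs().amax(dim=-1, keepdim=True).clamp(min=1e-5) / 127.0
    x_int8 = torch.round(x / x_scales).clamp(-127, 127).to(torch.int8)

    # per-channel symmetric weight quantization, one scale per output channel
    w_scales = (w.abs().amax(dim=1) / 127.5).clamp(min=torch.finfo(torch.float32).eps)
    w_int8 = torch.round(w / w_scales[:, None]).clamp(-128, 127).to(torch.int8)

    # integer matmul with int32 accumulation (the role safe_int_mm plays above), then rescale
    x2 = x_int8.reshape(-1, x_int8.shape[-1]).to(torch.int32)
    y_int32 = x2 @ w_int8.t().to(torch.int32)
    y = y_int32.to(x.dtype).reshape(*x.shape[:-1], w.shape[0]) * x_scales * w_scales
    if bias is not None:
        y = y + bias
    return y

x = torch.randn(2, 8, 32)   # [batch, tokens, in_features]
w = torch.randn(16, 32)     # [out_features, in_features]
print((x @ w.t() - int8_dynamic_per_token_linear_sketch(x, w)).abs().max().item())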
import copy import unittest import torch import torch.nn as nn import os from torch._inductor.utils import run_and_get_code from torch._dynamo import config from torch.ao.quantization import MinMaxObserver, QConfigMapping from torchao.quantization.dynamic_quant import ( DynamicallyPerAxisQuantizedLinear, ) from torchao.quantization.quant_api import ( apply_dynamic_quant, apply_weight_only_int8_quant, change_linear_weights_to_int8_dqtensors, change_linear_weights_to_int8_woqtensors, change_linear_weights_to_int4_woqtensors, _replace_with_custom_fn_if_matches_filter, ) from torchao.quantization.quant_primitives import ( dequantize_per_channel, dequantize_per_tensor, dynamically_quantize_per_channel, dynamically_quantize_per_tensor, quant_int8_dynamic_linear, quant_int8_dynamic_per_token_linear, quantize_activation_per_token_absmax, safe_int_mm, ) from torchao.quantization.smoothquant import ( get_scale, smooth_fq_linear_to_inference, SmoothFakeDynamicallyQuantizedLinear, swap_linear_with_smooth_fq_linear, ) from torchao.quantization.subclass import ( Int8DynamicallyQuantizedLinearWeight, Int8WeightOnlyQuantizedLinearWeight, Int4WeightOnlyQuantizedLinearWeight ) from torchao.quantization.utils import ( _apply_logging_hook, compute_error, compute_error as SQNR, _fqn_to_op_to_shape_to_count, LoggingTensorMode, ) from torch.ao.quantization.quantize_fx import convert_to_reference_fx, prepare_fx from transformers import ( # type: ignore[import-untyped] DistilBertModel, DistilBertTokenizer, )
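The imports above expose the two entry points described in the snippets: module swaps (apply_dynamic_quant) and weight tensor subclasses (change_linear_weights_to_int8_dqtensors). A hedged usage sketch on a toy model, assuming the torchao package these snippets come from is importable; outputs will differ slightly from the float reference.

import copy
import torch
import torch.nn as nn
from torchao.quantization.quant_api import (
    apply_dynamic_quant,
    change_linear_weights_to_int8_dqtensors,
)

# in_features > 16 so the default filter of the tensor-subclass path accepts both layers
model = nn.Sequential(nn.Linear(32, 64), nn.ReLU(), nn.Linear(64, 32)).eval()
x = torch.randn(4, 32)
y_ref = model(x)

m_swap = copy.deepcopy(model)
apply_dynamic_quant(m_swap)                          # swaps in DynamicallyPerAxisQuantizedLinear

m_subclass = copy.deepcopy(model)
change_linear_weights_to_int8_dqtensors(m_subclass)  # weights become Int8DynamicallyQuantizedLinearWeight

with torch.no_grad():
    print((m_swap(x) - y_ref).abs().max().item())
    print((m_subclass(x) - y_ref).abs().max().item())

Per the docstrings above, the subclass path applies the same quantization as the module swap while leaving the nn.Linear modules in place, which is why the tests can toggle between the two.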
11462
x = torch.randn(32, 32, dtype=dtype, device=device) y_calib_eager_t = lin_eager_t(x) y_calib_opt_t = lin_opt_t(x) y_calib_opt = lin_opt(x) torch.testing.assert_close(y_calib_eager_t, y_calib_opt_t) torch.testing.assert_close(y_calib_eager_t, y_calib_opt) lin_eager_t.to_inference() lin_opt_t.to_inference() lin_opt.to_inference() torch.testing.assert_close(lin_eager_t.W_int_repr, lin_opt_t.W_int_repr) torch.testing.assert_close(lin_eager_t.W_int_repr, lin_opt.W_int_repr) lin_opt_t = torch.compile(lin_opt_t, mode="max-autotune") lin_opt = torch.compile(lin_opt, mode="max-autotune") y_ref = lin_ref(x) y_eager = lin_eager_t(x) y_opt_t = lin_opt_t(x) y_opt = lin_opt(x) if not torch.any(torch.isinf(y_ref)) and torch.any(torch.isinf(y_eager)): # eager mode torch._int_mm is sometimes buggy, when this happens # we can't really compare the compiled version against it properly print("eager mode torch._int_mm known bad, test is inconclusive") return sqnr_ref_eager = compute_error(y_ref, y_eager) sqnr_eager_opt_t = compute_error(y_eager, y_opt_t) sqnr_eager_opt = compute_error(y_eager, y_opt) # since torch.compile for a torch.half model can # change numerics significantly, we can only test for a high SQNR here # and not for closeness self.assertTrue(sqnr_eager_opt_t >= 45.0) self.assertTrue(sqnr_eager_opt >= 45.0) # y_opt_t and y_opt should be equivalent torch.testing.assert_close(y_opt_t, y_opt) def test_selective_torch_compile(self): m = nn.Sequential( nn.Linear(4, 4), nn.Sequential( nn.Linear(4, 4), nn.Linear(4, 4), ), nn.Linear(4, 4), ) x = torch.randn(4, 4) y_ref = m(x) _replace_with_custom_fn_if_matches_filter( m, lambda mod: torch.compile(mod), lambda mod, fqn: isinstance(mod, nn.Linear) and fqn != "1.0", ) self.assertTrue(isinstance(m[0], torch._dynamo.eval_frame.OptimizedModule)) self.assertTrue(isinstance(m[1][0], nn.Linear)) self.assertTrue(isinstance(m[1][1], torch._dynamo.eval_frame.OptimizedModule)) self.assertTrue(isinstance(m[2], torch._dynamo.eval_frame.OptimizedModule)) y = m(x) torch.testing.assert_close(y, y_ref) def test_debug_x_absmax(self): m = nn.Sequential(nn.Linear(3, 4)) x0 = torch.randn(4, 5, 3) y0 = m(x0) swap_linear_with_smooth_fq_linear(m) # no calibration, straight to inference, should not crash smooth_fq_linear_to_inference(m, debug_skip_calibration=True) y1 = m(x0) class PythonQuantPrimitivesUnitTest(unittest.TestCase): def _test_dynamic_quant_per_tensor_numerics_impl( self, qmin, qmax, int_dtype, qint_dtype, float_dtype, device, qscheme ): x = torch.randn(256, dtype=float_dtype, device=device) y_vals, y_scale, y_zero_point = dynamically_quantize_per_tensor( x, qmin, qmax, int_dtype, qscheme ) # reference # quantize_per_tensor_dynamic doesn't work for half, so we cast there and back x_for_ref = x.half().float() if float_dtype == torch.float16 else x # quantize_per_tensor_dynamic doesn't support qscheme, so we just do dynamic # quant manually with observers + static quant obs = MinMaxObserver( dtype=qint_dtype, qscheme=qscheme, quant_min=qmin, quant_max=qmax ).to(device) obs(x_for_ref) ref_scale, ref_zero_point = obs.calculate_qparams() y_ref = torch.quantize_per_tensor( x_for_ref, ref_scale, ref_zero_point, qint_dtype ) # y_ref = torch.quantize_per_tensor_dynamic(x_for_ref, qint_dtype, False) # print(y_ref) if float_dtype == torch.float: assert torch.equal(y_vals, y_ref.int_repr()) else: # numerics are not exactly aligned yet, off-by-one probably due # to rounding assert torch.max(torch.abs(y_vals - y_ref.int_repr())).item() <= 1 torch.testing.assert_close( y_scale, 
torch.tensor([y_ref.q_scale()], device=device, dtype=float_dtype) ) if y_zero_point is not None: assert torch.equal( y_zero_point, torch.tensor([y_ref.q_zero_point()], device=device) ) else: self.assertTrue(y_ref.q_zero_point() == 0) # dequantize and check again
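The _test_dynamic_quant_per_tensor_numerics_impl helper shown in cropped_code compares dynamically_quantize_per_tensor against a MinMaxObserver reference. A compact, self-contained sketch of the affine per-tensor recipe it exercises (illustrative only, not the torchao implementation):

import torch

def dyn_quant_per_tensor_affine(x, qmin=-128, qmax=127):
    eps = torch.finfo(torch.float32).eps
    min_neg = torch.clamp(x.min(), max=0.0)   # force the range to include zero
    max_pos = torch.clamp(x.max(), min=0.0)
    scale = torch.clamp((max_pos - min_neg) / float(qmax - qmin), min=eps)
    zero_point = torch.clamp(qmin - torch.round(min_neg / scale), qmin, qmax).to(torch.int)
    q = torch.clamp(torch.round(x / scale) + zero_point, qmin, qmax).to(torch.int8)
    return q, scale, zero_point

x = torch.randn(256)
q, scale, zp = dyn_quant_per_tensor_affine(x)
print(q.dtype, scale.item(), zp.item())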
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # mypy: ignore-errors torch.manual_seed(0) config.cache_size_limit = 100 class SmoothquantUnitTest(unittest.TestCase): # first, let's reproduce the graphic from the paper, Figure 4, to ensure # we are calculating the scales correctly def test_figure_4(self): X = torch.FloatTensor([1, -16, 2, 6, -2, 8, -1, -9]).reshape(1, 2, 4) W = torch.FloatTensor([2, 1, -2, 1, -1, -1, 2, -1, -2, -1, -1, 1]).reshape(4, 3) X_mul_W = torch.matmul(X, W) smoothquant_scale = get_scale( torch.amax(torch.abs(X), dim=(0, 1)), torch.amax(torch.abs(W), dim=1), alpha=0.5, ) # reproduce scaled calculation X_scaled = X / smoothquant_scale.reshape(1, 1, -1) W_scaled = torch.matmul(torch.diag(smoothquant_scale), W) X_scaled_mul_scaled_W = torch.matmul(X_scaled, W_scaled) assert torch.allclose(X_mul_W, X_scaled_mul_scaled_W), "not close!" assert X_mul_W.shape == X_scaled_mul_scaled_W.shape # next, run the above test on a sample of representative inputs def test_tensors(self): x_shape = (1, 5, 7) w_shape = (7, 9) for i in range(3): X = torch.randn(x_shape) * 10 W = torch.randn(w_shape) s = get_scale( torch.amax(torch.abs(X), dim=(0, 1)), torch.amax(torch.abs(W), dim=1), alpha=0.5, ) Y = torch.matmul(X, W) Y_ref = torch.matmul( X / s.reshape(1, 1, -1), torch.matmul(torch.diag(s), W), ) assert torch.allclose(Y, Y_ref, atol=1e-3, rtol=1e-3), "not close!" def _test_smooth_linear_impl(self, x_shape, lin_shape, device): # so we can use the full range torch.backends.quantized.engine = "qnnpack" x = torch.randn(*x_shape, device=device) * 9 + 10 lin_fp32 = nn.Linear(*lin_shape, device=device) # misc: ignore lin_smooth = SmoothFakeDynamicallyQuantizedLinear.from_float( copy.deepcopy(lin_fp32), alpha=0.25 ) lin_smooth_skip_scaling = SmoothFakeDynamicallyQuantizedLinear.from_float( copy.deepcopy(lin_fp32), alpha=0.25 ) lin_fp32_copy = copy.deepcopy(lin_fp32) # assignment: ignore lin_fp32_copy.qconfig = torch.ao.quantization.QConfig( # assignment: ignore activation=None, weight=torch.ao.quantization.default_per_channel_weight_observer, ) lin_dynamic_q = torch.ao.nn.quantized.dynamic.Linear.from_float( lin_fp32_copy.cpu() ) y_ref = lin_fp32(x) # calibrate the smoothquant versions y_smooth_nocalib = lin_smooth(x) _ = lin_smooth_skip_scaling(x) lin_smooth.to_inference() lin_smooth_skip_scaling.debug_skip_scaling = True lin_smooth_skip_scaling.to_inference() # verify that with scaling turned off, numerics match quantized version y_smooth_fq_only = lin_smooth_skip_scaling(x) y_smooth_fq = lin_smooth(x) y_dynamic_q = lin_dynamic_q(x.cpu()).to(device) # print('y_ref', y_ref) # print('y_smooth_nocalib', y_smooth_nocalib) # print('y_smooth_fq', y_smooth_fq) # print('y_smooth_fq_only', y_smooth_fq_only) # print('y_dynamic_q', y_dynamic_q) sqnr_smooth_fq = compute_error(y_ref, y_smooth_fq) sqnr_dynamic_q = compute_error(y_ref, y_dynamic_q) sqnr_fq = compute_error(y_smooth_fq_only, y_dynamic_q) # print('sqnr_smooth', sqnr_smooth_fq, 'sqnr_dynamic', sqnr_dynamic_q, 'sqnr_fq', sqnr_fq) assert torch.allclose( y_ref, y_smooth_nocalib ), "y_ref not close to y_smooth_nocalib" # after https://github.com/pytorch-labs/ao_benchmarks/pull/32, # numerics do not match exactly between production c++ code # and this Python code # assert torch.allclose( # y_smooth_fq_only, y_dynamic_q, # atol=torch.max(y_smooth_fq_only).item()*0.01, # rtol=0.00001), \ # 
'y_smooth_fq_only not close to y_dynamic_q' self.assertTrue(sqnr_smooth_fq.item() >= 40.0) self.assertTrue(sqnr_dynamic_q.item() >= 40.0) self.assertTrue(sqnr_fq.item() >= 40.0) def test_smooth_linear_cpu(self): self._test_smooth_linear_impl((1, 5, 3), (3, 4), "cpu") def test_smooth_linear_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return self._test_smooth_linear_impl((1, 32, 32), (32, 16), "cuda") def test_smooth_linear_edge_cases(self): # so we can use the full range torch.backends.quantized.engine = "qnnpack" lin_fp32 = nn.Linear(3, 4) lin_smooth = SmoothFakeDynamicallyQuantizedLinear.from_float( lin_fp32, alpha=0.25 ) # test different ranks x0 = torch.randn(4, 5, 3) x1 = torch.randn(1, 8, 5, 3) x2 = torch.randn(2, 3, 7, 5, 3) # calibrate _ = lin_smooth(x0) _ = lin_smooth(x1) _ = lin_smooth(x2) # inference lin_smooth.to_inference() _ = lin_smooth(x0) _ = lin_smooth(x1) _ = lin_smooth(x2) def test_swap(self): m = nn.Sequential( nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 4)), nn.Linear(4, 4), ) m_copy = copy.deepcopy(m) swap_linear_with_smooth_fq_linear(m_copy, skip_fqn_list=["0.2"]) # verify all linears are swapped assert isinstance(m_copy[0][0], SmoothFakeDynamicallyQuantizedLinear) assert isinstance(m_copy[0][1], nn.ReLU) # this one was skipped assert isinstance(m_copy[0][2], nn.Linear) assert isinstance(m_copy[1], SmoothFakeDynamicallyQuantizedLinear) # verify results do not change without smoothing x = torch.randn(4, 4) y_ref = m(x) y = m_copy(x) assert torch.allclose(y_ref, y) def test_weight_t_and_non_t_numerics_match(self): # verify that numerics match whether weight is stored # in transposed format (for cuBLAS) vs non-transposed format # (for torch.compile) if not torch.cuda.is_available(): print("no cuda, skip") return dtype = torch.half device = "cuda" lin_ref = nn.Linear(32, 16, dtype=dtype, device=device) lin_eager_t = copy.deepcopy(lin_ref) lin_opt_t = copy.deepcopy(lin_eager_t) lin_opt = copy.deepcopy(lin_eager_t) lin_eager_t = SmoothFakeDynamicallyQuantizedLinear.from_float(lin_eager_t) lin_opt_t = SmoothFakeDynamicallyQuantizedLinear.from_float(lin_opt_t) lin_opt = SmoothFakeDynamicallyQuantizedLinear.from_float(lin_opt) lin_opt.store_w_int_repr_t = False x = torch.randn(32, 32, dtype=dtype, device=device) y_calib_eager_t = lin_eager_t(x) y_calib_opt_t = lin_opt_t(x) y_calib_opt = lin_opt(x) torch.testing.assert_close(y_calib_eager_t, y_calib_opt_t) torch.testing.assert_close(y_calib_eager_t, y_calib_opt) lin_eager_t.to_inference() lin_opt_t.to_inference() lin_opt.to_inference() torch.testing.assert_close(lin_eager_t.W_int_repr, lin_opt_t.W_int_repr) torch.testing.assert_close(lin_eager_t.W_int_repr, lin_opt.W_int_repr) lin_opt_t = torch.compile(lin_opt_t, mode="max-autotune") lin_opt = torch.compile(lin_opt, mode="max-autotune") y_ref = lin_ref(x) y_eager = lin_eager_t(x) y_opt_t = lin_opt_t(x) y_opt = lin_opt(x) if not torch.any(torch.isinf(y_ref)) and torch.any(torch.isinf(y_eager)): # eager mode torch._int_mm is sometimes buggy, when this happens # we can't really compare the compiled version against it properly print("eager mode torch._int_mm known bad, test is inconclusive") return sqnr_ref_eager = compute_error(y_ref, y_eager) sqnr_eager_opt_t = compute_error(y_eager, y_opt_t) sqnr_eager_opt = compute_error(y_eager, y_opt) # since torch.compile for a torch.half model can # change numerics significantly, we can only test for a high SQNR here # and not for closeness self.assertTrue(sqnr_eager_opt_t >= 45.0) 
self.assertTrue(sqnr_eager_opt >= 45.0) # y_opt_t and y_opt should be equivalent torch.testing.assert_close(y_opt_t, y_opt) def test_selective_torch_compile(self): m = nn.Sequential( nn.Linear(4, 4), nn.Sequential( nn.Linear(4, 4), nn.Linear(4, 4), ), nn.Linear(4, 4), ) x = torch.randn(4, 4) y_ref = m(x) _replace_with_custom_fn_if_matches_filter( m, lambda mod: torch.compile(mod), lambda mod, fqn: isinstance(mod, nn.Linear) and fqn != "1.0", ) self.assertTrue(isinstance(m[0], torch._dynamo.eval_frame.OptimizedModule)) self.assertTrue(isinstance(m[1][0], nn.Linear)) self.assertTrue(isinstance(m[1][1], torch._dynamo.eval_frame.OptimizedModule)) self.assertTrue(isinstance(m[2], torch._dynamo.eval_frame.OptimizedModule)) y = m(x) torch.testing.assert_close(y, y_ref) def test_debug_x_absmax(self): m = nn.Sequential(nn.Linear(3, 4)) x0 = torch.randn(4, 5, 3) y0 = m(x0) swap_linear_with_smooth_fq_linear(m) # no calibration, straight to inference, should not crash smooth_fq_linear_to_inference(m, debug_skip_calibration=True) y1 = m(x0) class PythonQuantPrimitivesUnitTest(unittest.TestCase): def _test_dynamic_quant_per_tensor_numerics_impl( self, qmin, qmax, int_dtype, qint_dtype, float_dtype, device, qscheme ): x = torch.randn(256, dtype=float_dtype, device=device) y_vals, y_scale, y_zero_point = dynamically_quantize_per_tensor( x, qmin, qmax, int_dtype, qscheme ) # reference # quantize_per_tensor_dynamic doesn't work for half, so we cast there and back x_for_ref = x.half().float() if float_dtype == torch.float16 else x # quantize_per_tensor_dynamic doesn't support qscheme, so we just do dynamic # quant manually with observers + static quant obs = MinMaxObserver( dtype=qint_dtype, qscheme=qscheme, quant_min=qmin, quant_max=qmax ).to(device) obs(x_for_ref) ref_scale, ref_zero_point = obs.calculate_qparams() y_ref = torch.quantize_per_tensor( x_for_ref, ref_scale, ref_zero_point, qint_dtype ) # y_ref = torch.quantize_per_tensor_dynamic(x_for_ref, qint_dtype, False) # print(y_ref) if float_dtype == torch.float: assert torch.equal(y_vals, y_ref.int_repr()) else: # numerics are not exactly aligned yet, off-by-one probably due # to rounding assert torch.max(torch.abs(y_vals - y_ref.int_repr())).item() <= 1 torch.testing.assert_close( y_scale, torch.tensor([y_ref.q_scale()], device=device, dtype=float_dtype) ) if y_zero_point is not None: assert torch.equal( y_zero_point, torch.tensor([y_ref.q_zero_point()], device=device) ) else: self.assertTrue(y_ref.q_zero_point() == 0) # dequantize and check again
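all_code opens with test_figure_4, which checks the SmoothQuant identity that dividing activations by a per-channel factor and folding the same factor into the weights leaves the matmul unchanged. A standalone sketch of that identity, using the same scale formula as get_scale and the same tolerances as test_tensors:

import torch

torch.manual_seed(0)
alpha = 0.5
X = torch.randn(1, 5, 7) * 10   # [batch, tokens, k]
W = torch.randn(7, 9)           # [k, m]

# s_k = max|X[..., k]|^alpha / max|W[k, :]|^(1 - alpha), one factor per shared channel k
s = torch.pow(X.abs().amax(dim=(0, 1)), alpha) / torch.pow(W.abs().amax(dim=1), 1.0 - alpha)

lhs = torch.matmul(X, W)
rhs = torch.matmul(X / s.reshape(1, 1, -1), torch.matmul(torch.diag(s), W))
print(torch.allclose(lhs, rhs, atol=1e-3, rtol=1e-3))  # True: the scaling is a numerical no-op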
x_dq = dequantize_per_tensor(y_vals, y_scale, y_zero_point, float_dtype)
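The next_line target dequantizes with dequantize_per_tensor, and the comment before it ("dequantize and check again") signals a round-trip check. A self-contained sketch of that check; the quantization is the affine recipe from the sketch above and the dequant mirrors the three steps quoted for dequantize_per_tensor (cast, subtract zero point, multiply by scale):

import torch

x = torch.randn(256)
qmin, qmax = -128, 127
scale = (x.max() - x.min()) / (qmax - qmin)
zp = (qmin - torch.round(x.min() / scale)).to(torch.int)
q = torch.clamp(torch.round(x / scale) + zp, qmin, qmax).to(torch.int8)
x_dq = (q.to(torch.float32) - zp) * scale              # same steps as dequantize_per_tensor
print((x - x_dq).abs().max().item(), scale.item())     # error is on the order of one step (scale)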
8
2023-11-03 21:27:36+00:00
16k
intellerce/controlanimate
animatediff/pipelines/controlanimation_pipeline.py
[ { "identifier": "UNet3DConditionModel", "path": "animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n use_inflated_groupnorm=False,\n\n addition_embed_type: Optional[str] = None,\n addition_time_embed_dim: Optional[int] = None,\n\n\n\n\n dropout: float = 0.0,\n encoder_hid_dim: Optional[int] = None,\n encoder_hid_dim_type: Optional[str] = None,\n\n conv_in_kernel: int = 3,\n conv_out_kernel: int = 3,\n\n attention_type: str = \"default\",\n class_embeddings_concat: bool = False,\n mid_block_only_cross_attention: Optional[bool] = None,\n cross_attention_norm: Optional[str] = None,\n addition_embed_type_num_heads=64,\n\n transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,\n\n time_embedding_type: str = \"positional\",\n time_embedding_dim: Optional[int] = None,\n time_embedding_act_fn: Optional[str] = None,\n timestep_post_act: Optional[str] = None,\n time_cond_proj_dim: Optional[int] = None,\n\n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n\n\n ):\n super().__init__()\n \n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n # self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n post_act_fn=timestep_post_act,\n cond_proj_dim=time_cond_proj_dim,\n )\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding 
= None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n\n if class_embeddings_concat:\n # The time embeddings are concatenated with the class embeddings. The dimension of the\n # time embeddings passed to the down, middle, and up blocks is twice the dimension of the\n # regular time embeddings\n blocks_time_embed_dim = time_embed_dim * 2\n else:\n blocks_time_embed_dim = time_embed_dim\n\n\n # self.time_embedding = TimestepEmbedding(\n # timestep_input_dim,\n # time_embed_dim,\n # act_fn=act_fn,\n # post_act_fn=timestep_post_act,\n # cond_proj_dim=time_cond_proj_dim,\n # )\n\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n\n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in 
enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n if use_inflated_groupnorm:\n self.conv_norm_out = InflatedGroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n else:\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)\n\n\n\n\n @property\n def attn_processors(self) -> Dict[str, AttentionProcessor]:\n r\"\"\"\n Returns:\n `dict` of attention processors: A dictionary containing all attention processors used in the model with\n indexed by its weight name.\n \"\"\"\n # set recursively\n processors = {}\n\n def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):\n if hasattr(module, \"get_processor\"):\n processors[f\"{name}.processor\"] = module.get_processor(return_deprecated_lora=True)\n\n for sub_name, child in module.named_children():\n fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n return processors\n\n for name, module in self.named_children():\n fn_recursive_add_processors(name, module, processors)\n\n return processors\n\n def set_attn_processor(\n self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False\n ):\n r\"\"\"\n Sets the attention processor to use to compute attention.\n\n Parameters:\n processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):\n The instantiated processor class or a dictionary of processor classes that will be set as the processor\n for **all** `Attention` layers.\n\n If `processor` is a dict, the key needs to define the path to the corresponding cross attention\n processor. 
This is strongly recommended when setting trainable attention processors.\n\n \"\"\"\n count = len(self.attn_processors.keys())\n\n if isinstance(processor, dict) and len(processor) != count:\n raise ValueError(\n f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n f\" number of attention layers: {count}. Please make sure to pass {count} processor classes.\"\n )\n\n def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n if hasattr(module, \"set_processor\"):\n if not isinstance(processor, dict):\n module.set_processor(processor, _remove_lora=_remove_lora)\n else:\n module.set_processor(processor.pop(f\"{name}.processor\"), _remove_lora=_remove_lora)\n\n for sub_name, child in module.named_children():\n fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n for name, module in self.named_children():\n fn_recursive_attn_processor(name, module, processor)\n\n\n\n\n\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None, \n\n timestep_cond: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n\n\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n\n # print(\"SHAPE OF RES UNET:\", [a.shape for a in down_block_res_samples])\n # if controlnet ...\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n # if controlnet\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, use_safetensors = False, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise 
RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n\n from diffusers.utils import WEIGHTS_NAME, SAFETENSORS_WEIGHTS_NAME\n \n model = cls.from_config(config, **unet_additional_kwargs)\n if use_safetensors:\n WEIGHTS_NAME = SAFETENSORS_WEIGHTS_NAME\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n \n if use_safetensors:\n state_dict = safetensors.torch.load_file(model_file, device=\"cpu\")\n else:\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {len(params)} -> {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "preprocess_image", "path": "animatediff/utils/util.py", "snippet": "def preprocess_image(image):\n if isinstance(image, torch.Tensor):\n return image\n elif isinstance(image, Image.Image):\n image = [image]\n\n if isinstance(image[0], Image.Image):\n w, h = image[0].size\n w, h = map(lambda x: x - x % 8, (w, h)) # resize to integer multiple of 8\n\n image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION[\"lanczos\"]))[None, :] for i in image]\n image = np.concatenate(image, axis=0)\n image = np.array(image).astype(np.float32) / 255.0\n image = image.transpose(0, 3, 1, 2)\n image = 2.0 * image - 1.0\n image = torch.from_numpy(image)\n elif isinstance(image[0], torch.Tensor):\n image = torch.cat(image, dim=0)\n return image" }, { "identifier": "MultiControlNetResidualsPipeline", "path": "modules/controlresiduals_pipeline.py", "snippet": "class MultiControlNetResidualsPipeline:\n def __init__(self, hf_controlnet_names, cond_scale, use_lcm):\n cache_dir = 'cache'\n\n self.controlnet_names = hf_controlnet_names\n\n self.controlnets = []\n\n for controlnet_name in hf_controlnet_names:\n self.controlnets.append(ControlNetModel.from_pretrained(controlnet_name, torch_dtype=torch.float16))\n\n # ControlNetModel.from_pretrained(\"lllyasviel/sd-controlnet-openpose\", torch_dtype=torch.float16),\n # ControlNetModel.from_pretrained(\"lllyasviel/sd-controlnet-canny\", torch_dtype=torch.float16),\n\n self.controlnet = MultiControlNetModel(self.controlnets).to('cuda')\n\n self.cond_scale = cond_scale\n\n self.use_lcm = use_lcm\n \n self.ip_adapter = None\n\n # self.multicontrolnet.to('cpu')\n\n def canny_processor(image):\n o_image = np.array(image)\n o_image = cv2.Canny(o_image, 100, 200)\n o_image = o_image[:, :, None]\n o_image = np.concatenate([o_image, o_image, o_image], axis=2)\n o_image = Image.fromarray(o_image)\n return o_image\n self.canny_processor = canny_processor\n self.mlsd_processor = MLSDdetector.from_pretrained('lllyasviel/Annotators', cache_dir = cache_dir,)\n self.openpose_processor = OpenposeDetector.from_pretrained('lllyasviel/Annotators', cache_dir = cache_dir,)\n self.hed_processor = HEDdetector.from_pretrained('lllyasviel/Annotators', 
cache_dir = cache_dir,)\n self.lineart_anime_processor = LineartAnimeDetector.from_pretrained(\"lllyasviel/Annotators\", cache_dir = cache_dir)\n self.lineart_processor = LineartDetector.from_pretrained(\"lllyasviel/Annotators\", cache_dir = cache_dir)\n self.normalbae_processor = NormalBaeDetector.from_pretrained(\"lllyasviel/Annotators\", cache_dir = cache_dir)\n self.pidi_processor = PidiNetDetector.from_pretrained('lllyasviel/Annotators', cache_dir = cache_dir)\n self.depth_estimate_processor = pipeline('depth-estimation', cache_dir = cache_dir)\n\n date_time = datetime.datetime.now()\n self.date_time = date_time.strftime(\"%Y%m%d_%H%M%S_%f\")\n\n\n def move_to_device(self,controlnet_model, device):\n\n if 'mlsd' in controlnet_model: \n self.mlsd_processor.to(device)\n\n elif 'openpose' in controlnet_model:\n self.openpose_processor.to(device)\n # o_image.show()\n\n elif 'hed' in controlnet_model: \n self.hed_processor.to(device)\n\n elif 'lineart_anime' in controlnet_model:\n self.lineart_anime_processor.to(device)\n\n elif 'lineart' in controlnet_model:\n self.lineart_processor.to(device)\n\n elif 'normalbae' in controlnet_model: \n self.normalbae_processor.to(device)\n \n elif 'softedge' in controlnet_model:\n\n self.pidi_processor.to(device)\n elif 'depth' in controlnet_model:\n self.depth_estimator.to(device)\n\n\n def prepare_controlnet_input_image(self, controlnet_model, image):\n if 'canny' in controlnet_model:\n o_image = self.canny_processor(image)\n\n elif 'mlsd' in controlnet_model: \n w, h = image.size\n detect_resolution=min(h,w)\n image_resolution=min(h,w) \n o_image = self.mlsd_processor(image)\n\n elif 'openpose' in controlnet_model:\n # h, w = image.size\n # detect_resolution=min(h,w)\n # image_resolution=min(h,w) \n # o_image = self.openpose_processor(image,detect_resolution= detect_resolution, image_resolution=image_resolution, hand_and_face=True)\n \n o_image = self.openpose_processor(image, hand_and_face=True)\n # o_image.show()\n\n elif 'hed' in controlnet_model: \n o_image = self.hed_processor(image)\n\n elif 'lineart_anime' in controlnet_model:\n w, h = image.size\n detect_resolution=min(h,w)\n image_resolution=min(h,w) \n o_image = self.lineart_anime_processor(image, detect_resolution= detect_resolution, image_resolution=image_resolution)\n\n elif 'lineart' in controlnet_model:\n w, h = image.size\n detect_resolution = min(h,w)\n image_resolution = min(h,w) \n o_image = self.lineart_processor(image, detect_resolution= detect_resolution, image_resolution=image_resolution)\n\n elif 'normalbae' in controlnet_model: \n o_image = self.normalbae_processor(image)\n \n elif 'softedge' in controlnet_model:\n w, h = image.size\n detect_resolution= min(h,w)\n image_resolution= min(h,w)\n o_image = self.pidi_processor(image, detect_resolution= detect_resolution, image_resolution=image_resolution)\n\n elif 'depth' in controlnet_model:\n o_image = self.depth_estimator(image)['depth']\n o_image = np.array(o_image)\n o_image = o_image[:, :, None]\n o_image = np.concatenate([image, image, image], axis=2)\n o_image = Image.fromarray(o_image)\n\n else:\n raise Exception(f\"ControlNet model {controlnet_model} is not supported at this time.\")\n\n return o_image\n\n\n\n def prepare_image(\n self,\n image,\n width,\n height,\n batch_size,\n num_images_per_prompt,\n device,\n dtype,\n do_classifier_free_guidance=False,\n guess_mode=False,\n ):\n image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)\n\n \n\n image_batch_size = 
image.shape[0]\n\n if image_batch_size == 1:\n repeat_by = batch_size\n else:\n # image batch size is the same as prompt batch size\n repeat_by = num_images_per_prompt\n\n image = image.repeat_interleave(repeat_by, dim=0)\n\n image = image.to(device=device, dtype=dtype)\n\n # if do_classifier_free_guidance and not guess_mode:\n # image = torch.cat([image] * 2)\n\n return image\n\n\n def prepare_images(self, \n image,\n width, \n height,\n batch_size,\n num_images_per_prompt,\n device,\n controlnet\n ):\n images = []\n\n for image_ in image:\n # height, width = image_.size\n batch_size = 1\n num_images_per_prompt = 1\n device = 'cuda'\n image_ = self.prepare_image(\n image=image_,\n width=width,\n height=height,\n batch_size=batch_size * num_images_per_prompt,\n num_images_per_prompt=num_images_per_prompt,\n device=device,\n dtype=controlnet.dtype,\n do_classifier_free_guidance=True,\n guess_mode=False,\n )\n\n images.append(image_)\n\n image = images\n height, width = image[0].shape[-2:]\n return image\n\n\n \n\n\n def prep_control_images(self, images,\n control_image_processor,\n epoch = 0,\n output_dir = 'tmp/output',\n save_outputs = True,\n do_classifier_free_guidance = True,\n guess_mode = False,\n ):\n\n # print(date_time)\n\n # self.input_images = images\n\n output_dir = os.path.join(output_dir, f'controlnet_outputs_{self.date_time}')\n \n self.control_image_processor = control_image_processor\n \n \n\n self.controlnet.to('cuda')\n\n prep_images = []\n for ctrl_name in self.controlnet_names:\n out_dir = os.path.join(output_dir, ctrl_name)\n if not os.path.exists(out_dir) and save_outputs:\n os.makedirs(out_dir)\n\n self.move_to_device(ctrl_name, 'cuda')\n ctrl_images = None\n for i, image in tqdm(enumerate(images)):\n width, height = image.size\n prep_image = self.prepare_controlnet_input_image(ctrl_name, image)\n if save_outputs:\n prep_image.save(os.path.join(out_dir,\"{}_{:04d}.png\".format(epoch,i)))\n prep_image = self.prepare_images([prep_image], width, height ,1,1,'cuda',self.controlnet)\n if ctrl_images is not None:\n ctrl_images = torch.cat([ctrl_images, prep_image[0]])\n else:\n ctrl_images = prep_image[0]\n \n \n \n if do_classifier_free_guidance and not guess_mode and not self.use_lcm:\n ctrl_images = torch.cat([ctrl_images] * 2)\n\n prep_images.append(ctrl_images)\n \n self.prep_images = prep_images\n\n \n\n\n def __call__(self, \n control_model_input,\n t,\n controlnet_prompt_embeds, \n frame_count,\n image_embeds = None,\n do_classifier_free_guidance = True,\n guess_mode = True):\n \n control_model_input = rearrange(control_model_input, 'b c f h w -> (b f) c h w' )\n\n # IP Adapter\n # if self.ip_adapter is not None:\n\n controlnet_prompt_embeds = torch.cat([controlnet_prompt_embeds] * frame_count)\n\n down_block_res_samples_multi, mid_block_res_sample_multi = self.controlnet(\n control_model_input.half(), #[:,:,i,:,:].half(),\n t,\n encoder_hidden_states=controlnet_prompt_embeds.half(),\n controlnet_cond=self.prep_images,\n conditioning_scale=self.cond_scale,\n guess_mode=guess_mode,\n return_dict=False,\n )\n\n # tuples are un-assinagnable so we first convert to lists and then convert back\n down_block_additional_residuals = list(down_block_res_samples_multi) \n \n # Re-arranging the outputs of controlnet(s) to account for the frames\n for i, tensor in enumerate(down_block_additional_residuals):\n down_block_additional_residuals[i] = rearrange(tensor, '(b f) c h w -> b c f h w', f = frame_count)\n\n \n mid_block_additional_residual = 
rearrange(mid_block_res_sample_multi, '(b f) c h w -> b c f h w', f = frame_count)\n\n down_block_additional_residuals = tuple(down_block_additional_residuals)\n\n return down_block_additional_residuals, mid_block_additional_residual" } ]
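The MultiControlNetResidualsPipeline snippet above folds the frame axis into the batch axis before running the ControlNet and unfolds the returned residuals afterwards. A minimal sketch of that reshaping pattern with toy tensors (the shape values here are illustrative, not taken from the record):

# Minimal sketch of the (b f) c h w <-> b c f h w pattern used above; shapes are toy values.
import torch
from einops import rearrange

batch, channels, frames, height, width = 1, 4, 16, 64, 64
latents = torch.randn(batch, channels, frames, height, width)

# Fold frames into the batch axis so a 2D ControlNet can process every frame in one pass.
control_model_input = rearrange(latents, 'b c f h w -> (b f) c h w')
assert control_model_input.shape == (batch * frames, channels, height, width)

# A ControlNet residual comes back per frame; unfold it into a 5D video tensor again.
fake_residual = torch.randn(batch * frames, 320, height, width)  # stand-in for one down-block residual
video_residual = rearrange(fake_residual, '(b f) c h w -> b c f h w', f=frames)
assert video_residual.shape == (batch, 320, frames, height, width)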
import inspect import numpy as np import torch import PIL import math from typing import Callable, List, Optional, Union, Dict, Any from dataclasses import dataclass from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, UniPCMultistepScheduler ) from diffusers.utils import BaseOutput from diffusers.utils import ( deprecate, logging, # replace_example_docstring, # scale_lora_layers, # unscale_lora_layers, ) from einops import rearrange from ..models.unet import UNet3DConditionModel from ..utils.util import preprocess_image from diffusers.utils.torch_utils import randn_tensor from modules.controlresiduals_pipeline import MultiControlNetResidualsPipeline from diffusers.image_processor import VaeImageProcessor from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.utils.torch_utils import randn_tensor from diffusers import SchedulerMixin, ConfigMixin from typing import Any, Dict, List, Optional, Tuple, Union from diffusers.configuration_utils import register_to_config from accelerate import cpu_offload
12605
# check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, input_frames, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latent_timestep, overlaps, strength, latents=None, last_output_frames = None, use_lcm = False, use_img2img = False, ): shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # torch.randn(shape, generator=generator, dtype=dtype) #.to(device) , device=rand_device last_output_frames_latents = [] if overlaps > 0 or strength < 1 or use_lcm: frames_latents = [] if input_frames is not None: for i , frame in enumerate(input_frames): image = frame image = self.image_processor.preprocess(image) if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) frame_latents = self.vae.encode(image).latent_dist.sample(generator) frame_latents = self.vae.config.scaling_factor * frame_latents frames_latents.append(frame_latents) if last_output_frames is not None: for image in last_output_frames: image = self.image_processor.preprocess(image) if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) frame_latents = self.vae.encode(image).latent_dist.sample(generator) frame_latents = self.vae.config.scaling_factor * frame_latents last_output_frames_latents.append(frame_latents) latents = latents.to(device) if use_lcm: frames_latents_tensor = torch.stack(frames_latents,dim=2) latents = self.scheduler.add_noise(frames_latents_tensor, latents.to(device), latent_timestep) elif last_output_frames is not None and strength < 1.0: # for i in range(len(last_output_frames_latents)): for i in range(video_length): if i < len(last_output_frames): latents[:, :, i, :, :] = self.scheduler.add_noise(last_output_frames_latents[i], latents[:, :, i, :, :].to(device), latent_timestep) else: if not use_img2img: latents[:, :, i, :, :] = self.scheduler.add_noise(last_output_frames_latents[-1], latents[:, :, i, :, :].to(device), latent_timestep) else: latents[:, :, i, :, :] = self.scheduler.add_noise(frames_latents[i], latents[:, :, i, :, :].to(device), latent_timestep) latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler if strength >= 1 and not use_lcm: latents = latents * self.scheduler.init_noise_sigma return latents def get_timesteps(self, num_inference_steps, 
strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] return timesteps, num_inference_steps - t_start @torch.no_grad() def __call__( self, video_length: Optional[int], input_frames: list = None, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, strength: float = 0.5, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, overlaps: int = 0,
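The prepare_latents method in the cropped code above forward-diffuses VAE-encoded frames to a chosen timestep with scheduler.add_noise before denoising starts. A rough sketch of that single step, assuming diffusers' DDIMScheduler as a stand-in for whichever scheduler the pipeline is configured with, and using toy tensors:

# Sketch of the "add noise at the strength-selected timestep" step, assuming DDIMScheduler; toy data.
import torch
from diffusers import DDIMScheduler

scheduler = DDIMScheduler()
scheduler.set_timesteps(25)

frame_latents = torch.randn(1, 4, 64, 64)      # stand-in for a VAE-encoded, scaled frame latent
noise = torch.randn_like(frame_latents)
latent_timestep = scheduler.timesteps[5:6]     # timestep picked from the strength-trimmed schedule

# forward-diffuse the clean frame latent to that timestep, as prepare_latents does per frame
noisy_latents = scheduler.add_noise(frame_latents, noise, latent_timestep)
print(noisy_latents.shape)                     # torch.Size([1, 4, 64, 64])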
# Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py logger = logging.get_logger(__name__) # pylint: disable=invalid-name # For LCM: @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class ControlAnimationPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler, None # LCM ], ): super().__init__() scheduler = ( scheduler if scheduler is not None else LCMScheduler( beta_start=0.00085, beta_end=0.0120, beta_schedule="scaled_linear", prediction_type="epsilon" ) ) if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. 
If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False ) self.latents = None self.frames_latents = None self.prev_frames_latents = None self.ip_adapter = None def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale # if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) # else: # scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: procecss multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: procecss multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers # unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32): """ see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: timesteps: torch.Tensor: generate embedding vectors at these timesteps embedding_dim: int: dimension of the embeddings to generate dtype: data type of the generated embeddings Returns: embedding vectors with shape `(len(timesteps), embedding_dim)` """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb def decode_latents(self, latents): video_length = latents.shape[2] latents = 1 / self.vae.config.scaling_factor * latents # 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0])): video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, input_frames, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latent_timestep, overlaps, strength, latents=None, last_output_frames = None, use_lcm = False, use_img2img = False, ): shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # torch.randn(shape, generator=generator, dtype=dtype) #.to(device) , device=rand_device last_output_frames_latents = [] if overlaps > 0 or strength < 1 or use_lcm: frames_latents = [] if input_frames is not None: for i , frame in enumerate(input_frames): image = frame image = self.image_processor.preprocess(image) if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) frame_latents = self.vae.encode(image).latent_dist.sample(generator) frame_latents = self.vae.config.scaling_factor * frame_latents frames_latents.append(frame_latents) if last_output_frames is not None: for image in last_output_frames: image = self.image_processor.preprocess(image) if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) frame_latents = self.vae.encode(image).latent_dist.sample(generator) frame_latents = self.vae.config.scaling_factor * frame_latents last_output_frames_latents.append(frame_latents) latents = latents.to(device) if use_lcm: frames_latents_tensor = torch.stack(frames_latents,dim=2) latents = self.scheduler.add_noise(frames_latents_tensor, latents.to(device), latent_timestep) elif last_output_frames is not None and strength < 1.0: # for i in range(len(last_output_frames_latents)): for i in range(video_length): if i < len(last_output_frames): latents[:, :, i, :, :] = self.scheduler.add_noise(last_output_frames_latents[i], latents[:, :, i, :, :].to(device), latent_timestep) else: if not use_img2img: latents[:, :, i, :, :] = self.scheduler.add_noise(last_output_frames_latents[-1], latents[:, :, i, :, :].to(device), latent_timestep) else: latents[:, :, i, :, :] = self.scheduler.add_noise(frames_latents[i], latents[:, :, i, :, :].to(device), latent_timestep) 
latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler if strength >= 1 and not use_lcm: latents = latents * self.scheduler.init_noise_sigma return latents def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] return timesteps, num_inference_steps - t_start @torch.no_grad() def __call__( self, video_length: Optional[int], input_frames: list = None, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, strength: float = 0.5, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, overlaps: int = 0,
multicontrolnetresiduals_pipeline: Optional[MultiControlNetResidualsPipeline] = None,
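For reference, the strength handling in get_timesteps above reduces to simple index arithmetic over the scheduler's timestep list. A small standalone illustration with made-up values (the real code also multiplies t_start by scheduler.order, which is 1 for most schedulers):

# Standalone illustration of the get_timesteps arithmetic shown above (toy values).
num_inference_steps = 50
strength = 0.5
timesteps = list(range(999, -1, -20))           # stand-in for scheduler.timesteps (50 entries)

init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 25
t_start = max(num_inference_steps - init_timestep, 0)                          # 25
trimmed = timesteps[t_start:]                   # only the last `init_timestep` denoising steps run

print(len(trimmed), trimmed[0])                 # 25 steps, starting mid-way through the schedule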
2
2023-11-04 01:35:44+00:00
16k
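The get_w_embedding helper in the record above builds a sinusoidal embedding of the scaled guidance weight for LCM-style conditioning. A self-contained re-implementation for illustration, mirroring that helper:

# Self-contained sketch mirroring the get_w_embedding helper above (LCM guidance embedding).
import torch

def guidance_scale_embedding(w: torch.Tensor, embedding_dim: int = 512, dtype=torch.float32) -> torch.Tensor:
    assert w.ndim == 1
    w = w * 1000.0
    half_dim = embedding_dim // 2
    emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
    emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
    emb = w.to(dtype)[:, None] * emb[None, :]
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
    if embedding_dim % 2 == 1:                  # zero-pad odd embedding dimensions
        emb = torch.nn.functional.pad(emb, (0, 1))
    return emb

# one embedding row per guidance value, e.g. guidance_scale - 1 for a batch of 2 (toy values)
emb = guidance_scale_embedding(torch.tensor([7.5 - 1.0] * 2), embedding_dim=256)
print(emb.shape)                                # torch.Size([2, 256])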
Zaczero/openstreetmap-ng
src/controllers/api/0.6/changeset_comment.py
[ { "identifier": "api_user", "path": "src/lib/auth.py", "snippet": "def api_user(*require_scopes: Scope | ExtendedScope) -> User:\n \"\"\"\n Dependency for authenticating the api user.\n \"\"\"\n\n return Security(\n _get_user,\n scopes=tuple(s.value for s in require_scopes),\n )" }, { "identifier": "Format06", "path": "src/lib/format/format06.py", "snippet": "class Format06:\n @staticmethod\n def _encode_tags(tags: dict) -> Sequence[dict] | dict:\n if format_is_json():\n return tags\n else:\n return tuple({'@k': k, '@v': v} for k, v in tags.items())\n\n @staticmethod\n def _decode_tags_unsafe(tags: Sequence[dict]) -> dict:\n \"\"\"\n This method does not validate the input data.\n\n >>> _decode_tags_unsafe([\n ... {'@k': 'a', '@v': '1'},\n ... {'@k': 'b', '@v': '2'},\n ... ])\n {'a': '1', 'b': '2'}\n \"\"\"\n\n items = tuple((tag['@k'], tag['@v']) for tag in tags)\n result = dict(items)\n\n if len(items) != len(result):\n raise ValueError('Duplicate tags keys')\n\n return result\n\n @staticmethod\n def decode_tags_and_validate(tags: Sequence[dict]) -> dict:\n \"\"\"\n >>> decode_tags_and_validate([\n ... {'@k': 'a', '@v': '1'},\n ... {'@k': 'b', '@v': '2'},\n ... ])\n {'a': '1', 'b': '2'}\n \"\"\"\n\n return TagsValidating(tags=Format06._decode_tags_unsafe(tags)).tags\n\n @staticmethod\n def _encode_point(point: Point | None) -> dict:\n \"\"\"\n >>> _encode_point(Point(1, 2))\n {'@lon': 1, '@lat': 2}\n \"\"\"\n\n if not point:\n return {}\n\n return {\n XAttr('lon'): point.x,\n XAttr('lat'): point.y,\n }\n\n @staticmethod\n def _decode_point_unsafe(data: dict) -> Point | None:\n \"\"\"\n This method does not validate the input data.\n\n >>> _decode_point_unsafe({'@lon': '1', '@lat': '2'})\n POINT (1 2)\n \"\"\"\n\n if (lon := data.get('@lon')) is None or (lat := data.get('@lat')) is None:\n return None\n\n return Point(\n float(lon),\n float(lat),\n )\n\n @staticmethod\n def _encode_nodes(nodes: Sequence[ElementMemberRef]) -> Sequence[dict] | Sequence[int]:\n \"\"\"\n >>> _encode_nodes([\n ... ElementMember(type=ElementType.node, typed_id=1, role=''),\n ... ElementMember(type=ElementType.node, typed_id=2, role=''),\n ... ])\n ({'@ref': 1}, {'@ref': 2})\n \"\"\"\n\n if format_is_json():\n return tuple(node.typed_id for node in nodes)\n else:\n return tuple({'@ref': node.typed_id} for node in nodes)\n\n @staticmethod\n def _decode_nodes_unsafe(nodes: Sequence[dict]) -> Sequence[ElementMemberRef]:\n \"\"\"\n This method does not validate the input data.\n\n >>> _decode_nodes_unsafe([{'@ref': '1'}])\n [ElementMember(type=ElementType.node, typed_id=1, role='')]\n \"\"\"\n\n return tuple(\n ElementMemberRef(\n type=ElementType.node,\n typed_id=int(node['@ref']),\n role='',\n )\n for node in nodes\n )\n\n @staticmethod\n def _encode_members(members: Sequence[ElementMemberRef]) -> Sequence[dict]:\n \"\"\"\n >>> _encode_members([\n ... ElementMember(type=ElementType.node, typed_id=1, role='a'),\n ... ElementMember(type=ElementType.way, typed_id=2, role='b'),\n ... ])\n (\n {'@type': 'node', '@ref': 1, '@role': 'a'},\n {'@type': 'way', '@ref': 2, '@role': 'b'},\n )\n \"\"\"\n\n return tuple(\n {\n XAttr('type'): member.type.value,\n XAttr('ref'): member.typed_id,\n XAttr('role'): member.role,\n }\n for member in members\n )\n\n @staticmethod\n def _decode_members_unsafe(members: Sequence[dict]) -> Sequence[ElementMemberRef]:\n \"\"\"\n This method does not validate the input data.\n\n >>> _decode_members_unsafe([\n ... {'@type': 'node', '@ref': '1', '@role': 'a'},\n ... 
])\n [ElementMember(type=ElementType.node, typed_id=1, role='a')]\n \"\"\"\n\n return tuple(\n ElementMemberRef(\n type=ElementType.from_str(member['@type']),\n typed_id=int(member['@ref']),\n role=member['@role'],\n )\n for member in members\n )\n\n @staticmethod\n def encode_element(element: Element) -> dict:\n \"\"\"\n >>> encode_element(Element(type=ElementType.node, typed_id=1, version=1, ...))\n {'node': {'@id': 1, '@version': 1, ...}}\n \"\"\"\n\n if format_is_json():\n return {\n 'type': element.type.value,\n 'id': element.typed_id,\n **(Format06._encode_point(element.point) if element.type == ElementType.node else {}),\n 'version': element.version,\n 'timestamp': element.created_at,\n 'changeset': element.changeset_id,\n 'uid': element.user_id,\n 'user': element.user.display_name,\n 'visible': element.visible,\n 'tags': element.tags,\n **({'nodes': Format06._encode_nodes(element.members)} if element.type == ElementType.way else {}),\n **(\n {'members': Format06._encode_members(element.members)}\n if element.type == ElementType.relation\n else {}\n ),\n }\n else:\n return {\n element.type.value: {\n '@id': element.typed_id,\n **(Format06._encode_point(element.point) if element.type == ElementType.node else {}),\n '@version': element.version,\n '@timestamp': element.created_at,\n '@changeset': element.changeset_id,\n '@uid': element.user_id,\n '@user': element.user.display_name,\n '@visible': element.visible,\n 'tag': Format06._encode_tags(element.tags),\n **({'nd': Format06._encode_nodes(element.members)} if element.type == ElementType.way else {}),\n **(\n {'member': Format06._encode_members(element.members)}\n if element.type == ElementType.relation\n else {}\n ),\n }\n }\n\n @staticmethod\n def decode_element(element: dict, changeset_id: int | None) -> Element:\n \"\"\"\n If `changeset_id` is `None`, it will be extracted from the element data.\n \"\"\"\n\n if len(element) != 1:\n raise ValueError(f'Expected one root element, got {len(element)}')\n\n type, data = next(iter(element.items()))\n type = ElementType.from_str(type)\n data: dict\n\n # decode members from either nd or member\n if data_nodes := data.get('nd'):\n members = Format06._decode_nodes_unsafe(data_nodes)\n elif data_members := data.get('member'):\n members = Format06._decode_members_unsafe(data_members)\n else:\n members = ()\n\n return Element(\n **ElementValidating(\n user_id=auth_user().id,\n changeset_id=changeset_id or data.get('@changeset'),\n type=type,\n typed_id=data.get('@id'),\n version=data.get('@version', 0) + 1,\n visible=data.get('@visible', True),\n tags=Format06._decode_tags_unsafe(data.get('tag', ())),\n point=Format06._decode_point_unsafe(data),\n members=members,\n ).to_orm_dict()\n )\n\n @staticmethod\n def encode_elements(elements: Sequence[Element]) -> dict[str, Sequence[dict]]:\n \"\"\"\n >>> encode_elements([\n ... Element(type=ElementType.node, typed_id=1, version=1, ...),\n ... Element(type=ElementType.way, typed_id=2, version=1,\n ... 
])\n {'node': [{'@id': 1, '@version': 1, ...}], 'way': [{'@id': 2, '@version': 1, ...}]}\n \"\"\"\n\n if format_is_json():\n return {'elements': tuple(Format06.encode_element(element) for element in elements)}\n else:\n result: dict[str, list[dict]] = defaultdict(list)\n for element in elements:\n result[element.type.value].append(Format06.encode_element(element))\n return result\n\n @staticmethod\n def _encode_changeset_comment(comment: ChangesetComment) -> dict:\n \"\"\"\n >>> _encode_changeset_comment(ChangesetComment(...))\n {'@uid': 1, '@user': ..., '@date': ..., 'text': 'lorem ipsum'}\n \"\"\"\n\n return {\n XAttr('id'): comment.id,\n XAttr('date'): comment.created_at,\n XAttr('uid'): comment.user_id,\n XAttr('user'): comment.user.display_name,\n 'text': comment.body,\n }\n\n @staticmethod\n def encode_changeset(changeset: Changeset, *, add_comments_count: int = 0) -> dict:\n \"\"\"\n >>> encode_changeset(Changeset(...))\n {'changeset': {'@id': 1, '@created_at': ..., ..., 'discussion': {'comment': [...]}}}\n \"\"\"\n\n if changeset.boundary:\n minx, miny, maxx, maxy = changeset.boundary.bounds\n boundary_d = {\n XAttr('minlon', custom_xml='min_lon'): minx,\n XAttr('minlat', custom_xml='min_lat'): miny,\n XAttr('maxlon', custom_xml='max_lon'): maxx,\n XAttr('maxlat', custom_xml='max_lat'): maxy,\n }\n else:\n boundary_d = {}\n\n try:\n _ = changeset.comments\n has_comments = True\n except InvalidRequestError:\n has_comments = False\n\n if format_is_json():\n return {\n 'type': 'changeset',\n 'id': changeset.id,\n 'created_at': changeset.created_at,\n **({'closed_at': changeset.closed_at} if changeset.closed_at else {}),\n 'open': not changeset.closed_at,\n 'uid': changeset.user_id,\n 'user': changeset.user.display_name,\n **boundary_d,\n 'comments_count': len(changeset.comments) + add_comments_count,\n 'changes_count': changeset.size,\n 'tags': changeset.tags,\n **(\n {'discussion': tuple(Format06._encode_changeset_comment(comment) for comment in changeset.comments)}\n if has_comments\n else {}\n ),\n }\n else:\n return {\n 'changeset': {\n '@id': changeset.id,\n '@created_at': changeset.created_at,\n **({'@closed_at': changeset.closed_at} if changeset.closed_at else {}),\n '@open': not changeset.closed_at,\n '@uid': changeset.user_id,\n '@user': changeset.user.display_name,\n **boundary_d,\n '@comments_count': len(changeset.comments) + add_comments_count,\n '@changes_count': changeset.size,\n 'tag': Format06._encode_tags(changeset.tags),\n **(\n {\n 'discussion': {\n 'comment': tuple(\n Format06._encode_changeset_comment(comment) for comment in changeset.comments\n ),\n }\n }\n if has_comments\n else {}\n ),\n }\n }\n\n @staticmethod\n def encode_changesets(changesets: Sequence[Changeset]) -> dict:\n \"\"\"\n >>> encode_changesets([\n ... Changeset(...),\n ... Changeset(...),\n ... ])\n {'changeset': [{'@id': 1, '@created_at': ..., ..., 'discussion': {'comment': [...]}}]}\n \"\"\"\n\n if format_is_json():\n return {'elements': tuple(Format06.encode_changeset(changeset) for changeset in changesets)}\n else:\n return {'changeset': tuple(Format06.encode_changeset(changeset)['changeset'] for changeset in changesets)}\n\n @staticmethod\n def encode_osmchange(elements: Sequence[Element]) -> Sequence[tuple[str, dict]]:\n \"\"\"\n >>> encode_osmchange([\n ... Element(type=ElementType.node, typed_id=1, version=1, ...),\n ... Element(type=ElementType.way, typed_id=2, version=2, ...)\n ... 
])\n [\n ('create', {'node': [{'@id': 1, '@version': 1, ...}]}),\n ('modify', {'way': [{'@id': 2, '@version': 2, ...}]}),\n ]\n \"\"\"\n\n result = [None] * len(elements)\n for i, element in len(elements):\n if element.version == 1:\n action = OSMChangeAction.create.value\n elif element.visible:\n action = OSMChangeAction.modify.value\n else:\n action = OSMChangeAction.delete.value\n result[i] = (action, Format06.encode_element(element))\n return result\n\n @staticmethod\n def decode_osmchange(elements: Sequence[tuple[str, dict]], changeset_id: int | None) -> Sequence[Element]:\n \"\"\"\n If `changeset_id` is `None`, it will be extracted from the element data.\n\n >>> decode_osmchange([\n ... ('create', {'node': [{'@id': 1, '@version': 1, ...}]}),\n ... ('modify', {'way': [{'@id': 2, '@version': 2, ...}]}),\n ... ])\n [Element(type=ElementType, ...), Element(type=ElementType.way, ...)]\n \"\"\"\n\n result = [None] * len(elements)\n\n for i, (action, element_d) in enumerate(elements):\n if len(element_d) != 1:\n raise ValueError(f'Expected one element in {action!r}, got {len(element_d)}')\n\n element = Format06.decode_element(element_d, changeset_id)\n\n if action == OSMChangeAction.create.value:\n if element.id > 0:\n raise_for().diff_create_bad_id(element.versioned_ref)\n if element.version > 1:\n element.version = 1\n elif action == OSMChangeAction.modify.value:\n if element.version < 2:\n raise_for().diff_update_bad_version(element.versioned_ref)\n elif action == OSMChangeAction.delete.value:\n if element.version < 2:\n raise_for().diff_update_bad_version(element.versioned_ref)\n if element.visible:\n element.visible = False\n else:\n raise_for().diff_unsupported_action(action)\n\n result[i] = element\n\n return result\n\n @staticmethod\n def encode_diff_result(assigned_ref_map: dict[TypedElementRef, Sequence[Element]]) -> Sequence[tuple]:\n \"\"\"\n >>> encode_diff_result({\n ... TypedElementRef(type=ElementType.node, typed_id=-1): [\n ... Element(type=ElementType.node, typed_id=1, version=1, ...),\n ... Element(type=ElementType.node, typed_id=1, version=2, ...),\n ... ],\n ... })\n (\n ('node', {'@old_id': -1, '@new_id': 1, '@new_version': 1}),\n ('node', {'@old_id': -1, '@new_id': 1, '@new_version': 2})\n )\n \"\"\"\n\n return tuple(\n (\n typed_ref.type.value,\n {\n '@old_id': typed_ref.typed_id,\n '@new_id': element.typed_id,\n '@new_version': element.version,\n },\n )\n for typed_ref, elements in assigned_ref_map.items()\n for element in elements\n )\n\n @staticmethod\n def encode_tracks(trace_points: Sequence[TracePoint]) -> dict:\n \"\"\"\n >>> encode_tracks([\n ... TracePoint(...),\n ... TracePoint(...),\n ... 
])\n {'trk': [{'trkseg': [{'trkpt': [{'@lon': 1, '@lat': 2}, {'@lon': 3, '@lat': 4}]}]}]}\n \"\"\"\n\n trks = []\n trk_trksegs = []\n trk_trkseg_trkpts = []\n\n last_trk_id = None\n last_trkseg_id = None\n\n for tp in trace_points:\n trace = tp.trace\n\n # if trace is available via api, encode full information\n if trace.timestamps_via_api:\n # handle track change\n if last_trk_id != trace.id:\n if trace.visibility == TraceVisibility.identifiable:\n url = f'/user/permalink/{trace.user_id}/traces/{trace.id}'\n else:\n url = None\n\n trk_trksegs = []\n trks.append(\n {\n 'name': trace.name,\n 'desc': trace.description,\n **({'url': url} if url else {}),\n 'trkseg': trk_trksegs,\n }\n )\n last_trk_id = trace.id\n last_trkseg_id = None\n\n # handle track segment change\n if last_trkseg_id != tp.track_idx:\n trk_trkseg_trkpts = []\n trk_trksegs.append({'trkpt': trk_trkseg_trkpts})\n last_trkseg_id = tp.track_idx\n\n # add point\n trk_trkseg_trkpts.append(\n {\n **Format06._encode_point(tp.point),\n **({'ele': tp.elevation} if tp.elevation is not None else {}),\n 'time': tp.captured_at,\n }\n )\n\n # otherwise, encode only coordinates\n else:\n # handle track and track segment change\n if last_trk_id is not None or last_trkseg_id is not None:\n trk_trksegs = []\n trks.append({'trkseg': trk_trksegs})\n trk_trkseg_trkpts = []\n trk_trksegs.append({'trkpt': trk_trkseg_trkpts})\n last_trk_id = None\n last_trkseg_id = None\n\n trk_trkseg_trkpts.append(Format06._encode_point(tp.point))\n\n return {'trk': trks}\n\n @staticmethod\n def decode_tracks(tracks: Sequence[dict], *, track_idx_start: int = 0) -> Sequence[TracePoint]:\n \"\"\"\n >>> decode_tracks([{'trkseg': [{'trkpt': [{'@lon': 1, '@lat': 2}]}]}])\n [TracePoint(...)]\n \"\"\"\n\n result = []\n\n for trk in tracks:\n trk: dict\n for track_idx, trkseg in enumerate(trk.get('trkseg', []), track_idx_start):\n trkseg: dict\n for trkpt in trkseg.get('trkpt', []):\n trkpt: dict\n\n result.append(\n TracePoint(\n **TracePointValidating(\n track_idx=track_idx,\n captured_at=datetime.fromisoformat(time) if (time := trkpt.get('time')) else None,\n point=Format06._decode_point_unsafe(trkpt),\n elevation=trkpt.get('ele'),\n ).to_orm_dict()\n )\n )\n\n return result\n\n @staticmethod\n def encode_gpx_file(trace: Trace) -> dict:\n \"\"\"\n >>> encode_gpx_file(Trace(...))\n {'gpx_file': {'@id': 1, '@uid': 1234, ...}}\n \"\"\"\n\n return {\n 'gpx_file': {\n '@id': trace.id,\n '@uid': trace.user_id,\n '@user': trace.user.display_name,\n '@timestamp': trace.created_at,\n '@name': trace.name,\n '@lon': trace.start_point.x,\n '@lat': trace.start_point.y,\n '@visibility': trace.visibility.value,\n '@pending': False,\n 'description': trace.description,\n 'tag': trace.tags,\n }\n }\n\n @staticmethod\n def encode_gpx_files(traces: Sequence[Trace]) -> dict:\n \"\"\"\n >>> encode_gpx_files([\n ... Trace(...),\n ... Trace(...),\n ... 
])\n {'gpx_file': [{'@id': 1, '@uid': 1234, ...}, {'@id': 2, '@uid': 1234, ...}]}\n \"\"\"\n\n return {\n 'gpx_file': tuple(Format06.encode_gpx_file(trace) for trace in traces),\n }\n\n @staticmethod\n def decode_gpx_file(gpx_file: dict) -> Trace:\n return Trace(\n **TraceValidating(\n user_id=auth_user().id,\n name=gpx_file.get('@name'),\n description=gpx_file.get('description'),\n visibility=TraceVisibility(gpx_file.get('@visibility')),\n size=1,\n start_point=Point(0, 0),\n tags=gpx_file.get('tag', ()),\n ).to_orm_dict()\n )\n\n @staticmethod\n def _encode_note_comment(comment: NoteComment) -> dict:\n \"\"\"\n >>> _encode_note_comment(NoteComment(...))\n {'date': '2019-06-15 08:26:04 UTC', 'uid': 1234, 'user': 'userName', ...}\n \"\"\"\n\n return {\n 'date': format_sql_date(comment.created_at),\n 'uid': comment.user_id,\n 'user': comment.user.display_name,\n 'user_url': comment.user.permalink,\n 'action': comment.event.value,\n 'text': comment.body,\n 'html': comment.body_rich.value, # a disaster waiting to happen\n }\n\n @staticmethod\n def encode_note(note: Note) -> dict:\n \"\"\"\n >>> encode_note(Note(...))\n {'note': {'@lon': 0.1, '@lat': 51, 'id': 16659, ...}}\n \"\"\"\n\n style = format_style()\n\n if style == FormatStyle.json:\n return {\n 'type': 'Feature',\n 'geometry': mapping(note.point),\n 'properties': {\n 'id': note.id,\n 'url': f'{API_URL}/api/0.6/notes/{note.id}.json',\n **(\n {\n 'reopen_url': f'{API_URL}/api/0.6/notes/{note.id}/reopen.json',\n }\n if note.closed_at\n else {\n 'comment_url': f'{API_URL}/api/0.6/notes/{note.id}/comment.json',\n 'close_url': f'{API_URL}/api/0.6/notes/{note.id}/close.json',\n }\n ),\n 'date_created': format_sql_date(note.created_at),\n **({'closed_at': format_sql_date(note.closed_at)} if note.closed_at else {}),\n 'status': note.status.value,\n 'comments': tuple(Format06._encode_note_comment(comment) for comment in note.comments),\n },\n }\n elif style == FormatStyle.gpx:\n return {\n 'wpt': {\n **Format06._encode_point(note.point),\n 'time': note.created_at,\n 'name': f'Note: {note.id}',\n 'link': {'href': note.permalink},\n 'desc': ET.CDATA(render('api/0.6/note_comments_rss.jinja2', comments=note.comments)),\n 'extensions': {\n 'id': note.id,\n 'url': f'{API_URL}/api/0.6/notes/{note.id}.gpx',\n **(\n {\n 'reopen_url': f'{API_URL}/api/0.6/notes/{note.id}/reopen.gpx',\n }\n if note.closed_at\n else {\n 'comment_url': f'{API_URL}/api/0.6/notes/{note.id}/comment.gpx',\n 'close_url': f'{API_URL}/api/0.6/notes/{note.id}/close.gpx',\n }\n ),\n 'date_created': format_sql_date(note.created_at),\n **({'date_closed': format_sql_date(note.closed_at)} if note.closed_at else {}),\n 'status': note.status.value,\n },\n }\n }\n else:\n return {\n 'note': {\n **Format06._encode_point(note.point),\n 'id': note.id,\n 'url': f'{API_URL}/api/0.6/notes/{note.id}',\n **(\n {\n 'reopen_url': f'{API_URL}/api/0.6/notes/{note.id}/reopen',\n }\n if note.closed_at\n else {\n 'comment_url': f'{API_URL}/api/0.6/notes/{note.id}/comment',\n 'close_url': f'{API_URL}/api/0.6/notes/{note.id}/close',\n }\n ),\n 'date_created': format_sql_date(note.created_at),\n **({'date_closed': format_sql_date(note.closed_at)} if note.closed_at else {}),\n 'status': note.status.value,\n 'comments': {\n 'comment': tuple(Format06._encode_note_comment(comment) for comment in note.comments),\n },\n }\n }\n\n @staticmethod\n def encode_notes(notes: Sequence[Note]) -> dict:\n \"\"\"\n >>> encode_notes([\n ... Note(...),\n ... Note(...),\n ... 
])\n {'note': [{'@lon': 1, '@lat': 2, 'id': 1, ...}]}\n \"\"\"\n\n style = format_style()\n\n if style == FormatStyle.json:\n return {'type': 'FeatureCollection', 'features': tuple(Format06.encode_note(note) for note in notes)}\n elif style == FormatStyle.gpx:\n return {'wpt': tuple(Format06.encode_note(note)['wpt'] for note in notes)}\n else:\n return {'note': tuple(Format06.encode_note(note)['note'] for note in notes)}\n\n @staticmethod\n def _encode_languages(languages: Sequence[str]) -> dict | Sequence[str]:\n \"\"\"\n >>> _encode_languages(['en', 'pl'])\n {'lang': ('en', 'pl')}\n \"\"\"\n\n if format_is_json():\n return tuple(languages)\n else:\n return {'lang': tuple(languages)}\n\n @staticmethod\n async def encode_user(user: User) -> dict:\n \"\"\"\n >>> encode_user(User(...))\n {'user': {'@id': 1234, '@display_name': 'userName', ...}}\n \"\"\"\n\n current_user = auth_user()\n access_private = current_user and current_user.id == user.id\n\n changesets_count = 0\n traces_count = 0\n block_received_count = 0\n block_received_active_count = 0\n block_issued_count = 0\n block_issued_active_count = 0\n messages_received_count = 0\n messages_received_unread_count = 0\n messages_sent_count = 0\n\n async def changesets_count_task() -> None:\n nonlocal changesets_count\n changesets_count = await ChangesetRepository.count_by_user_id(user.id)\n\n async def traces_count_task() -> None:\n nonlocal traces_count\n traces_count = await TraceRepository.count_by_user_id(user.id)\n\n async def block_received_count_task() -> None:\n nonlocal block_received_count, block_received_active_count\n total, active = await UserBlockRepository.count_received_by_user_id(user.id)\n block_received_count = total\n block_received_active_count = active\n\n async def block_issued_count_task() -> None:\n nonlocal block_issued_count, block_issued_active_count\n total, active = await UserBlockRepository.count_given_by_user_id(user.id)\n block_issued_count = total\n block_issued_active_count = active\n\n async def messages_received_count_task() -> None:\n nonlocal messages_received_count, messages_received_unread_count\n total, unread = await MessageRepository.count_received_by_user_id(user.id)\n messages_received_count = total\n messages_received_unread_count = unread\n\n async def messages_sent_count_task() -> None:\n nonlocal messages_sent_count\n messages_sent_count = await MessageRepository.count_sent_by_user_id(user.id)\n\n async with anyio.create_task_group() as tg:\n tg.start_soon(changesets_count_task)\n tg.start_soon(traces_count_task)\n tg.start_soon(block_received_count_task)\n tg.start_soon(block_issued_count_task)\n\n if access_private:\n tg.start_soon(messages_received_count_task)\n tg.start_soon(messages_sent_count_task)\n\n return {\n 'user': {\n XAttr('id'): user.id,\n XAttr('display_name'): user.display_name,\n XAttr('account_created'): user.created_at,\n 'description': user.description,\n ('contributor_terms' if format_is_json() else 'contributor-terms'): {\n XAttr('agreed'): True,\n **({XAttr('pd'): user.consider_public_domain} if access_private else {}),\n },\n 'img': {XAttr('href'): user.avatar_url},\n 'roles': [role.value for role in user.roles],\n 'changesets': {XAttr('count'): changesets_count},\n 'traces': {XAttr('count'): traces_count},\n 'blocks': {\n 'received': {\n XAttr('count'): block_received_count,\n XAttr('active'): block_received_active_count,\n },\n 'issued': {\n XAttr('count'): block_issued_count,\n XAttr('active'): block_issued_active_count,\n },\n },\n # private section\n **(\n {\n 
**(\n {\n 'home': {\n **Format06._encode_point(user.home_point),\n XAttr('zoom'): user.home_zoom,\n }\n }\n if user.home_point\n else {}\n ),\n 'languages': Format06._encode_languages(user.languages),\n 'messages': {\n 'received': {\n XAttr('count'): messages_received_count,\n XAttr('unread'): messages_received_unread_count,\n },\n 'sent': {XAttr('count'): messages_sent_count},\n },\n }\n if access_private\n else {}\n ),\n }\n }\n\n @staticmethod\n async def encode_users(users: Sequence[User]) -> dict:\n \"\"\"\n >>> encode_users([\n ... User(...),\n ... User(...),\n ... ])\n {'user': [{'@id': 1234, '@display_name': 'userName', ...}]}\n \"\"\"\n\n encoded = [None] * len(users)\n\n async def task(i: int, user: User):\n encoded[i] = await Format06.encode_user(user)\n\n async with anyio.create_task_group() as tg:\n for i, user in enumerate(users):\n tg.start_soon(task, i, user)\n\n if format_is_json():\n return {'users': tuple(user for user in encoded)}\n else:\n return {'user': tuple(user['user'] for user in encoded)}\n\n @staticmethod\n def decode_user_preference(pref: dict) -> UserPref:\n \"\"\"\n >>> decode_user_preference({'@k': 'key', '@v': 'value'})\n UserPref(key='key', value='value')\n \"\"\"\n\n return UserPref(\n **UserPrefValidating(\n user_id=auth_user().id,\n app_id=None, # 0.6 api does not support prefs partitioning\n key=pref['@k'],\n value=pref['@v'],\n ).to_orm_dict()\n )\n\n @staticmethod\n def decode_user_preferences(prefs: Sequence[dict]) -> Sequence[UserPref]:\n \"\"\"\n >>> decode_user_preferences([{'@k': 'key', '@v': 'value'}])\n [UserPref(key='key', value='value')]\n \"\"\"\n\n seen_keys = set()\n\n for pref in prefs:\n key = pref['@k']\n if key in seen_keys:\n raise_for().pref_duplicate_key(key)\n seen_keys.add(key)\n\n return tuple(Format06.decode_user_preference(pref) for pref in prefs)\n\n @staticmethod\n def encode_user_preferences(prefs: Sequence[UserPref]) -> dict:\n \"\"\"\n >>> encode_user_preferences([\n ... UserPref(key='key1', value='value1'),\n ... UserPref(key='key2', value='value2'),\n ... 
])\n {'preferences': {'preference': [{'@k': 'key1', '@v': 'value1'}, {'@k': 'key2', '@v': 'value2'}]}}\n \"\"\"\n\n if format_is_json():\n return {\n 'preferences': {pref.key: pref.value for pref in prefs},\n }\n else:\n return {\n 'preferences': {\n 'preference': tuple(\n {\n '@k': pref.key,\n '@v': pref.value,\n }\n for pref in prefs\n )\n }\n }" }, { "identifier": "CHANGESET_COMMENT_BODY_MAX_LENGTH", "path": "src/limits.py", "snippet": "CHANGESET_COMMENT_BODY_MAX_LENGTH = 5_000 # NOTE: value TBD" }, { "identifier": "User", "path": "src/models/db/user.py", "snippet": "class User(Base.Sequential, CreatedAtMixin, RichTextMixin):\n __tablename__ = 'user'\n __rich_text_fields__ = (('description', TextFormat.markdown),)\n\n email: Mapped[str] = mapped_column(Unicode(EMAIL_MAX_LENGTH), nullable=False)\n display_name: Mapped[str] = mapped_column(Unicode, nullable=False)\n password_hashed: Mapped[str] = mapped_column(Unicode, nullable=False)\n created_ip: Mapped[IPv4Address | IPv6Address] = mapped_column(INET, nullable=False)\n\n status: Mapped[UserStatus] = mapped_column(Enum(UserStatus), nullable=False)\n\n auth_provider: Mapped[AuthProvider | None] = mapped_column(Enum(AuthProvider), nullable=True)\n auth_uid: Mapped[str | None] = mapped_column(Unicode, nullable=True)\n\n languages: Mapped[list[str]] = mapped_column(ARRAY(Unicode(LANGUAGE_CODE_MAX_LENGTH)), nullable=False)\n\n # defaults\n password_changed_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True, default=func.now())\n password_salt: Mapped[str | None] = mapped_column(Unicode, nullable=True, default=None)\n consider_public_domain: Mapped[bool] = mapped_column(Boolean, nullable=False)\n roles: Mapped[list[UserRole]] = mapped_column(ARRAY(Enum(UserRole)), nullable=False, default=())\n description: Mapped[str] = mapped_column(UnicodeText, nullable=False, default='')\n description_rich_hash: Mapped[bytes | None] = mapped_column(LargeBinary(HASH_SIZE), nullable=True, default=None)\n description_rich: Mapped[CacheEntry | None] = relationship(\n CacheEntry,\n primaryjoin=CacheEntry.id == description_rich_hash,\n viewonly=True,\n default=None,\n lazy='raise',\n )\n editor: Mapped[Editor | None] = mapped_column(Enum(Editor), nullable=True, default=None)\n avatar_type: Mapped[AvatarType] = mapped_column(Enum(AvatarType), nullable=False, default=AvatarType.default)\n avatar_id: Mapped[str | None] = mapped_column(Unicode(STORAGE_KEY_MAX_LENGTH), nullable=True, default=None)\n home_point: Mapped[Point | None] = mapped_column(PointType, nullable=True, default=None)\n home_zoom: Mapped[int | None] = mapped_column(SmallInteger, nullable=True, default=None)\n\n # relationships (avoid circular imports)\n if TYPE_CHECKING:\n from src.models.db.oauth1_application import OAuth1Application\n from src.models.db.oauth2_application import OAuth2Application\n from src.models.db.user_block import UserBlock\n\n oauth1_applications: Mapped[list['OAuth1Application']] = relationship(\n back_populates='user',\n order_by='OAuth1Application.id.asc()',\n lazy='raise',\n )\n oauth2_applications: Mapped[list['OAuth2Application']] = relationship(\n back_populates='user',\n order_by='OAuth2Application.id.asc()',\n lazy='raise',\n )\n user_blocks_given: Mapped[list['UserBlock']] = relationship(\n back_populates='from_user',\n order_by='UserBlock.id.desc()',\n lazy='raise',\n )\n user_blocks_received: Mapped[list['UserBlock']] = relationship(\n back_populates='to_user',\n order_by='UserBlock.id.desc()',\n lazy='raise',\n )\n active_user_blocks_received: 
Mapped[list['UserBlock']] = relationship(\n back_populates='to_user',\n order_by='UserBlock.id.desc()',\n lazy='raise',\n primaryjoin='and_(UserBlock.to_user_id == User.id, UserBlock.expired == false())',\n viewonly=True,\n )\n\n __table_args__ = (\n UniqueConstraint(email),\n UniqueConstraint(display_name),\n )\n\n @validates('languages')\n def validate_languages(self, _: str, value: Sequence[str]):\n if len(value) > USER_LANGUAGES_LIMIT:\n raise ValueError('Too many languages')\n return value\n\n @validates('description')\n def validate_description(self, _: str, value: str):\n if len(value) > USER_DESCRIPTION_MAX_LENGTH:\n raise ValueError('Description is too long')\n return value\n\n @property\n def is_administrator(self) -> bool:\n \"\"\"\n Check if the user is an administrator.\n \"\"\"\n\n return UserRole.administrator in self.roles\n\n @property\n def is_moderator(self) -> bool:\n \"\"\"\n Check if the user is a moderator.\n \"\"\"\n\n return UserRole.moderator in self.roles or self.is_administrator\n\n @property\n def extended_scopes(self) -> Sequence[ExtendedScope]:\n \"\"\"\n Get the user's extended scopes.\n \"\"\"\n\n result = []\n\n # role-specific scopes\n if self.is_administrator:\n result.append(ExtendedScope.role_administrator)\n if self.is_moderator:\n result.append(ExtendedScope.role_moderator)\n\n return result\n\n @property\n def permalink(self) -> str:\n \"\"\"\n Get the user's permalink.\n\n >>> user.permalink\n 'https://www.openstreetmap.org/user/permalink/123456'\n \"\"\"\n\n return f'{APP_URL}/user/permalink/{self.id}'\n\n @property\n def languages_str(self) -> str:\n return ' '.join(self.languages)\n\n @languages_str.setter\n def languages_str(self, s: str) -> None:\n languages = s.split()\n languages = (t.strip()[:LANGUAGE_CODE_MAX_LENGTH].strip() for t in languages)\n languages = (normalize_language_case(t) for t in languages)\n languages = (t for t in languages if t)\n self.languages = tuple(set(languages))\n\n @property\n def preferred_diary_language(self) -> LanguageInfo:\n \"\"\"\n Get the user's preferred diary language.\n \"\"\"\n\n # return the first valid language\n for code in self.languages:\n if lang := get_language_info(code):\n return lang\n\n # fallback to default\n return get_language_info(DEFAULT_LANGUAGE)\n\n @property\n def changeset_max_size(self) -> int:\n \"\"\"\n Get the maximum changeset size for this user.\n \"\"\"\n\n return UserRole.get_changeset_max_size(self.roles)\n\n @property\n def password_hasher(self) -> PasswordHash:\n \"\"\"\n Get the password hash class for this user.\n \"\"\"\n\n return PasswordHash(UserRole.get_password_hasher(self.roles))\n\n @property\n def avatar_url(self) -> str:\n \"\"\"\n Get the url for the user's avatar image.\n \"\"\"\n\n # when using gravatar, use user id as the avatar id\n if self.avatar_type == AvatarType.gravatar:\n return Avatar.get_url(self.avatar_type, self.id)\n else:\n return Avatar.get_url(self.avatar_type, self.avatar_id)\n\n async def home_distance_to(self, point: Point | None) -> float | None:\n return haversine_distance(self.home_point, point) if self.home_point and point else None" }, { "identifier": "ExtendedScope", "path": "src/models/scope.py", "snippet": "class ExtendedScope(BaseEnum):\n \"\"\"\n Extended scopes with entries that are not obtainable by normal means.\n \"\"\"\n\n read_prefs = 'read_prefs'\n write_prefs = 'write_prefs'\n write_diary = 'write_diary'\n write_api = 'write_api'\n read_gpx = 'read_gpx'\n write_gpx = 'write_gpx'\n write_notes = 'write_notes'\n\n # 
additional scopes\n read_email = 'read_email'\n skip_authorization = 'skip_authorization'\n\n web_user = 'web_user'\n\n # role-specific scopes\n role_moderator = 'role_moderator'\n role_administrator = 'role_administrator'" }, { "identifier": "Scope", "path": "src/models/scope.py", "snippet": "class Scope(BaseEnum):\n read_prefs = 'read_prefs'\n write_prefs = 'write_prefs'\n write_diary = 'write_diary'\n write_api = 'write_api'\n read_gpx = 'read_gpx'\n write_gpx = 'write_gpx'\n write_notes = 'write_notes'" }, { "identifier": "ChangesetCommentService", "path": "src/services/changeset_comment_service.py", "snippet": "class ChangesetCommentService:\n @staticmethod\n async def subscribe(changeset_id: int) -> Changeset:\n \"\"\"\n Subscribe current user to changeset discussion.\n \"\"\"\n\n try:\n async with DB() as session:\n changeset = await session.get(\n Changeset,\n changeset_id,\n options=[\n joinedload(Changeset.changeset_subscription_users),\n get_joinedload(),\n ],\n )\n\n if not changeset:\n raise_for().changeset_not_found(changeset_id)\n\n changeset.changeset_subscription_users.append(auth_user())\n\n except IntegrityError:\n raise_for().changeset_already_subscribed(changeset_id)\n\n return changeset\n\n @staticmethod\n async def unsubscribe(changeset_id: int) -> Changeset:\n \"\"\"\n Unsubscribe current user from changeset discussion.\n \"\"\"\n\n async with DB() as session:\n changeset = await session.get(\n Changeset,\n changeset_id,\n options=[\n joinedload(Changeset.changeset_subscription_users),\n get_joinedload(),\n ],\n )\n\n if not changeset:\n raise_for().changeset_not_found(changeset_id)\n\n # TODO: will this work?\n try:\n changeset.changeset_subscription_users.remove(auth_user())\n except ValueError:\n raise_for().changeset_not_subscribed(changeset_id)\n\n return changeset\n\n @staticmethod\n async def comment(changeset_id: int, text: str) -> Changeset:\n \"\"\"\n Comment on a changeset.\n \"\"\"\n\n async with DB() as session:\n changeset = await session.get(\n Changeset,\n changeset_id,\n options=[\n joinedload(Changeset.comments),\n get_joinedload(),\n ],\n )\n\n if not changeset:\n raise_for().changeset_not_found(changeset_id)\n if not changeset.closed_at:\n raise_for().changeset_not_closed(changeset_id)\n\n changeset.comments.append(\n ChangesetComment(\n user_id=auth_user().id,\n changeset_id=changeset_id,\n body=text,\n )\n )\n\n return changeset\n\n @staticmethod\n async def delete_comment_unsafe(comment_id: int) -> Changeset:\n \"\"\"\n Delete any changeset comment.\n \"\"\"\n\n async with DB() as session, session.begin():\n comment = await session.get(\n ChangesetComment,\n comment_id,\n with_for_update=True,\n )\n\n if not comment:\n raise_for().changeset_comment_not_found(comment_id)\n\n await session.delete(comment)\n await session.flush()\n\n changeset = await session.get(\n Changeset,\n comment.changeset_id,\n options=[get_joinedload()],\n )\n\n return changeset" } ]
from typing import Annotated from fastapi import APIRouter, Form from pydantic import PositiveInt from src.lib.auth import api_user from src.lib.format.format06 import Format06 from src.limits import CHANGESET_COMMENT_BODY_MAX_LENGTH from src.models.db.user import User from src.models.scope import ExtendedScope, Scope from src.services.changeset_comment_service import ChangesetCommentService
11,053
router = APIRouter() @router.post('/changeset/{changeset_id}/subscribe') async def changeset_subscribe( changeset_id: PositiveInt,
router = APIRouter() @router.post('/changeset/{changeset_id}/subscribe') async def changeset_subscribe( changeset_id: PositiveInt,
_: Annotated[User, api_user(Scope.write_api)],
3
2023-11-04 01:12:13+00:00
16k
codefuse-ai/Collinear-Constrained-Attention
train/trainer/atorch_trainer.py
[ { "identifier": "print_rank_0", "path": "utils/common_utils.py", "snippet": "TASK2ID = {}\nID2TASK = {}\n L = args.num_hidden_layers\n V = args.vocab_size\ndef get_rank():\ndef get_local_rank():\ndef is_main_process():\ndef is_local_main_process():\ndef print_rank_0(*message):\ndef get_world_size():\ndef wait_for_everyone():\ndef atorch_init_distributed(backend=\"nccl\"):\ndef atorch_reset_distributed():\ndef _goes_first(is_main):\ndef get_model_params_num(model):\ndef main_process_first():\ndef unwrap_model(model):\ndef honor_type(obj, generator):\ndef recursively_apply(\n func,\n data,\n *args,\n test_type=lambda t: isinstance(t, torch.Tensor),\n error_on_other_type=False,\n **kwargs,\n):\ndef gather(tensor):\n def _gpu_gather_one(tensor):\ndef save_ckpt(model, optimizer, lr_scheduler, epoch, steps, save_path, logger):\ndef scheduler_and_resume(args, train_dataloader, model, optimizer, checkpoint):\ndef get_computation_speed(batch_size_per_device, seq_len, step_time):\ndef human_readable_flops(num):\ndef get_tflops_new(args, batch_size, seq_len, step_time):\ndef get_tflops_megatron(total_model_param, hidden_size, num_hidden_layers, \n batch_size_per_device, seq_len, step_time):\ndef is_old_version(path):\ndef generate_task_id(data_paths, train_mode):\n def __init__(self, patience=7, verbose=False, delta=0):\n def __call__(self, val_loss, model):\n def save_checkpoint(self, val_loss, model):\nclass EarlyStopping:" }, { "identifier": "FAMO", "path": "utils/auto_accelerate_utils.py", "snippet": "class FAMO:\n \"\"\"\n Fast Adaptive Multitask Optimization.\n \"\"\"\n def __init__(\n self,\n n_tasks: int,\n device: torch.device,\n mode: str = 'famo_valid',\n gamma: float = 0.001, # the regularization coefficient, default: 0.001\n w_lr: float = 0.025, # the learning rate of the task logits, default: 0.025\n max_norm: float = 1.0, # the maximum gradient norm\n ):\n self.min_losses = torch.zeros(n_tasks).to(device)\n self.w = torch.tensor([0.0] * n_tasks, device=device, requires_grad=True)\n self.w_opt = torch.optim.Adam([self.w], lr=w_lr, weight_decay=gamma)\n self.max_norm = max_norm\n self.n_tasks = n_tasks\n self.device = device\n self.first_train_step = True\n self.first_valid_step = True\n self.print_loss = None\n self.mode = mode\n self.prev_train_loss = None\n self.prev_valid_loss = None\n self.ratio_valid_task_loss_prev = torch.zeros(len(ID2TASK)).to(device)\n self.global_steps = 0\n self.z = None\n \n def set_min_losses(self, losses):\n self.min_losses = losses\n\n def get_weighted_loss(self, losses):\n self.prev_train_loss = losses\n self.z = F.softmax(self.w * 1, -1)\n # if is_main_process() and (self.global_steps % 10 == 0):\n # logger.info(f\"complete_steps: {self.global_steps}, per_task_weight: {self.z}\")\n if -1e20 in self.ratio_valid_task_loss_prev and self.mode == 'famo_valid_ema':\n self.z = F.softmax(torch.where(self.ratio_valid_task_loss_prev == -1e20, -1e20, self.z), -1)\n if self.global_steps % 10 == 0:\n print_rank_0(f'ratio_valid_task_loss_prev is {self.ratio_valid_task_loss_prev}, after, z is {self.z}')\n D = losses - self.min_losses + 1e-8\n if self.mode.startswith('famo_train'):\n c = (self.z / D).sum().detach()\n loss = (D.log() * self.z / c).sum()\n else:\n loss = (D * self.z).sum()\n return loss\n\n def update(self, curr_loss):\n if self.mode.startswith('famo_valid') and self.first_valid_step:\n self.first_valid_step = False\n self.prev_valid_loss = curr_loss\n return\n if self.mode.startswith('famo_train'):\n prev_loss = self.prev_train_loss\n else:\n prev_loss 
= self.prev_valid_loss\n self.prev_valid_loss = curr_loss\n delta = (prev_loss - self.min_losses + 1e-8).log() - \\\n (curr_loss - self.min_losses + 1e-8).log()\n with torch.enable_grad():\n d = torch.autograd.grad(F.softmax(self.w, -1),\n self.w,\n grad_outputs=delta.detach())[0]\n self.w_opt.zero_grad()\n self.w.grad = d\n self.w_opt.step()\n\n def backward(\n self,\n losses: torch.Tensor,\n shared_parameters: Union[\n List[torch.nn.parameter.Parameter], torch.Tensor\n ] = None,\n ):\n \"\"\"\n Parameters\n ----------\n losses :\n shared_parameters :\n task_specific_parameters :\n last_shared_parameters : parameters of last shared layer/block\n Returns\n -------\n Loss, extra outputs\n \"\"\"\n loss = self.get_weighted_loss(losses=losses)\n # if self.max_norm > 0 and shared_parameters is not None:\n # torch.nn.utils.clip_grad_norm_(shared_parameters, self.max_norm)\n # loss.backward()\n return loss" }, { "identifier": "get_ltor_masks_and_position_ids", "path": "utils/auto_accelerate_utils.py", "snippet": "def get_ltor_masks_and_position_ids(data):\n \"\"\"Build masks and position id for left to right model.\"\"\"\n\n # Extract batch size and sequence length.\n batch_size, seq_length = data.size()\n\n # Attention mask (lower triangular).\n # attention_mask = get_attn_mask(\n # seq_length=seq_length,\n # device=data.device,\n # )\n attention_mask = torch.ones((batch_size, seq_length), device=data.device)\n\n # Position ids.\n position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)\n position_ids = position_ids.unsqueeze(0).expand_as(data).clone()\n\n return attention_mask, position_ids" }, { "identifier": "SelfPacedStatus", "path": "utils/auto_accelerate_utils.py", "snippet": "class SelfPacedStatus:\n def __init__(self, interval=20):\n super(SelfPacedStatus, self).__init__()\n self.complete_steps = None\n self.current_epoch = None\n self.mode = None\n self.task_loss_prev = None\n self.w = None\n self.interval = interval\n \n def update(self, complete_steps, current_epoch, mode, task_loss_prev):\n self.complete_steps = complete_steps\n self.current_epoch = current_epoch\n self.mode = mode\n self.task_loss_prev = task_loss_prev" }, { "identifier": "GPTNeoXLayer", "path": "model/gpt_neox/modeling_gpt_neox.py", "snippet": "class GPTNeoXLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.use_parallel_residual = config.use_parallel_residual\n self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.attention = GPTNeoXAttention(config)\n self.mlp = GPTNeoXMLP(config)\n\n def forward(\n self,\n hidden_states: Optional[torch.FloatTensor],\n attention_mask: Optional[torch.FloatTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = False,\n layer_past: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n ):\n\n attention_layer_outputs = self.attention(\n self.input_layernorm(hidden_states),\n attention_mask=attention_mask,\n position_ids=position_ids,\n layer_past=layer_past,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n attn_output = attention_layer_outputs[0] # output_attn: attn_output, present, (attn_weights)\n outputs = attention_layer_outputs[1:]\n\n if self.use_parallel_residual:\n # pseudocode:\n # x = x + attn(ln1(x)) + mlp(ln2(x))\n mlp_output = 
self.mlp(self.post_attention_layernorm(hidden_states))\n hidden_states = mlp_output + attn_output + hidden_states\n else:\n # pseudocode:\n # x = x + attn(ln1(x))\n # x = x + mlp(ln2(x))\n attn_output = attn_output + hidden_states\n mlp_output = self.mlp(self.post_attention_layernorm(attn_output))\n hidden_states = mlp_output + attn_output\n\n if use_cache:\n outputs = (hidden_states,) + outputs # hidden_states, present, (attn_weights)\n else:\n outputs = (hidden_states,) + outputs[1:] # hidden_states, (attn_weights)\n\n return outputs" }, { "identifier": "GPTNeoXAttention", "path": "model/gpt_neox/modeling_gpt_neox.py", "snippet": "class GPTNeoXAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.num_attention_heads = config.num_attention_heads\n self.hidden_size = config.hidden_size\n if self.hidden_size % self.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size is not divisble by the number of attention heads! Make sure to update them\"\n )\n self.head_size = self.hidden_size // self.num_attention_heads\n self.rotary_ndims = int(self.head_size * config.rotary_pct)\n self._init_bias(config.max_position_embeddings)\n self.register_buffer(\"masked_bias\", torch.tensor(-1e9), persistent=False)\n self._init_rope()\n self.register_buffer(\n \"norm_factor\",\n torch.sqrt(torch.tensor(self.head_size, dtype=torch.float32)).to(torch.get_default_dtype()),\n persistent=False,\n )\n self.query_key_value = nn.Linear(config.hidden_size, 3 * config.hidden_size)\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n\n def _init_bias(self, max_positions, device=None):\n self.register_buffer(\n \"bias\",\n torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(\n 1, 1, max_positions, max_positions\n ),\n persistent=False,\n )\n if device is not None:\n self.bias = self.bias.to(device)\n\n def _init_rope(self):\n if self.config.rope_scaling is None:\n self.rotary_emb = GPTNeoXRotaryEmbedding(\n self.rotary_ndims, self.config.max_position_embeddings, base=self.config.rotary_emb_base\n )\n else:\n scaling_type = self.config.rope_scaling[\"type\"]\n scaling_factor = self.config.rope_scaling[\"factor\"]\n if scaling_type == \"linear\":\n self.rotary_emb = GPTNeoXLinearScalingRotaryEmbedding(\n self.rotary_ndims,\n self.config.max_position_embeddings,\n base=self.config.rotary_emb_base,\n scaling_factor=scaling_factor,\n )\n elif scaling_type == \"dynamic\":\n self.rotary_emb = GPTNeoXDynamicNTKScalingRotaryEmbedding(\n self.rotary_ndims,\n self.config.max_position_embeddings,\n base=self.config.rotary_emb_base,\n scaling_factor=scaling_factor,\n )\n else:\n raise ValueError(f\"Unknown RoPE scaling type {scaling_type}\")\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: torch.FloatTensor,\n position_ids: torch.LongTensor,\n head_mask: Optional[torch.FloatTensor] = None,\n layer_past: Optional[Tuple[torch.Tensor]] = None,\n use_cache: Optional[bool] = False,\n output_attentions: Optional[bool] = False,\n ):\n has_layer_past = layer_past is not None\n\n # Compute QKV\n # Attention heads [batch, seq_len, hidden_size]\n # --> [batch, seq_len, (np * 3 * head_size)]\n qkv = self.query_key_value(hidden_states)\n\n # [batch, seq_len, (num_heads * 3 * head_size)]\n # --> [batch, seq_len, num_heads, 3 * head_size]\n new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size)\n qkv = qkv.view(*new_qkv_shape)\n\n # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 
[batch, num_attention_heads, seq_len, head_size]\n query = qkv[..., : self.head_size].permute(0, 2, 1, 3)\n t_layer = qkv[..., self.head_size : 2 * self.head_size].permute(0, 2, 1, 3)\n value = qkv[..., 2 * self.head_size :].permute(0, 2, 1, 3)\n\n t_layer_1 = t_layer[..., : t_layer.shape[-1] // 2]\n t_layer_2 = t_layer[..., t_layer.shape[-1] // 2 :]\n t_layer = (t_layer_1+t_layer_2)/2\n\n t_layer = F.relu(t_layer)\n\n t_layer = torch.cat((t_layer, t_layer), dim=-1)\n\n # Compute rotary embeddings on rotary_ndims\n query_rot = query[..., : self.rotary_ndims]\n query_pass = query[..., self.rotary_ndims :]\n t_rot = t_layer[..., : self.rotary_ndims]\n t_pass = t_layer[..., self.rotary_ndims :]\n\n # Compute token offset for rotary embeddings (when decoding)\n seq_len = t_layer.shape[-2]\n if has_layer_past:\n seq_len += layer_past[0].shape[-2]\n cos, sin = self.rotary_emb(value, seq_len=seq_len)\n query_rot, t_layer = apply_rotary_pos_emb(query_rot, t_rot, cos, sin, position_ids)\n query_rot = torch.cat((query_rot, query_pass), dim=-1)\n t_layer = torch.cat((t_layer, t_pass), dim=-1)\n\n # Cache QKV values\n if has_layer_past:\n past_t = layer_past[0]\n past_value = layer_past[1]\n t_layer = torch.cat((past_t, t_layer), dim=-2)\n value = torch.cat((past_value, value), dim=-2)\n present = (t_layer, value) if use_cache else None\n\n # Compute attention\n attn_output, attn_weights = self._attn(query, t_layer, query_rot, value, attention_mask, head_mask)\n\n # Reshape outputs\n attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_size)\n attn_output = self.dense(attn_output)\n\n outputs = (attn_output, present)\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n @classmethod\n def _split_heads(cls, tensor, num_attention_heads, attn_head_size):\n \"\"\"\n Splits hidden dim into attn_head_size and num_attention_heads\n \"\"\"\n # tensor: [bs, seq_len, hidden_size]\n new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)\n # -> [bs, seq_len, num_attention_heads, attn_head_size]\n tensor = tensor.view(new_shape)\n # -> [bs, num_attention_heads, seq_len, attn_head_size]\n tensor = tensor.permute(0, 2, 1, 3)\n return tensor\n\n @classmethod\n def _merge_heads(cls, tensor, num_attention_heads, attn_head_size):\n \"\"\"\n Merges attn_head_size dim and num_attn_heads dim into hidden dim\n \"\"\"\n # tensor [bs, num_attention_heads, seq_len, attn_head_size]\n tensor = tensor.permute(0, 2, 1, 3).contiguous()\n # -> [bs, seq_len, num_attention_heads, attn_head_size]\n tensor = tensor.view(tensor.size(0), tensor.size(1), num_attention_heads * attn_head_size)\n # -> [bs, seq_len, hidden_size]\n return tensor\n\n def _attn(self, query, t_layer, query_rot, value, attention_mask=None, head_mask=None):\n # q, k, v: [bs, num_attention_heads, seq_len, attn_head_size]\n # compute causal mask from causal mask buffer\n batch_size, num_attention_heads, query_length, attn_head_size = query.size()\n key_length = t_layer.size(-2)\n\n # dynamically increase the causal mask with the key length, if needed.\n if key_length > self.bias.shape[-1]:\n self._init_bias(key_length, device=t_layer.device)\n causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]\n\n # query = query.view(batch_size * num_attention_heads, query_length, attn_head_size)\n # key = key.view(batch_size * num_attention_heads, key_length, attn_head_size)\n # attn_scores = torch.zeros(\n # batch_size * num_attention_heads,\n # query_length,\n # key_length,\n # 
dtype=query.dtype,\n # device=key.device,\n # )\n # attn_scores = torch.baddbmm(\n # attn_scores,\n # query,\n # key.transpose(1, 2),\n # beta=1.0,\n # alpha=(torch.tensor(1.0, dtype=self.norm_factor.dtype, device=self.norm_factor.device) / self.norm_factor),\n # )\n # attn_scores = attn_scores.view(batch_size, num_attention_heads, query_length, key_length)\n\n # print(query.shape)\n # print(t_layer.shape)\n # print(query_rot.shape)\n\n attn_scores = contract(\n # 'nbpd,sbpd,nbpd->bpns',\n 'bpnd,bpsd,bpnd->bpns',\n query, # [sq, b, np, hn] [b,np,sq,hn]\n t_layer, #[sk, b, np, hn] [b,np,sk,hn]\n query_rot, # [sq, b, np, hn] [b,np,sq,hn]\n backend='torch'\n ) / self.norm_factor\n\n mask_value = torch.finfo(attn_scores.dtype).min\n # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.\n # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`\n mask_value = torch.tensor(mask_value, dtype=attn_scores.dtype).to(attn_scores.device)\n attn_scores = torch.where(causal_mask, attn_scores, mask_value)\n\n if attention_mask is not None:\n # Apply the attention mask\n attn_scores = attn_scores + attention_mask\n\n attn_weights = nn.functional.softmax(attn_scores, dim=-1)\n attn_weights = attn_weights.to(value.dtype)\n\n # Mask heads if we want to\n if head_mask is not None:\n attn_weights = attn_weights * head_mask\n\n attn_output = torch.matmul(attn_weights, value)\n return attn_output, attn_weights" }, { "identifier": "GPTNeoXMLP", "path": "model/gpt_neox/modeling_gpt_neox.py", "snippet": "class GPTNeoXMLP(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size)\n self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size)\n self.act = ACT2FN[config.hidden_act]\n\n def forward(self, hidden_states):\n hidden_states = self.dense_h_to_4h(hidden_states)\n hidden_states = self.act(hidden_states)\n hidden_states = self.dense_4h_to_h(hidden_states)\n return hidden_states" }, { "identifier": "LlamaDecoderLayer", "path": "model/llama/modeling_llama.py", "snippet": "class LlamaDecoderLayer(nn.Module):\n def __init__(self, config: LlamaConfig):\n super().__init__()\n self.hidden_size = config.hidden_size\n self.self_attn = LlamaAttention(config=config)\n self.mlp = LlamaMLP(config)\n self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n use_cache: Optional[bool] = False,\n ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n \"\"\"\n Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`, *optional*): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under\n returned tensors for more detail.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n (see `past_key_values`).\n past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states\n \"\"\"\n\n residual = hidden_states\n\n hidden_states = self.input_layernorm(hidden_states)\n\n # Self Attention\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n hidden_states=hidden_states,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_value=past_key_value,\n output_attentions=output_attentions,\n use_cache=use_cache,\n )\n hidden_states = residual + hidden_states\n\n # Fully Connected\n residual = hidden_states\n hidden_states = self.post_attention_layernorm(hidden_states)\n hidden_states = self.mlp(hidden_states)\n hidden_states = residual + hidden_states\n\n outputs = (hidden_states,)\n\n if output_attentions:\n outputs += (self_attn_weights,)\n\n if use_cache:\n outputs += (present_key_value,)\n\n return outputs" }, { "identifier": "LlamaAttention", "path": "model/llama/modeling_llama.py", "snippet": "class LlamaAttention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(self, config: LlamaConfig):\n super().__init__()\n self.config = config\n self.hidden_size = config.hidden_size\n self.num_heads = config.num_attention_heads\n self.head_dim = self.hidden_size // self.num_heads\n self.num_key_value_heads = config.num_key_value_heads\n self.num_key_value_groups = self.num_heads // self.num_key_value_heads\n self.max_position_embeddings = config.max_position_embeddings\n\n #20230803 T需要保持非负\n self.relu = ACT2FN['relu']\n\n if (self.head_dim * self.num_heads) != self.hidden_size:\n raise ValueError(\n f\"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}\"\n f\" and `num_heads`: {self.num_heads}).\"\n )\n self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)\n #20230803 K改为T\n self.t_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)\n # self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)\n self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)\n self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)\n self._init_rope()\n\n def _init_rope(self):\n if self.config.rope_scaling is None:\n self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)\n else:\n scaling_type = self.config.rope_scaling[\"type\"]\n scaling_factor = self.config.rope_scaling[\"factor\"]\n if scaling_type == \"linear\":\n self.rotary_emb = LlamaLinearScalingRotaryEmbedding(\n self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor\n )\n elif scaling_type == \"dynamic\":\n self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(\n self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor\n )\n else:\n raise ValueError(f\"Unknown RoPE scaling type {scaling_type}\")\n\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):\n return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: 
Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n bsz, q_len, _ = hidden_states.size()\n\n # todo tp>1\n if self.config.pretraining_tp > 1:\n key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp\n query_slices = self.q_proj.weight.split(\n (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0\n )\n key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)\n value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)\n\n query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]\n query_states = torch.cat(query_states, dim=-1)\n\n key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]\n key_states = torch.cat(key_states, dim=-1)\n\n value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]\n value_states = torch.cat(value_states, dim=-1)\n\n else:\n query_states = self.q_proj(hidden_states)\n #20230803 K改为T\n t_states = self.t_proj(hidden_states)\n # key_states = self.k_proj(hidden_states)\n value_states = self.v_proj(hidden_states)\n\n query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)\n\n #20230803 T的定义\n t_states = t_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)\n t_states_1 = t_states[..., : t_states.shape[-1] // 2]\n t_states_2 = t_states[..., t_states.shape[-1] // 2 :]\n t_states = (t_states_1+t_states_2)/2\n t_states = F.relu(t_states)\n t_states = torch.cat((t_states, t_states), dim=-1)\n\n # key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)\n value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)\n\n kv_seq_len = t_states.shape[-2]\n if past_key_value is not None:\n kv_seq_len += past_key_value[0].shape[-2]\n cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)\n query_rot, t_states = apply_rotary_pos_emb(query_states, t_states, cos, sin, position_ids)\n\n if past_key_value is not None:\n # reuse k, v, self_attention\n t_states = torch.cat([past_key_value[0], t_states], dim=2)\n value_states = torch.cat([past_key_value[1], value_states], dim=2)\n\n past_key_value = (t_states, value_states) if use_cache else None\n\n # repeat k/v heads if n_kv_heads < n_heads\n t_states = repeat_kv(t_states, self.num_key_value_groups)\n value_states = repeat_kv(value_states, self.num_key_value_groups)\n\n attn_weights = contract(\n 'bpnd,bpsd,bpnd->bpns',\n query_states, # [b,p,sq,d]\n t_states, # [b,p,sk,d]\n query_rot, # [b,p,sq,d]\n backend='torch'\n ) / math.sqrt(self.head_dim)\n # attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)\n\n if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):\n raise ValueError(\n f\"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is\"\n f\" {attn_weights.size()}\"\n )\n\n if attention_mask is not None:\n if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):\n raise ValueError(\n f\"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}\"\n )\n attn_weights = attn_weights + attention_mask\n\n # upcast attention to fp32\n attn_weights = nn.functional.softmax(attn_weights, dim=-1, 
dtype=torch.float32).to(query_states.dtype)\n attn_output = torch.matmul(attn_weights, value_states)\n\n if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):\n raise ValueError(\n f\"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is\"\n f\" {attn_output.size()}\"\n )\n\n attn_output = attn_output.transpose(1, 2).contiguous()\n attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)\n\n if self.config.pretraining_tp > 1:\n attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)\n o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)\n attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])\n else:\n attn_output = self.o_proj(attn_output)\n\n if not output_attentions:\n attn_weights = None\n\n return attn_output, attn_weights, past_key_value" }, { "identifier": "LlamaMLP", "path": "model/llama/modeling_llama.py", "snippet": "class LlamaMLP(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.hidden_size = config.hidden_size\n self.intermediate_size = config.intermediate_size\n self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)\n self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)\n self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)\n self.act_fn = ACT2FN[config.hidden_act]\n\n def forward(self, x):\n if self.config.pretraining_tp > 1:\n slice = self.intermediate_size // self.config.pretraining_tp\n gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)\n up_proj_slices = self.up_proj.weight.split(slice, dim=0)\n down_proj_slices = self.down_proj.weight.split(slice, dim=1)\n\n gate_proj = torch.cat(\n [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1\n )\n up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)\n\n intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)\n down_proj = [\n F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp)\n ]\n down_proj = sum(down_proj)\n else:\n down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))\n\n return down_proj" }, { "identifier": "PeftModel", "path": "model/peft/modeling_peft.py", "snippet": "class AntPeftForCausalLM(PeftModelForCausalLM):\nclass AntPeftForEmbedding(PeftModel):\n def __init__(self, model, peft_config: PeftConfig, adapter_name: str = \"default\"):\n def set_route_id(self, route_id: int):\n def expand_external_router(self, path: str):\n def forward(\n self,\n input_ids=None,\n position_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n route_id: int = 0,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n def save_pretrained(self, save_directory, **kwargs):\n def load_adapter(self, model_id, adapter_name, is_trainable=False, **kwargs):\n def from_pretrained(\n cls,\n model,\n model_id: str,\n adapter_name: str = \"default\",\n is_trainable: bool = False,\n resume_from_checkpoint: bool = False,\n **kwargs\n ):\n def __init__(self, model, peft_config: PeftConfig, adapter_name: str = \"default\"):\n def set_route_id(self, route_id: int):\n def expand_external_router(self, path: str):\n def forward(\n self,\n query_ids: torch.Tensor,\n query_position_ids: torch.Tensor = None,\n 
query_attention_mask: torch.Tensor = None,\n query_mask: torch.Tensor = None,\n passage_ids: torch.Tensor = None,\n passage_position_ids: torch.Tensor = None,\n passage_attention_mask: torch.Tensor = None,\n passage_mask: torch.Tensor = None,\n route_id: int = 0,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n def save_pretrained(self, save_directory, **kwargs):\n def load_adapter(self, model_id, adapter_name, is_trainable=False, **kwargs):" } ]
import datetime import json import logging import math import os import random import re import shutil import time import warnings import gc import numpy as np import atorch import torch from functools import partial from pathlib import Path from deepspeed.ops.adam import DeepSpeedCPUAdam from torch.distributed.fsdp import FullStateDictConfig from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp import StateDictType from torch.optim.lr_scheduler import LambdaLR, CosineAnnealingLR, CosineAnnealingWarmRestarts from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from torch.utils.tensorboard import SummaryWriter from tqdm.auto import tqdm from transformers import get_scheduler as get_scheduler_trans from transformers.modeling_utils import PreTrainedModel, unwrap_model from transformers.trainer import ( OPTIMIZER_NAME, SCHEDULER_NAME, TRAINER_STATE_NAME, TRAINING_ARGS_NAME ) from transformers.trainer_pt_utils import reissue_pt_warnings from transformers.trainer_utils import ( PREFIX_CHECKPOINT_DIR, ) from transformers.utils import WEIGHTS_NAME from torch.nn import CrossEntropyLoss from utils.common_utils import print_rank_0, get_tflops_megatron, get_computation_speed, TASK2ID, ID2TASK, EarlyStopping, logger from utils.auto_accelerate_utils import FAMO, get_ltor_masks_and_position_ids, SelfPacedStatus from atorch.auto import auto_accelerate from atorch.utils.version import torch_version from model.gpt_neox.modeling_gpt_neox import GPTNeoXLayer, GPTNeoXAttention, GPTNeoXMLP from model.llama.modeling_llama import LlamaDecoderLayer, LlamaAttention, LlamaMLP from model.glm.modeling_glm import GLMBlock from torch.cuda.amp import GradScaler from apex.optimizers import FusedSGD from model.peft.modeling_peft import PeftModel
12,545
# labels=batch['labels'], ) # self paced loss # ema_tmp: L_valid_ema at step t-1 if (self.global_steps + 1) % self.args.selfpaced_interval == 0 and (self.args.weighted_loss_mode.startswith('famo_valid_ema') or self.args.weighted_loss_mode == 'selfpaced'): ema_tmp = 0.001 * self.valid_task_loss_prev + (1 - 0.001) * self.ema_valid_task_loss_prev self.ratio_valid_task_loss_prev = (ema_tmp - self.ema_valid_task_loss_prev ) / (self.ema_valid_task_loss_prev + 1e-6) self.ema_valid_task_loss_prev = ema_tmp self.ratio_valid_task_loss_prev = torch.where(self.ratio_valid_task_loss_prev > 0, -1e20, self.ratio_valid_task_loss_prev) self.famo.ratio_valid_task_loss_prev = self.ratio_valid_task_loss_prev if self.args.weighted_loss_mode == 'selfpaced': self.selfpaced_status.update(self.global_steps + 1, epoch + 1, 'train', self.ratio_valid_task_loss_prev) loss, task_loss, task_num, self.famo, self.selfpaced_status = self.loss_func(outputs, batch, self.args.weighted_loss_mode, self.famo, self.selfpaced_status) # print(f'rank: {self.rank}, loss: {loss}, task loss: {task_loss}') if self.args.weighted_loss_mode.startswith('famo'): self.famo.print_loss = self.famo.print_loss / self.args.gradient_accumulation_steps loss_tensor = torch.zeros( [1], device=self.famo.print_loss.device, dtype=self.famo.print_loss.dtype) loss_tensor[0] = self.famo.print_loss.item() else: loss = loss / self.args.gradient_accumulation_steps loss_tensor = torch.zeros( [1], device=loss.device, dtype=loss.dtype) loss_tensor[0] = loss.item() torch.distributed.all_reduce(loss_tensor) torch.distributed.all_reduce(task_loss, op=torch.distributed.ReduceOp.SUM) torch.distributed.all_reduce(task_num, op=torch.distributed.ReduceOp.SUM) reduce_loss = loss_tensor.sum() / torch.distributed.get_world_size() if has_inf_or_nan(reduce_loss): print_rank_0(f'There have nan loss.') self.skipped_steps += 1 skipped = True else: self.accumulated_loss += reduce_loss.item() mean_task_loss = task_loss / torch.distributed.get_world_size() self.accumulated_task_loss += mean_task_loss.cpu().numpy() self.accumulated_task_num += task_num.cpu().numpy() loss.backward() self.global_steps += 1 self.famo.global_steps += 1 if step % self.args.gradient_accumulation_steps == 0 or step == len(self.train_dataloader) - 1: if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0: # 如果是fp16,需要unscale。如果是bf16,self.optimizer里没有unscale这个方法 try: self.optimizer.unscale_() except Exception: pass if isinstance(self.model, FSDP): self.model.clip_grad_norm_(self.args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_( self.model.parameters(), self.args.max_grad_norm) self.optimizer.step() overflow = hasattr(self.optimizer, "step_was_skipped") and self.optimizer.step_was_skipped if skipped != overflow: print(f'skipped != overflow!!!!!!!!!!!!!!!!') if not overflow: self.lr_scheduler.step() # if not skipped: # self.optimizer.step() # self.lr_scheduler.step() self.optimizer.zero_grad() if (self.args.weighted_loss_mode.startswith('famo_valid') or self.args.weighted_loss_mode == 'selfpaced') and not skipped: # if self.args.weighted_loss_mode.startswith('famo_valid') and not skipped and step % self.args.famo_interval == 0: # delete caches # gc.collect() # torch.cuda.empty_cache() # self.model.eval() # if (step // self.args.famo_interval) % self.valid_dataloader_length == 0: if step % self.valid_dataloader_length == 0: valid_iterator = iter(self.valid_dataloader) valid_step = step % self.valid_dataloader_length if self.famo_resume: self.famo_resume = False for i in 
range(valid_step): v_batch = next(valid_iterator) v_batch = next(valid_iterator) if self.args.weighted_loss_mode == 'selfpaced': self.valid_task_loss_prev = self.self_paced_evaluate(self.global_steps, epoch + 1, v_batch, self.ratio_valid_task_loss_prev) else: with torch.autocast(device_type='cuda', dtype=torch.bfloat16): valid_outputs = self.model( input_ids=v_batch['input_ids'].to(self.device), attention_mask=v_batch['attention_mask'].to(self.device), position_ids=v_batch['position_ids'].to(self.device) ) valid_loss, valid_task_loss, valid_task_num, _, _ = self.loss_func(valid_outputs, v_batch, self.args.weighted_loss_mode) if self.args.weighted_loss_mode.startswith('famo_valid_ema'): torch.distributed.all_reduce(valid_task_loss, op=torch.distributed.ReduceOp.SUM) valid_task_loss /= torch.distributed.get_world_size() self.valid_task_loss_prev = valid_task_loss.clone().detach() # if self.famo.first_valid_step and self.args.resume_from_checkpoint == 'true': if self.args.resume_from_checkpoint == 'true': self.ema_valid_task_loss_prev = self.valid_task_loss_prev # self.scaler.scale(valid_loss).backward() valid_loss.backward() valid_loss_item = valid_loss.item() if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0: # 如果是fp16,需要unscale。如果是bf16,self.optimizer里没有unscale这个方法 try: self.optimizer.unscale_() except Exception: pass if isinstance(self.model, FSDP): self.model.clip_grad_norm_(self.args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_( self.model.parameters(), self.args.max_grad_norm) # print_rank_0(f'valid_loss {valid_loss.item()}, task loss: {valid_task_loss}, valid step: {valid_step}') self.famo.update(valid_task_loss) self.optimizer.zero_grad() step_time = time.time() - step_start
#!/usr/bin/env python # coding=utf-8 # Copyright (c) 2023 Ant Group. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. HYPER_PARAMETER_NAME = 'hyper_parameters.json' ATORCH_CHECKPOINT_NAME = 'atorch_checkpoint.bin' EPOCH_CHECKPOINT_NAME = 'epoch' FAMO_CHECKPOINT_NAME = 'famo_checkpoint' EMA_CHECKPOINT_NAME = 'ema_checkpoint' # logger = logging.getLogger(__name__) def is_local_main_process(): return atorch.local_rank() == 0 def is_global_main_process(): return atorch.rank() == 0 def has_inf_or_nan(x): try: # if x is half, the .float() incurs an additional deep copy, but it's necessary if # Pytorch's .sum() creates a one-element tensor of the same type as x # (which is true for some recent version of pytorch). cpu_sum = float(x.float().sum()) # More efficient version that can be used if .sum() returns a Python scalar # cpu_sum = float(x.sum()) except RuntimeError as instance: # We want to check if inst is actually an overflow exception. # RuntimeError could come from a different error. # If so, we still want the exception to propagate. if "value cannot be converted" not in instance.args[0]: raise return True else: if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum: return True return False def count_model_params(model): trainable_params = 0 all_params = 0 for param in model.parameters(): num_params = param.numel() all_params += num_params if param.requires_grad: trainable_params += num_params return all_params, trainable_params class AtorchArguments: def __init__(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) def get_linear_schedule_with_log_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): def lr_lambda(current_step: int): inverse_log_warm_up = 1.0 / math.log(num_warmup_steps) if current_step == 0: return 0.0 if current_step < num_warmup_steps: return inverse_log_warm_up * math.log(current_step) return max( 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) ) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_scheduler(name, optimizer, num_warmup_steps, num_training_steps): scheduler_map = { 'log_warmup_linear_decay': get_linear_schedule_with_log_warmup} try: lr_scheduler = get_scheduler_trans( name, optimizer, num_warmup_steps, num_training_steps) return lr_scheduler except Exception: schedule_func = scheduler_map[name] return schedule_func(optimizer, num_warmup_steps, num_training_steps) class AtorchTrainer: def __init__(self, model, args, train_dataset, valid_dataset, tokenizer=None, callbacks=None, no_save_atorch_checkpoint=None, save_pytorch_model_bin_checkpoint=True, train_peft=False, rank=0, max_shard_size='10GB', files_to_save=None, args_to_save=None, data_collator=None, my_loss_func=None, **kwargs, ): self.args = args self.TASK2ID = TASK2ID self.ID2TASK = ID2TASK print('in atorch trainer') print(TASK2ID) print(ID2TASK) self.model = model self.no_save_atorch_checkpoint = no_save_atorch_checkpoint self.save_pytorch_model_bin_checkpoint = 
save_pytorch_model_bin_checkpoint self.train_peft = train_peft self.rank = rank self.kwargs = kwargs self.train_dataset = train_dataset self.valid_dataset = valid_dataset self.tokenizer = tokenizer self.max_shard_size = max_shard_size self.files_to_save = files_to_save self.args_to_save = args_to_save self.best_metric = None self.best_model_checkpoint = None self.no_save_base_model = True self.device = f"cuda:{atorch.local_rank()}" self.famo = FAMO(n_tasks=len(TASK2ID), device=self.device, mode=self.args.weighted_loss_mode) self.famo_resume = False self.selfpaced_status = SelfPacedStatus(args.selfpaced_interval) self.total_train_batch_size = self.args.per_device_train_batch_size * \ self.args.gradient_accumulation_steps * \ atorch.world_size() self.data_collator = data_collator self.my_loss_func = my_loss_func if self.args.early_stopping_patience > 0: print(f'early_stopping_patience: {self.args.early_stopping_patience}') patience = self.args.early_stopping_patience self.early_stopping = EarlyStopping(patience, verbose=True) self.train_dataloader_args = { "shuffle": True, "batch_size": self.total_train_batch_size, "pin_memory": True, "collate_fn": data_collator, "drop_last": True, "num_workers": self.args.num_workers, # "persistent_workers": args.num_workers > 0, } self.valid_dataloader = DataLoader( valid_dataset, sampler=DistributedSampler(valid_dataset, shuffle=True), batch_size=args.per_device_valid_batch_size, pin_memory=True, collate_fn=data_collator ) self.valid_dataloader_length = len(self.valid_dataloader) if self.args.resume_from_checkpoint == 'true': self.resume_checkpoint_dir = self.get_last_checkpoint( self.args.output_dir) self.atorch_args = AtorchArguments( lr=args.learning_rate, weight_decay=args.weight_decay, adam_eps=args.adam_epsilon, adam_beta1=args.adam_beta1, adam_beta2=args.adam_beta2) self.atorch_init() self.num_update_steps_per_epoch = math.ceil( len(self.train_dataloader) / self.args.gradient_accumulation_steps) print(f'number of update steps per epoch: {self.num_update_steps_per_epoch}') if self.args.max_steps == -1: self.args.max_steps = int( self.args.num_train_epochs * self.num_update_steps_per_epoch) else: self.args.num_train_epochs = math.ceil( self.args.max_steps / self.num_update_steps_per_epoch) # self.args.warmup_steps = self.args.get_warmup_steps( # self.args.max_steps) # 找不到get_warmup_steps custom_lr_scheduler_type = self.kwargs.get( 'custom_lr_scheduler_type', None) self.lr_scheduler = get_scheduler( name=custom_lr_scheduler_type if custom_lr_scheduler_type else self.args.lr_scheduler_type, optimizer=self.optimizer, num_warmup_steps=self.args.num_warmup_steps, num_training_steps=self.args.max_steps, ) print_rank_0(f'lr_scheduler{self.lr_scheduler}') if self.args.resume_from_checkpoint == 'true': with warnings.catch_warnings(record=True): self.lr_scheduler.load_state_dict(torch.load( os.path.join(self.resume_checkpoint_dir, SCHEDULER_NAME))) self._load_rng_state(self.resume_checkpoint_dir) torch.distributed.barrier() now_datetime = datetime.datetime.now() timestr = datetime.datetime.strftime(now_datetime, '%Y%m%d-%H%M%S') self.log_dir = os.path.join(self.args.output_dir, 'runs', timestr) self.summary_writer = None if torch.distributed.get_rank() == 0: self.summary_writer = SummaryWriter(log_dir=self.log_dir) def get_last_checkpoint(self, folder): _re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)") content = sorted(os.listdir(folder)) checkpoints = [ path for path in content if _re_checkpoint.search(path) is not None and 
os.path.isdir(os.path.join(folder, path)) ] if len(checkpoints) == 0: return return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0]))) def _load_rng_state(self, resume_checkpoint_dir): # Load RNG states from `checkpoint` if resume_checkpoint_dir is None: return if self.args.world_size > 1: rng_file = os.path.join( resume_checkpoint_dir, f"rng_state_{self.rank}.pth") if not os.path.isfile(rng_file): logger.info( f"Didn't find an RNG file for process {self.rnak}, if you are resuming a training that " "wasn't launched in a distributed fashion, reproducibility is not guaranteed." ) return else: rng_file = os.path.join(resume_checkpoint_dir, "rng_state.pth") if not os.path.isfile(rng_file): logger.info( "Didn't find an RNG file, if you are resuming a training that was launched in a distributed " "fashion, reproducibility is not guaranteed." ) return checkpoint_rng_state = torch.load(rng_file) random.setstate(checkpoint_rng_state["python"]) np.random.set_state(checkpoint_rng_state["numpy"]) torch.random.set_rng_state(checkpoint_rng_state["cpu"]) if torch.cuda.is_available(): if self.args.local_rank != -1: torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"]) else: try: torch.cuda.random.set_rng_state_all( checkpoint_rng_state["cuda"]) except Exception as e: logger.info( f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}" "\nThis won't yield the same results as if the training had not been interrupted." ) def load_atorch_model_state(self, model_state_dict, **kwargs): print('resume atorch model state') if self.is_rank0(): self.model.load_state_dict(model_state_dict) # 在 rank 0 加载完毕后,再通过sync_module_states分发参数 torch.distributed.barrier() # self.model = FSDP(self.model, sync_module_states=True, **kwargs) def load_atorch_optim_state(self, optim_state_dict): print('resume optimizer state') optim_state_dict = FSDP.scatter_full_optim_state_dict( optim_state_dict, self.model) # may be removed after PyTorch 2.2 def move_optim_state_to_cpu(optim_state_dict): for k in optim_state_dict: if isinstance(optim_state_dict[k], torch.Tensor): optim_state_dict[k] = optim_state_dict[k].cpu() elif isinstance(optim_state_dict[k], dict): move_optim_state_to_cpu(optim_state_dict[k]) move_optim_state_to_cpu(optim_state_dict) self.optimizer.load_state_dict(optim_state_dict) def load_famo_state(self): print_rank_0(f'loading famo checkpoint') self.famo_resume = True famo_dir = os.path.join(self.resume_checkpoint_dir, 'famo_checkpoint/') if not os.path.exists(famo_dir): print_rank_0(f'can not find the famo checkpoint dir!') else: famo_state_name = FAMO_CHECKPOINT_NAME + f'_rank_{self.rank}.pth' famo_checkpoint_state = torch.load(os.path.join(famo_dir, famo_state_name)) w_opt_state = famo_checkpoint_state['w_opt_state'] self.famo.prev_train_loss = famo_checkpoint_state['prev_train_loss'].to(self.famo.device) self.famo.prev_valid_loss = famo_checkpoint_state['prev_valid_loss'].to(self.famo.device) self.famo.first_train_step = famo_checkpoint_state['first_train_step'] self.famo.first_valid_step = famo_checkpoint_state['first_valid_step'] self.famo.ratio_valid_task_loss_prev = famo_checkpoint_state['ratio_valid_task_loss_prev'].to(self.famo.device) self.famo.w = famo_checkpoint_state['w'].to(self.famo.device) self.famo.w_opt.load_state_dict(w_opt_state) print_rank_0(f'prev_train_loss: {self.famo.prev_train_loss}') print_rank_0(f'prev_valid_loss: {self.famo.prev_valid_loss}') print_rank_0(f'first_train_step: {self.famo.first_train_step}') 
print_rank_0(f'first_valid_step: {self.famo.first_valid_step}') print_rank_0(f'ratio_valid_task_loss_prev: {self.famo.ratio_valid_task_loss_prev}') print_rank_0(f'w: {self.famo.w}') print_rank_0(f'load famo checkpoint successfully') def atorch_init(self): assert torch_version() >= (2, 0, 0), "use pt2.0 for use orig param if fsdp" if self.args.model_type == 'gpt_neox': # wrap_class = (GPTNeoXAttention, GPTNeoXMLP) wrap_class = (GPTNeoXLayer,) elif self.args.model_type == 'llama': # wrap_class = (LlamaAttention, LlamaMLP) wrap_class = (LlamaDecoderLayer,) elif self.args.model_type == 'glm': wrap_class = (GLMBlock,) parallel_mode = [] if self.args.dp: # p_mode = ([("data", torch.distributed.get_world_size())], None) parallel_mode.append(("data", self.args.dp)) if self.args.tp: parallel_mode.append(("tensor_parallel", self.args.tp)) strategy = [ # ("parallel_mode", p_mode), ("parallel_mode", (parallel_mode, None)), "module_replace", # ("fsdp", fsdp_config), # ("amp_native", {"dtype": torch.bfloat16}) if self.args.bf16 else "amp_native", # ("checkpoint", wrap_class), ] if self.args.peft_type is None or self.args.peft_type == 'lora': cpu_offload = False if self.args.total_model_param < 1e9 else True fsdp_config = { "atorch_wrap_cls": wrap_class, "sync_module_states": True, "use_orig_params": True, "limit_all_gathers": True, # "cpu_offload": True, } print(fsdp_config) fsdp_opt = ("fsdp", fsdp_config) strategy.append(fsdp_opt) self.args.atorch_opt = "fsdp" else: num_all_params, num_trainable_params = count_model_params(self.model) if num_all_params < 11e9 or self.args.peft_type == "qlora": # For GLM-10B logger.info( f"Found using {self.args.peft_type} method. The peft model has {num_all_params} and only " f"{num_trainable_params} params are trainable({100 * num_trainable_params / num_all_params}%)" ". Set atorch opt to DistributedDataParallel.") self.args.atorch_opt = "ddp" if self.args.bf16 or self.args.fp16: if self.args.bf16: amp_config = {"dtype": torch.bfloat16, "skip_if_nonfinite": True} # amp_config = {"dtype": torch.bfloat16} if self.args.peft_type == "qlora": # The dtype of grads is bf16 when using qlora # atorch scaler does not support bf16 grads. 
amp_config["skip_if_nonfinite"] = False elif self.args.fp16: amp_config = {"dtype": torch.float16} strategy.append(("amp_native", amp_config)) # strategy.append(("half", "bf16")) if self.args.checkpoint_activations: strategy.append(("checkpoint", wrap_class)) print(f"Manually loaded auto acc strategy: {strategy}") def prepare_input(batch, device): # DEBUG: GLM NoneType batch = {k: v.to(device=device, non_blocking=True) if v is not None else None for k, v in batch.items()} return batch def optim_param_func(model, args): no_decay = ["bias", "LayerNorm.weight", "layernorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) ], "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if any(nd in n for nd in no_decay) ], "weight_decay": 0.0, }, ] return optimizer_grouped_parameters # load fsdp checkpoint参数 if self.args.resume_from_checkpoint == 'true': logger.info(f'Resume training from {self.resume_checkpoint_dir}') if self.is_rank0(): sd = torch.load(os.path.join( self.resume_checkpoint_dir, ATORCH_CHECKPOINT_NAME), map_location='cpu') model_state_dict, optim_state_dict = sd['model_state_dict'], sd['optimizer_state_dict'] else: model_state_dict, optim_state_dict = None, None torch.distributed.barrier() # other rank waiting ########## self.load_atorch_model_state(model_state_dict) ########## if self.is_rank0(): print(f'GPU mem before fsdp:') print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) optim_func = torch.optim.AdamW print(f'optimizer before fsdp: {optim_func}') ddp_find_unused_parameters = None if self.args.atorch_opt == "ddp" and not (self.args.peft_type in ["lora", "qlora"] and self.args.checkpoint_activations): ddp_find_unused_parameters = True status, result, best_strategy = auto_accelerate( self.model, optim_func, self.train_dataset, dataloader_args=self.train_dataloader_args, loss_func=self.my_loss_func, prepare_input=prepare_input, optim_args={ "lr": self.atorch_args.lr, "weight_decay": self.atorch_args.weight_decay, "eps": self.atorch_args.adam_eps, "betas": (self.atorch_args.adam_beta1, self.atorch_args.adam_beta2), }, optim_param_func=partial( optim_param_func, args=self.atorch_args), load_strategy=strategy, ignore_dryrun_on_load_strategy=True, find_unused_parameters=ddp_find_unused_parameters, ) assert ( status ), f"auto_accelerate failed. 
status: {status}, result: {result}, best_strategy: {best_strategy}" print(f"Best strategy is: {best_strategy}") self.model = result.model self.optimizer = result.optim print(f'optimizer after fsdp: {self.optimizer}') self.loss_func = result.loss_func self.train_dataloader = result.dataloader self.prepare_input = result.prepare_input if self.args.resume_from_checkpoint == 'true': self.load_atorch_optim_state(optim_state_dict) if self.args.weighted_loss_mode.startswith('famo_valid'): self.load_famo_state() print(f"atorch use optimizer: {self.optimizer}") if self.is_rank0(): print(f'GPU mem after fsdp:') print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) def evaluate(self): logger.info(f"Start evaluation") if self.is_rank0(): print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) print(f'valid dataset length is: {len(self.valid_dataset)}') print(f'valid dataloader length is: {len(self.valid_dataloader)}') print(f'per device batch size: {self.args.per_device_valid_batch_size}') progress_bar = tqdm(range(len(self.valid_dataloader)), disable=not is_local_main_process(), smoothing=0) self.model.eval() losses = [] accumulated_task_loss_np = np.zeros(len(self.ID2TASK)) accumulated_task_num_np = np.zeros(len(self.ID2TASK)) accumulated_step = 0 for step, batch in enumerate(self.valid_dataloader): # if step >= self.args.valid_iters: if step >= self.args.valid_iters and (self.args.total_model_param >= 1e9 or self.args.train_mode == 'sst'): break with torch.no_grad(): # batch = {k: v.to(self.device) for k, v in batch.items()} # batch = self.prepare_input(batch, self.device) # outputs = self.model(**batch) outputs = self.model( input_ids=batch['input_ids'].to(self.device), attention_mask=batch['attention_mask'].to(self.device), position_ids=batch['position_ids'].to(self.device) ) # loss = outputs["loss"] loss, task_loss, task_num, _, _ = self.loss_func(outputs, batch, self.args.weighted_loss_mode) repeated_loss = loss.repeat( self.args.per_device_valid_batch_size) if repeated_loss.ndim == 0: repeated_loss = repeated_loss.clone()[None] output_tensors = [repeated_loss.clone() for _ in range(atorch.world_size())] torch.distributed.all_gather(output_tensors, repeated_loss) for tensor in output_tensors: if torch.isnan(tensor).any() or torch.isinf(tensor).any(): accumulated_step -= 1 continue losses.append(torch.cat(output_tensors, dim=0).cpu()) task_loss = task_loss.cpu().numpy() task_num = task_num.cpu().numpy() accumulated_task_loss_np += task_loss accumulated_task_num_np += task_num accumulated_step += 1 progress_bar.update(1) losses = torch.cat(losses) losses = losses[: len(self.valid_dataset)] mean_loss = torch.mean(losses).item() accumulated_task_loss = torch.tensor(accumulated_task_loss_np).to(self.device) accumulated_task_num = torch.tensor(accumulated_task_num_np).to(self.device) torch.distributed.all_reduce(accumulated_task_loss, op=torch.distributed.ReduceOp.SUM) torch.distributed.all_reduce(accumulated_task_num, op=torch.distributed.ReduceOp.SUM) accumulated_task_loss /= torch.distributed.get_world_size() valid_task_loss = accumulated_task_loss / (accumulated_step - 1) logs = {'valid_loss': mean_loss} per_task_valid_loss = {self.ID2TASK[i]+'_loss': valid_task_loss[i].item() for i in range(len(self.ID2TASK))} logs.update(per_task_valid_loss) if is_global_main_process(): logger.info('log point') for i in range(len(self.ID2TASK)): if accumulated_task_num[i] != 0: logger.info(f"{self.ID2TASK[i]}_loss: {valid_task_loss[i]}, sample nums: {accumulated_task_num[i]}") 
self.log(logs, step=self.global_steps, phase='Evaluation') metrics = {'valid_loss': mean_loss, 'valid_task_loss': valid_task_loss} logger.info(f"Finish evaluation") if self.is_rank0(): print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) return metrics def log(self, logs, step, phase='Train'): if not self.summary_writer: return logger.info(json.dumps(logs)) for key, value in logs.items(): self.summary_writer.add_scalar(f'{phase}/{key}', value, step) def _sorted_checkpoints( self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, checkpoint_name_pattern='([0-9]+)', use_mtime=False ): ordering_and_checkpoint_path = [] glob_checkpoints = [str(x) for x in Path(output_dir).glob( f"{checkpoint_prefix}-*") if os.path.isdir(x)] for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append( (os.path.getmtime(path), path)) else: regex_match = re.search( f".*{checkpoint_prefix}-({checkpoint_name_pattern})", path) if regex_match is not None and regex_match.groups() is not None: ordering_and_checkpoint_path.append( (int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] # Make sure we don't delete the best model. if self.best_model_checkpoint is not None: best_model_index = checkpoints_sorted.index(str(Path(self.best_model_checkpoint))) # for i in range(best_model_index, len(checkpoints_sorted) - 2): for i in range(best_model_index, len(checkpoints_sorted) - 1): checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i] print_rank_0(f'checkpoints sorted list: {checkpoints_sorted}') return checkpoints_sorted def _rotate_checkpoints( self, use_mtime=False, output_dir=None, prefix=PREFIX_CHECKPOINT_DIR, checkpoint_name_pattern='.*') -> None: if self.args.save_total_limit is None or self.args.save_total_limit <= 0: return # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints( use_mtime=use_mtime, output_dir=output_dir, checkpoint_prefix=prefix, checkpoint_name_pattern=checkpoint_name_pattern) if len(checkpoints_sorted) <= self.args.save_total_limit: return # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. save_total_limit = self.args.save_total_limit number_of_checkpoints_to_delete = max( 0, len(checkpoints_sorted) - save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint, ignore_errors=True) def _clean_atorch_checkpoints(self, output_dir=None, prefix=PREFIX_CHECKPOINT_DIR): # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints( output_dir=output_dir, checkpoint_prefix=prefix, checkpoint_name_pattern='([0-9]+)') # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. 
for checkpoint in checkpoints_sorted[:-1]: logger.info( f"Deleting older atorch checkpoint [{checkpoint}] due to self.args.save_total_limit") try: os.remove(os.path.join(checkpoint, ATORCH_CHECKPOINT_NAME)) except Exception: continue def _save_peft_model(self, output_dir, state_dict=None): logger.info(f"Start saving peft model to {output_dir}") output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) model = unwrap_model(self.model) if isinstance(model, PeftModel): if state_dict is None: state_dict = model.state_dict() model.save_pretrained( output_dir, state_dict=state_dict, is_main_process=self.is_rank0()) else: if state_dict is None: state_dict = self.model.state_dict() if self.is_rank0(): torch.save(state_dict, os.path.join( output_dir, "pytorch_model.bin")) logger.info(f"Saving peft model done.") def _save_model(self, output_dir=None, state_dict=None): # If we are executing this function, we are the process zero, so we don't check for that. output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) logger.info(f"Saving model checkpoint to {output_dir}") # Save a trained model and configuration using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` if not isinstance(self.model, PreTrainedModel): if isinstance(unwrap_model(self.model), PreTrainedModel): print_rank_0('save in not pretrained model~~~~~~~') if state_dict is None: state_dict = self.model.state_dict() # state_dict = {key: value.bfloat16() if self.args.bf16 else value.half() for key, value in self.model.state_dict().items()} model = unwrap_model(self.model) model.save_pretrained( output_dir, state_dict=state_dict, max_shard_size=self.max_shard_size, is_main_process=self.is_rank0()) # unwrap_model(self.model).save_pretrained( # output_dir, state_dict=state_dict, max_shard_size=self.max_shard_size) elif isinstance(unwrap_model(self.model), PeftModel): if state_dict is None: state_dict = unwrap_model(self.model).base_model.model.state_dict() # state_dict = {key: value.bfloat16() if self.args.bf16 else value.half() for key, value in state_dict.items()} # Filter the peft params ... 
param_keys = list(state_dict.keys()) base_model_state_dict = {} for key in param_keys: if LORA_KEY in key: # state_dict.pop(key) continue elif PEFT_PARAM_PREFIX in key: # value = state_dict.pop(key) value = state_dict[key] new_key = key.replace(PEFT_PARAM_PREFIX, "") base_model_state_dict[new_key] = value else: base_model_state_dict[key] = value if self.is_rank0(): torch.save(base_model_state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: logger.info( "Trainer.model is not a `PreTrainedModel`, only saving its state dict.") if state_dict is None: state_dict = self.model.state_dict() # state_dict = {key: value.bfloat16() if self.args.bf16 else value.half() for key, value in self.model.state_dict().items()} torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: print(f'save in pretrained model!!!!!!') if state_dict is None: state_dict = self.model.state_dict() # state_dict = {key: value.bfloat16() if self.args.bf16 else value.half() for key, value in self.model.state_dict().items()} self.model.save_pretrained( output_dir, state_dict=state_dict, max_shard_size=self.max_shard_size) # if self.tokenizer is not None and self.args.model_type == 'glm': # TODO: 需要适配加载tokenizer,主要是gpt2_multi_task_dataset的encode部分里的tokenizer.tokenize函数 if self.tokenizer is not None: self.tokenizer.save_pretrained(output_dir) # Good practice: save your training arguments together with the trained model torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) def is_rank0(self): return self.rank == 0 def _save_atorch_checkpoint(self, output_dir): # StateDictType.FULL_STATE_DICT得到完整的模型状态。 # FullStateDictConfig指定保存到CPU,仅rank0保存 save_policy = FullStateDictConfig(offload_to_cpu=True, rank0_only=True) with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, save_policy): model_state_dict = self.model.state_dict() optim_state_dict = FSDP.full_optim_state_dict(self.model, self.optimizer) # may be removed after PyTorch 2.2 if self.is_rank0(): os.makedirs(output_dir, exist_ok=True) torch.save( { "model_state_dict": model_state_dict, "optimizer_state_dict": optim_state_dict, "global_steps": self.global_steps, }, os.path.join(output_dir, ATORCH_CHECKPOINT_NAME), ) torch.distributed.barrier() # other rank waiting def _save_famo_checkpoint(self, output_dir): famo_dir = os.path.join(output_dir, 'famo_checkpoint/') os.makedirs(famo_dir, exist_ok=True) if self.famo.w_opt is not None: optim_state = self.famo.w_opt.state_dict() famo_state = {'prev_train_loss': self.famo.prev_train_loss, 'prev_valid_loss': self.famo.prev_valid_loss, 'first_train_step': self.famo.first_train_step, 'first_valid_step': self.famo.first_valid_step, 'ratio_valid_task_loss_prev': self.famo.ratio_valid_task_loss_prev, 'w': self.famo.w, 'w_opt_state': optim_state, } famo_state_name = FAMO_CHECKPOINT_NAME + f'_rank_{self.rank}.pth' torch.save(famo_state, os.path.join(famo_dir, famo_state_name)) # if self.is_rank0(): # torch.save(famo_state, os.path.join(output_dir, FAMO_CHECKPOINT_NAME)) torch.distributed.barrier() # other rank waiting def save(self, suffix=None, metrics=None): logger.info('Save start') if self.is_rank0(): print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) if not self.save_pytorch_model_bin_checkpoint: return if suffix is None: checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.global_steps}" else: checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{suffix}" run_dir = self.args.output_dir output_dir = os.path.join(run_dir, checkpoint_folder) # self._save_model(output_dir) # 获取要存的state_dict, 
每个rank都要调用 if isinstance(self.model, FSDP): save_policy = FullStateDictConfig(offload_to_cpu=atorch.world_size() > 1, rank0_only=atorch.world_size() > 1) with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, save_policy): model_state_dict = self.model.state_dict() optim_state_dict = FSDP.full_optim_state_dict(self.model, self.optimizer) # may be removed after PyTorch 2.2 else: model_state_dict = unwrap_model(self.model).state_dict() optim_state_dict = self.optimizer.state_dict() if not self.no_save_atorch_checkpoint: if self.args.peft_type is None or not self.no_save_base_model: if self.is_rank0(): os.makedirs(output_dir, exist_ok=True) torch.save( { "model_state_dict": model_state_dict, "optimizer_state_dict": optim_state_dict, "global_steps": self.global_steps, }, os.path.join(output_dir, ATORCH_CHECKPOINT_NAME), ) torch.distributed.barrier() # other rank waiting if self.args.peft_type is not None: print(f'no_save_base_model: {self.no_save_base_model}') if not self.no_save_base_model: self._save_model(output_dir=output_dir) self._save_peft_model(output_dir=output_dir) else: self._save_model(output_dir=output_dir) # if not self.no_save_atorch_checkpoint: # self._save_atorch_checkpoint(output_dir) # else: # torch.save(self.optimizer.state_dict(), # os.path.join(output_dir, OPTIMIZER_NAME)) with warnings.catch_warnings(record=True) as caught_warnings: torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) reissue_pt_warnings(caught_warnings) # Save RNG state in non-distributed training rng_states = { "python": random.getstate(), "numpy": np.random.get_state(), "cpu": torch.random.get_rng_state(), } if torch.cuda.is_available(): if self.args.local_rank == -1: # In non distributed, we save the global CUDA RNG state (will take care of DataParallel) rng_states["cuda"] = torch.cuda.random.get_rng_state_all() else: rng_states["cuda"] = torch.cuda.random.get_rng_state() os.makedirs(output_dir, exist_ok=True) if torch.distributed.get_world_size() <= 1: torch.save(rng_states, os.path.join(output_dir, "rng_state.pth")) else: # torch.save(rng_states, os.path.join( # output_dir, f"rng_state_{self.args.process_index}.pth")) # process_index = rank torch.save(rng_states, os.path.join( output_dir, f"rng_state_{self.rank}.pth")) if self.args_to_save: json.dump(self.args_to_save, open(os.path.join(output_dir, HYPER_PARAMETER_NAME), 'w'), ensure_ascii=False, indent=2) # save state state = {'global_steps': self.global_steps} json.dump(state, open(os.path.join( output_dir, TRAINER_STATE_NAME), 'w'), ensure_ascii=False, indent=2) # if self.args.weighted_loss_mode == "selfpaced" or self.args.weighted_loss_mode.startswith('famo_valid_ema'): # ema_dir = os.path.join(output_dir, 'ema_checkpoint/') # os.makedirs(ema_dir, exist_ok=True) # ema_state = {'valid_task_loss_prev': self.valid_task_loss_prev, # 'ema_valid_task_loss_prev': self.ema_valid_task_loss_prev, # 'ratio_valid_task_loss_prev': self.ratio_valid_task_loss_prev} # ema_state_name = EMA_CHECKPOINT_NAME + f'_rank_{self.rank}.pth' # torch.save(ema_state, os.path.join(ema_dir, ema_state_name)) # torch.distributed.barrier() # other rank waiting # print_rank_0(f'ema state to save: {ema_state}') # if self.files_to_save: # for name in self.files_to_save: # if not os.path.exists(name): # continue # try: # if os.path.isfile(name): # shutil.copy(name, output_dir) # elif os.path.isdir(name): # shutil.copytree(name, os.path.join( # output_dir, os.path.basename(name))) # except Exception: # continue # Determine the new best 
metric / best model checkpoint if metrics is not None and self.args.metric_for_best_model is not None: metric_to_check = self.args.metric_for_best_model if not metric_to_check.startswith("valid_"): metric_to_check = f"valid_{metric_to_check}" metric_value = metrics[metric_to_check] operator = np.greater if self.args.greater_is_better == 'true' else np.less if ( self.best_metric is None or self.best_model_checkpoint is None or operator(metric_value, self.best_metric) ): self.best_metric = metric_value self.best_model_checkpoint = output_dir print_rank_0(f'current best model checkpoint is: {self.best_model_checkpoint}, valid_loss: {self.best_metric}') if self.args.weighted_loss_mode.startswith('famo_valid'): self._save_famo_checkpoint(output_dir) if self.is_rank0(): if self.args.extra_save_by_epoch: print('extra_save_by_epoch') # 如果是每个epoch extra save的,那么每个epoch的checkpoint不会删除,不受save_total_limit的影响, # 而对按step存的,则会只保留save_total_limit个 self._rotate_checkpoints( output_dir=run_dir, prefix=PREFIX_CHECKPOINT_DIR, checkpoint_name_pattern='([0-9]+)$') else: self._rotate_checkpoints( output_dir=run_dir, prefix=PREFIX_CHECKPOINT_DIR) # 只保留最新一个checkpoint的atorch checkpoint self._clean_atorch_checkpoints( output_dir=run_dir, prefix=PREFIX_CHECKPOINT_DIR) print(torch.cuda.memory_summary(device=self.device, abbreviated=False)) torch.distributed.barrier() logger.info('Save finished') def self_paced_evaluate(self, global_steps, current_epoch, v_batch, ratio_valid_task_loss_prev): self.model.eval() # v_batch = next(valid_iterator) with torch.no_grad(): valid_outputs = self.model( input_ids=v_batch['input_ids'], attention_mask=v_batch['attention_mask'], position_ids=v_batch['position_ids'] ) self.selfpaced_status.update(global_steps, current_epoch, 'valid', ratio_valid_task_loss_prev) _, valid_task_loss, valid_task_num, _, self.selfpaced_status = self.loss_func(valid_outputs, v_batch, self.args.weighted_loss_mode, self.famo, self.selfpaced_status) torch.distributed.all_reduce(valid_task_loss, op=torch.distributed.ReduceOp.SUM) valid_task_loss /= torch.distributed.get_world_size() # print_rank_0(f"self paced valid loss: {valid_task_loss}") return valid_task_loss def train(self, **kwargs): logger.info("***** Running training *****") logger.info(f" Num examples = {len(self.train_dataset)}") logger.info(f" Num Epochs = {self.args.num_train_epochs}") logger.info( f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}") logger.info( f" Total train batch size (w. 
parallel, distributed & accumulation) = {self.total_train_batch_size}") logger.info( f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {self.args.max_steps}") progress_bar = tqdm(range(self.args.max_steps), disable=not is_local_main_process(), smoothing=0) training_time = 0 self.global_steps = 0 start_epoch = 0 steps_trained_in_current_epoch = 0 exit_flag = False if self.args.resume_from_checkpoint == 'true': state = json.load( open(os.path.join(self.resume_checkpoint_dir, TRAINER_STATE_NAME), 'r')) self.global_steps = state.get('global_steps', 0) # progress_bar.update(self.global_steps) progress_bar = tqdm(range(self.args.max_steps), disable=not is_local_main_process(), initial=self.global_steps, smoothing=0) start_epoch = self.global_steps // self.num_update_steps_per_epoch steps_trained_in_current_epoch = self.global_steps % self.num_update_steps_per_epoch steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps print(f'Start training at step {self.global_steps}') self.last_step_logged = self.global_steps self.skipped_steps = 0 self.accumulated_loss = 0 self.accumulated_task_loss = np.zeros(len(self.ID2TASK)) self.accumulated_task_num = np.zeros(len(self.ID2TASK)) self.args.selfpaced_interval = 20 if self.args.total_model_param >= 1e9 else 20 self.args.famo_interval = 10 self.train_task_loss_prev = None self.valid_task_loss_prev = None # L_valid at step t-1 self.ema_valid_task_loss_prev = None # L_valid_ema at step t-2 self.ratio_valid_task_loss_prev = torch.zeros(len(self.ID2TASK)).to(self.device) # ema ratio at step t-1 if self.args.weighted_loss_mode.startswith('famo_valid_ema'): if self.args.resume_from_checkpoint == 'true': # TODO pass # ema_dir = os.path.join(self.resume_checkpoint_dir, 'ema_checkpoint/') # if not os.path.exists(ema_dir): # print_rank_0(f'can not find the famo checkpoint dir!') # else: # ema_state_name = EMA_CHECKPOINT_NAME + f'_rank_{self.rank}.pth' # ema_checkpoint_state = torch.load(os.path.join(ema_dir, ema_state_name)) # self.valid_task_loss_prev = ema_checkpoint_state['valid_task_loss_prev'].to(self.device) # L_valid at step t-1 # self.ema_valid_task_loss_prev = ema_checkpoint_state['ema_valid_task_loss_prev'].to(self.device) # L_valid_ema at step t-2 # self.ratio_valid_task_loss_prev = ema_checkpoint_state['ratio_valid_task_loss_prev'].to(self.device) # ema ratio at step t-1 # print_rank_0(f'trainer state from resume: {ema_checkpoint_state}') else: metrics = self.evaluate() self.ema_valid_task_loss_prev = metrics['valid_task_loss'] self.famo.global_steps = self.global_steps if self.args.weighted_loss_mode == "selfpaced": self.ema_valid_task_loss_prev = self.self_paced_evaluate(self.global_steps, start_epoch + 1, next(iter(self.valid_dataloader)), self.ratio_valid_task_loss_prev) for epoch in range(start_epoch, int(self.args.num_train_epochs)): self.train_dataloader.set_epoch(epoch) self.model.train() start_time = time.time() valid_iterator = iter(self.valid_dataloader) for step, batch in enumerate(self.train_dataloader): if step == 0: print_rank_0(f"step 1 batch shape: {batch['input_ids'].shape},\n" f"last 10 tokens: {batch['input_ids'][:, -10:]}") # f"last 10 loss mask: {batch['loss_mask'][:, -10:]}" print_rank_0(f"first 1000 tokens") for pt in range(10): print_rank_0(f"{batch['input_ids'][:, 10 * pt:10 * pt + 10]}") # print_rank_0(f"{batch['loss_mask'][:, 10 * pt:10 * pt + 10]}") # self.global_steps += 1 skipped = False self.model.train() step_start = time.time() if 
steps_trained_in_current_epoch and step < steps_trained_in_current_epoch: continue steps_trained_in_current_epoch = 0 # 恢复到上一次的steps in current epoch后,需要置零,否则后面的每个epoch都会跳过前面的steps # batch = self.prepare_input(batch, self.device) outputs = self.model( input_ids=batch['input_ids'].to(self.device), attention_mask=batch['attention_mask'].to(self.device), position_ids=batch['position_ids'].to(self.device), # labels=batch['labels'], ) # self paced loss # ema_tmp: L_valid_ema at step t-1 if (self.global_steps + 1) % self.args.selfpaced_interval == 0 and (self.args.weighted_loss_mode.startswith('famo_valid_ema') or self.args.weighted_loss_mode == 'selfpaced'): ema_tmp = 0.001 * self.valid_task_loss_prev + (1 - 0.001) * self.ema_valid_task_loss_prev self.ratio_valid_task_loss_prev = (ema_tmp - self.ema_valid_task_loss_prev ) / (self.ema_valid_task_loss_prev + 1e-6) self.ema_valid_task_loss_prev = ema_tmp self.ratio_valid_task_loss_prev = torch.where(self.ratio_valid_task_loss_prev > 0, -1e20, self.ratio_valid_task_loss_prev) self.famo.ratio_valid_task_loss_prev = self.ratio_valid_task_loss_prev if self.args.weighted_loss_mode == 'selfpaced': self.selfpaced_status.update(self.global_steps + 1, epoch + 1, 'train', self.ratio_valid_task_loss_prev) loss, task_loss, task_num, self.famo, self.selfpaced_status = self.loss_func(outputs, batch, self.args.weighted_loss_mode, self.famo, self.selfpaced_status) # print(f'rank: {self.rank}, loss: {loss}, task loss: {task_loss}') if self.args.weighted_loss_mode.startswith('famo'): self.famo.print_loss = self.famo.print_loss / self.args.gradient_accumulation_steps loss_tensor = torch.zeros( [1], device=self.famo.print_loss.device, dtype=self.famo.print_loss.dtype) loss_tensor[0] = self.famo.print_loss.item() else: loss = loss / self.args.gradient_accumulation_steps loss_tensor = torch.zeros( [1], device=loss.device, dtype=loss.dtype) loss_tensor[0] = loss.item() torch.distributed.all_reduce(loss_tensor) torch.distributed.all_reduce(task_loss, op=torch.distributed.ReduceOp.SUM) torch.distributed.all_reduce(task_num, op=torch.distributed.ReduceOp.SUM) reduce_loss = loss_tensor.sum() / torch.distributed.get_world_size() if has_inf_or_nan(reduce_loss): print_rank_0(f'There have nan loss.') self.skipped_steps += 1 skipped = True else: self.accumulated_loss += reduce_loss.item() mean_task_loss = task_loss / torch.distributed.get_world_size() self.accumulated_task_loss += mean_task_loss.cpu().numpy() self.accumulated_task_num += task_num.cpu().numpy() loss.backward() self.global_steps += 1 self.famo.global_steps += 1 if step % self.args.gradient_accumulation_steps == 0 or step == len(self.train_dataloader) - 1: if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0: # 如果是fp16,需要unscale。如果是bf16,self.optimizer里没有unscale这个方法 try: self.optimizer.unscale_() except Exception: pass if isinstance(self.model, FSDP): self.model.clip_grad_norm_(self.args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_( self.model.parameters(), self.args.max_grad_norm) self.optimizer.step() overflow = hasattr(self.optimizer, "step_was_skipped") and self.optimizer.step_was_skipped if skipped != overflow: print(f'skipped != overflow!!!!!!!!!!!!!!!!') if not overflow: self.lr_scheduler.step() # if not skipped: # self.optimizer.step() # self.lr_scheduler.step() self.optimizer.zero_grad() if (self.args.weighted_loss_mode.startswith('famo_valid') or self.args.weighted_loss_mode == 'selfpaced') and not skipped: # if self.args.weighted_loss_mode.startswith('famo_valid') and not skipped 
and step % self.args.famo_interval == 0: # delete caches # gc.collect() # torch.cuda.empty_cache() # self.model.eval() # if (step // self.args.famo_interval) % self.valid_dataloader_length == 0: if step % self.valid_dataloader_length == 0: valid_iterator = iter(self.valid_dataloader) valid_step = step % self.valid_dataloader_length if self.famo_resume: self.famo_resume = False for i in range(valid_step): v_batch = next(valid_iterator) v_batch = next(valid_iterator) if self.args.weighted_loss_mode == 'selfpaced': self.valid_task_loss_prev = self.self_paced_evaluate(self.global_steps, epoch + 1, v_batch, self.ratio_valid_task_loss_prev) else: with torch.autocast(device_type='cuda', dtype=torch.bfloat16): valid_outputs = self.model( input_ids=v_batch['input_ids'].to(self.device), attention_mask=v_batch['attention_mask'].to(self.device), position_ids=v_batch['position_ids'].to(self.device) ) valid_loss, valid_task_loss, valid_task_num, _, _ = self.loss_func(valid_outputs, v_batch, self.args.weighted_loss_mode) if self.args.weighted_loss_mode.startswith('famo_valid_ema'): torch.distributed.all_reduce(valid_task_loss, op=torch.distributed.ReduceOp.SUM) valid_task_loss /= torch.distributed.get_world_size() self.valid_task_loss_prev = valid_task_loss.clone().detach() # if self.famo.first_valid_step and self.args.resume_from_checkpoint == 'true': if self.args.resume_from_checkpoint == 'true': self.ema_valid_task_loss_prev = self.valid_task_loss_prev # self.scaler.scale(valid_loss).backward() valid_loss.backward() valid_loss_item = valid_loss.item() if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0: # 如果是fp16,需要unscale。如果是bf16,self.optimizer里没有unscale这个方法 try: self.optimizer.unscale_() except Exception: pass if isinstance(self.model, FSDP): self.model.clip_grad_norm_(self.args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_( self.model.parameters(), self.args.max_grad_norm) # print_rank_0(f'valid_loss {valid_loss.item()}, task loss: {valid_task_loss}, valid step: {valid_step}') self.famo.update(valid_task_loss) self.optimizer.zero_grad() step_time = time.time() - step_start
step_tflops = get_tflops_megatron(self.args.total_model_param, self.args.hidden_size, self.args.num_hidden_layers,
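The next-line target above calls get_tflops_megatron, whose body is not included in this record. As a rough guide to what such a helper typically computes, here is a hedged Python sketch of a Megatron-style per-GPU throughput estimate; every name and the example numbers are illustrative, and the repo's actual formula may differ (for instance by adding an activation-recompute factor):

def estimate_step_tflops(total_params, hidden_size, num_layers,
                         batch_size, seq_len, step_time_s, num_gpus):
    # Forward + backward of a dense transformer costs roughly 6 FLOPs per
    # parameter per token, plus an attention term that scales with seq_len^2.
    tokens = batch_size * seq_len
    dense_flops = 6.0 * total_params * tokens
    attention_flops = 12.0 * num_layers * hidden_size * batch_size * seq_len ** 2
    return (dense_flops + attention_flops) / (step_time_s * num_gpus * 1e12)

# Example (hypothetical numbers): a 7B-parameter model on 16 GPUs,
# batch 64 x 4096 tokens, 5 s per optimizer step:
# estimate_step_tflops(7e9, 4096, 32, 64, 4096, 5.0, 16)  # ~159 TFLOPS per GPU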
0
2023-11-02 01:37:01+00:00
16k
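The trainer code in the record above gathers a full, unsharded model and optimizer state on rank 0 before writing atorch_checkpoint.bin. A minimal standalone sketch of that FSDP checkpointing pattern, assuming a model already wrapped in FullyShardedDataParallel; the model, optimizer and output path are placeholders, and the logic mirrors the record's own _save_atorch_checkpoint rather than adding anything new:

import os

import torch
import torch.distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import FullStateDictConfig, StateDictType


def save_full_checkpoint(model: FSDP, optimizer, output_dir: str, global_steps: int):
    # Gather the full (unsharded) model state dict, offloaded to CPU, on rank 0 only.
    save_policy = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
    with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, save_policy):
        model_state = model.state_dict()
    # Consolidate the sharded optimizer state the same way (the record notes this
    # API may be removed after PyTorch 2.2).
    optim_state = FSDP.full_optim_state_dict(model, optimizer)

    if dist.get_rank() == 0:
        os.makedirs(output_dir, exist_ok=True)
        torch.save(
            {
                "model_state_dict": model_state,
                "optimizer_state_dict": optim_state,
                "global_steps": global_steps,
            },
            os.path.join(output_dir, "atorch_checkpoint.bin"),
        )
    dist.barrier()  # other ranks wait until rank 0 has written the file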
bytedance/cryostar
projects/star/train_density.py
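The context list for this bytedance/cryostar record (below) revolves around a Fourier-space image formation model: a particle projection is FFT'ed with centered conventions, modulated by a contrast transfer function (CTF), and transformed back to real space. The following self-contained sketch of that pipeline uses only torch FFT primitives as a reading aid; the CTF here is a random placeholder, whereas the repo's CTFRelion/CTFCryoDRGN classes compute it from defocus, astigmatism and microscope parameters:

import torch


def primal_to_fourier_2d(img: torch.Tensor) -> torch.Tensor:
    # Centered 2D FFT, matching the convention in the record's fft_utils snippets.
    img = torch.fft.ifftshift(img, dim=(-2, -1))
    return torch.fft.fftshift(torch.fft.fftn(img, dim=(-2, -1)), dim=(-2, -1))


def fourier_to_primal_2d(f: torch.Tensor) -> torch.Tensor:
    f = torch.fft.ifftshift(f, dim=(-2, -1))
    return torch.fft.fftshift(torch.fft.ifftn(f, dim=(-2, -1)), dim=(-2, -1))


# Toy forward model: projection -> FFT -> CTF modulation -> back to real space.
B, D = 4, 128
proj = torch.randn(B, 1, D, D)          # stand-in for particle projections
ctf = torch.rand(B, D, D) * 2 - 1       # placeholder CTF values in [-1, 1]
fproj = primal_to_fourier_2d(proj)
fproj_ctf = fproj * ctf[:, None, :, :]  # same broadcasting as CTFCryoDRGN.forward
proj_ctf = fourier_to_primal_2d(fproj_ctf).real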
[ { "identifier": "StarfileDataSet", "path": "cryostar/utils/dataio.py", "snippet": "class StarfileDataSet(Dataset):\n\n def __init__(self, cfg: StarfileDatasetConfig):\n super().__init__()\n self.cfg = cfg\n self.df = starfile.read(Path(cfg.starfile_path))\n\n if \"optics\" in self.df:\n optics_df = self.df[\"optics\"]\n particles_df = self.df[\"particles\"]\n else:\n optics_df = None\n particles_df = self.df\n self.particles_df = particles_df\n\n if cfg.apix is None:\n if optics_df is not None and \"rlnImagePixelSize\" in optics_df:\n self.apix = float(optics_df[\"rlnImagePixelSize\"][0])\n print(f\"Infer dataset apix={self.apix} from first optic group.\")\n elif \"rlnDetectorPixelSize\" in particles_df and \"rlnMagnification\" in particles_df:\n self.apix = float(particles_df[\"rlnDetectorPixelSize\"][0] / particles_df[\"rlnMagnification\"][0] * 1e4)\n print(f\"Infer dataset apix={self.apix} from first particle meta data.\")\n else:\n raise AttributeError(\"Cannot parse apix from starfile, please set it in config by hand.\")\n else:\n self.apix = cfg.apix\n\n if cfg.side_shape is None:\n tmp_mrc_path = osp.join(cfg.dataset_dir, particles_df[\"rlnImageName\"][0].split('@')[-1])\n with mrcfile.mmap(tmp_mrc_path, mode=\"r\", permissive=True) as m:\n self.side_shape = m.data.shape[-1]\n print(f\"Infer dataset side_shape={self.side_shape} from the 1st particle.\")\n else:\n self.side_shape = cfg.side_shape\n\n self.num_proj = len(particles_df)\n\n self.down_side_shape = self.side_shape\n if cfg.down_side_shape is not None:\n self.down_side_shape = cfg.down_side_shape\n\n if cfg.mask_rad is not None:\n self.mask = Mask(self.down_side_shape, cfg.mask_rad)\n\n self.f_mu = None\n self.f_std = None\n\n def __len__(self):\n return self.num_proj\n\n def estimate_normalization(self):\n if self.f_mu is None and self.f_std is None:\n f_sub_data = []\n # I have checked that the standard deviation of 10/100/1000 particles is similar\n for i in range(0, len(self), len(self) // 100):\n f_sub_data.append(self[i][\"fproj\"])\n f_sub_data = torch.cat(f_sub_data, dim=0)\n # self.f_mu = torch.mean(f_sub_data)\n self.f_mu = 0.0 # just follow cryodrgn\n self.f_std = torch.std(f_sub_data).item()\n else:\n raise Exception(\"The normalization factor has been estimated!\")\n\n def __getitem__(self, idx):\n item_row = self.particles_df.iloc[idx]\n try:\n img_name_raw = item_row[\"rlnImageName\"]\n in_mrc_idx, img_name = item_row[\"rlnImageName\"].split(\"@\")\n in_mrc_idx = int(in_mrc_idx) - 1\n mrc_path = osp.join(self.cfg.dataset_dir, img_name)\n with mrcfile.mmap(mrc_path, mode=\"r\", permissive=True) as mrc:\n if mrc.data.ndim > 2:\n proj = torch.from_numpy(np.array(mrc.data[in_mrc_idx])).float() * self.cfg.scale_images\n else:\n # the mrcs file can contain only one particle\n proj = torch.from_numpy(np.array(mrc.data)).float() * self.cfg.scale_images\n\n # get (1, side_shape, side_shape) proj\n if len(proj.shape) == 2:\n proj = proj[None, :, :] # add a dummy channel (for consistency w/ img fmt)\n else:\n assert len(proj.shape) == 3 and proj.shape[0] == 1 # some starfile already have a dummy channel\n\n # down-sample\n if self.down_side_shape != self.side_shape:\n if self.cfg.down_method == \"interp\":\n proj = tvf.resize(proj, [self.down_side_shape, ] * 2, antialias=True)\n elif self.cfg.down_method == \"fft\":\n proj = downsample_2d(proj[0, :, :], self.down_side_shape)[None, :, :]\n else:\n raise NotImplementedError\n\n if self.cfg.mask_rad is not None:\n proj = self.mask(proj)\n\n except Exception as e:\n 
print(f\"WARNING: Particle image {img_name_raw} invalid! Setting to zeros.\")\n print(e)\n proj = torch.zeros(1, self.down_side_shape, self.down_side_shape)\n\n if self.cfg.power_images != 1.0:\n proj *= self.cfg.power_images\n\n # Generate CTF from CTF paramaters\n defocusU = torch.from_numpy(np.array(item_row[\"rlnDefocusU\"] / 1e4, ndmin=2)).float()\n defocusV = torch.from_numpy(np.array(item_row[\"rlnDefocusV\"] / 1e4, ndmin=2)).float()\n angleAstigmatism = torch.from_numpy(np.radians(np.array(item_row[\"rlnDefocusAngle\"], ndmin=2))).float()\n\n # Read \"GT\" orientations\n if self.cfg.ignore_rots:\n rotmat = torch.eye(3).float()\n else:\n # yapf: disable\n rotmat = torch.from_numpy(euler_angles2matrix(\n np.radians(-item_row[\"rlnAngleRot\"]),\n # np.radians(particle[\"rlnAngleTilt\"]) * (-1 if self.cfg.invert_hand else 1),\n np.radians(-item_row[\"rlnAngleTilt\"]),\n np.radians(-item_row[\"rlnAnglePsi\"]))\n ).float()\n # yapf: enable\n\n # Read \"GT\" shifts\n if self.cfg.ignore_trans:\n shiftX = torch.tensor([0.])\n shiftY = torch.tensor([0.])\n else:\n # support early starfile formats\n # Particle translations used to be in pixels (rlnOriginX and rlnOriginY) but this changed to Angstroms\n # (rlnOriginXAngstrom and rlnOriginYAngstrom) in relion 3.1.\n # https://relion.readthedocs.io/en/release-3.1/Reference/Conventions.html\n if \"rlnOriginXAngst\" in item_row:\n shiftX = torch.from_numpy(np.array(item_row[\"rlnOriginXAngst\"], dtype=np.float32))\n shiftY = torch.from_numpy(np.array(item_row[\"rlnOriginYAngst\"], dtype=np.float32))\n else:\n shiftX = torch.from_numpy(np.array(item_row[\"rlnOriginX\"] * self.apix, dtype=np.float32))\n shiftY = torch.from_numpy(np.array(item_row[\"rlnOriginY\"] * self.apix, dtype=np.float32))\n\n fproj = primal_to_fourier_2d(proj)\n\n if self.f_mu is not None:\n fproj = (fproj - self.f_mu) / self.f_std\n proj = fourier_to_primal_2d(fproj).real\n\n in_dict = {\n \"proj\": proj,\n \"rotmat\": rotmat,\n \"defocusU\": defocusU,\n \"defocusV\": defocusV,\n \"shiftX\": shiftX,\n \"shiftY\": shiftY,\n \"angleAstigmatism\": angleAstigmatism,\n \"idx\": torch.tensor(idx, dtype=torch.long),\n \"fproj\": fproj,\n \"imgname_raw\": img_name_raw\n }\n\n if \"rlnClassNumber\" in item_row:\n in_dict[\"class_id\"] = item_row[\"rlnClassNumber\"]\n\n return in_dict" }, { "identifier": "StarfileDatasetConfig", "path": "cryostar/utils/dataio.py", "snippet": "class StarfileDatasetConfig:\n dataset_dir: str\n starfile_path: str\n # if is not specified, the following apix, and side_shape will be inferred from starfile\n apix: float = None\n side_shape: int = None\n # down-sample the original image or not\n down_side_shape: int = None\n down_method: str = \"interp\"\n # apply a circular mask on input image or not\n mask_rad: float = None\n # change image values\n scale_images: float = 1.0\n power_images: float = field(\n default=1.0,\n metadata={\"help\": \"Change the power of the signal by multiplying a constant number.\"})\n # ignore pose from starfile or not\n ignore_trans: bool = False\n ignore_rots: bool = False\n # invert_hand: bool = field(\n # default=False,\n # metadata={\"help\": \"Invert handedness when reading relion data.\"})" }, { "identifier": "ImplicitFourierVolume", "path": "cryostar/nerf/volume_utils.py", "snippet": "class ImplicitFourierVolume(nn.Module):\n\n def __init__(self, z_dim, img_sz, mask_rad, params_implicit):\n \"\"\"\n Initialization of an implicit representation of the volume in Fourier space.\n\n Parameters\n ----------\n img_sz: int\n 
params_implicit: dictionary\n \"\"\"\n super().__init__()\n self.img_sz = img_sz\n self.z_dim = z_dim\n\n lincoords = torch.linspace(-1., 1., self.img_sz)\n [X, Y] = torch.meshgrid([lincoords, lincoords], indexing=\"ij\")\n coords = torch.stack([Y, X, torch.zeros_like(X)], dim=-1)\n coords = shift_coords(coords, 1., 1., 0, img_sz, img_sz, 1)\n self.register_buffer('plane_coords', coords.reshape(-1, 3))\n\n self.mask_rad = mask_rad\n if self.mask_rad != 1:\n mask = create_circular_mask(img_sz, img_sz, None, self.mask_rad / 2 * img_sz)\n plane_window_mask = torch.from_numpy(mask).reshape(-1)\n self.register_buffer('plane_window_mask', plane_window_mask)\n sphere_mask = torch.from_numpy(\n create_sphere_mask(self.img_sz, self.img_sz, self.img_sz, radius=self.mask_rad / 2 * self.img_sz)\n )\n self.register_buffer(\"sphere_mask\", sphere_mask)\n\n lincoords = torch.linspace(-1., 1., self.img_sz)\n [X, Y, Z] = torch.meshgrid([lincoords, lincoords, lincoords], indexing=\"ij\")\n coords = torch.stack([Z, Y, X], dim=-1)\n coords = shift_coords(coords, 1., 1., 1., img_sz, img_sz, img_sz)\n self.register_buffer('coords_3d', coords.reshape(-1, 3))\n\n self.fvol = FourierNet(net_type=params_implicit[\"net_type\"],\n z_dim=z_dim,\n pe_dim=params_implicit[\"pe_dim\"],\n pe_type=params_implicit[\"pe_type\"],\n D=params_implicit[\"D\"],\n hidden_dim=params_implicit[\"hidden\"],\n force_symmetry=params_implicit['force_symmetry'])\n\n def forward(self, z, rotmat):\n \"\"\"\n Generates a slice in Fourier space from a rotation matrix.\n\n Parameters\n ----------\n rotmat: torch.Tensor (B, 3, 3)\n\n Returns\n -------\n fplane: torch.Tensor (B, 1, img_sz, img_sz) (complex)\n \"\"\"\n if self.z_dim == 0:\n assert z is None\n batch_sz = rotmat.shape[0]\n\n with torch.autocast(\"cuda\", enabled=False):\n assert self.plane_coords.dtype == torch.float32\n assert rotmat.dtype == torch.float32\n rot_plane_coords = torch.bmm(self.plane_coords.repeat(batch_sz, 1, 1), rotmat) # B, img_sz^2, 3\n\n if self.mask_rad != 1:\n coords_mask = einops.repeat(self.plane_window_mask, \"num_coords -> bsz num_coords c3\", bsz=batch_sz, c3=3)\n rot_plane_coords = rot_plane_coords[coords_mask].reshape(batch_sz, -1, 3) # B, mask_num, 3\n\n fplane = self.fvol(z, rot_plane_coords) # B, _, 1/2\n\n if self.mask_rad != 1:\n unmask_fplane = fplane.new_zeros(batch_sz, self.img_sz * self.img_sz, self.fvol.out_features)\n value_mask = einops.repeat(self.plane_window_mask, \"num_coords -> bsz num_coords c\", bsz=batch_sz, c=self.fvol.out_features)\n unmask_fplane[value_mask] = fplane.reshape(-1)\n fplane = unmask_fplane.reshape(batch_sz, self.img_sz, self.img_sz, self.fvol.out_features)\n else:\n fplane = fplane.reshape(batch_sz, self.img_sz, self.img_sz, self.fvol.out_features)\n\n if self.fvol.out_features == 2:\n fplane = torch.view_as_complex(fplane) # B, img_sz, img_sz\n else:\n fplane = batch_hartley_to_fourier_2d(fplane.squeeze(-1)) # B, img_sz, img_sz\n\n fplane = fplane[:, None, :, :]\n return fplane\n\n def make_volume(self, z):\n with torch.no_grad():\n with torch.autocast(\"cuda\", enabled=False):\n coords = self.coords_3d.unsqueeze(0)\n num_coords = coords.shape[1]\n chunk_size = 128**2 * 32\n exp_fvol = []\n for sid in range(0, num_coords, chunk_size):\n eid = sid + chunk_size\n exp_fvol.append(self.fvol(z, coords[:, sid:eid]))\n exp_fvol = torch.cat(exp_fvol, dim=1)\n if self.fvol.out_features == 2:\n exp_fvol = exp_fvol.reshape(self.img_sz, self.img_sz, self.img_sz, 2)\n exp_fvol = torch.view_as_complex(exp_fvol)\n else:\n exp_fvol 
= exp_fvol.reshape(self.img_sz, self.img_sz, self.img_sz)\n exp_fvol = hartley_to_fourier_3d(exp_fvol)\n\n exp_fvol[~self.sphere_mask] = 0.0\n exp_vol = fourier_to_primal_3d(exp_fvol).real\n return exp_vol" }, { "identifier": "SpatialGridTranslate", "path": "cryostar/utils/transforms.py", "snippet": "class SpatialGridTranslate(torch.nn.Module):\n\n def __init__(self, D, device=None) -> None:\n super().__init__()\n self.D = D\n # yapf: disable\n coords = torch.stack(torch.meshgrid([\n torch.linspace(-1.0, 1.0, self.D, device=device),\n torch.linspace(-1.0, 1.0, self.D, device=device)],\n indexing=\"ij\"), dim=-1).reshape(-1, 2)\n # yapf: enable\n self.register_buffer(\"coords\", coords)\n\n def transform(self, images: torch.Tensor, trans: torch.Tensor):\n \"\"\"\n The `images` are stored in `YX` mode, so the `trans` is also `YX`!\n\n Supposing that D is 96, a point is at 0.0:\n - adding 48 should move it to the right corner which is 1.0\n 1.0 = 0.0 + 48 / (96 / 2)\n - adding 96(>48) should leave it at 0.0\n 0.0 = 0.0 + 96 / (96 / 2) - 2.0\n - adding -96(<48) should leave it at 0.0\n 0.0 = 0.0 - 96 / (96 / 2) + 2.0\n\n Input:\n images: (B, NY, NX)\n trans: (B, T, 2)\n\n Returns:\n images: (B, T, NY, NX)\n \"\"\"\n B, NY, NX = images.shape\n assert self.D == NY == NX\n assert images.shape[0] == trans.shape[0]\n\n grid = einops.rearrange(self.coords, \"N C2 -> 1 1 N C2\") - \\\n einops.rearrange(trans, \"B T C2 -> B T 1 C2\") * 2 / self.D\n grid = grid.flip(-1) # convert the first axis from slow-axis to fast-axis\n grid[grid >= 1] -= 2\n grid[grid <= -1] += 2\n grid.clamp_(-1.0, 1.0)\n\n sampled = F.grid_sample(einops.rearrange(images, \"B NY NX -> B 1 NY NX\"), grid, align_corners=True)\n\n sampled = einops.rearrange(sampled, \"B 1 T (NY NX) -> B T NY NX\", NX=NX, NY=NY)\n return sampled" }, { "identifier": "FourierGridTranslate", "path": "cryostar/utils/transforms.py", "snippet": "class FourierGridTranslate(torch.nn.Module):\n \"\"\"\n DFT's translation is:\n `f(x - x0, y - y0) <=> F(u, v) exp(-2 j \\pi (x0 u + y0 v) / N )`\n where `x, y, u, v` all have a range of `N`, so `(x0 u + y0 v) / N \\in (0, N)`\n\n Here we initialize the `u, v` coordinates between `(-0.5, 0.5)` so that the \n range is 1, where the `1/N` term can be ignored.\n\n See also: https://dsp.stackexchange.com/questions/40228/translation-property-of-2-d-discrete-fourier-transform\n\n Important notes:\n If `N=4`, the coordinates u will be `[-0.5, -0.17, 0.17, 0.5]`, but the \n `fft`ed image's frequency is `[-0.50, -0.25, 0.00, 0.25]`, so we have to \n add some corrections:\n - right-shift `u` to be `[-0.50, -0.25, 0.00, 0.25]`\n - perform multiplication\n\n \"\"\"\n\n def __init__(self, D, device=None) -> None:\n super().__init__()\n self.D = D\n # yapf: disable\n coords = torch.stack(torch.meshgrid([\n torch.linspace(-1.0, 1.0, self.D, device=device),\n torch.linspace(-1.0, 1.0, self.D, device=device)],\n indexing=\"ij\"), dim=-1).reshape(-1, 2) / 2\n # yapf: enable\n coords = shift_coords(coords, 0.5, 0.5, None, self.D, self.D, None, False)\n self.register_buffer(\"coords\", coords)\n\n def transform(self, images: torch.Tensor, trans: torch.Tensor):\n \"\"\"\n The `images` are stored in `YX` mode, so the `trans` is also `YX`!\n\n Input:\n images: (B, NY, NX)\n trans: (B, T, 2)\n\n Returns:\n images: (B, T, NY, NX)\n \"\"\"\n B, NY, NX = images.shape\n assert self.D == NY == NX\n assert images.shape[0] == trans.shape[0]\n images = einops.rearrange(images, \"B NY NX -> B 1 (NY NX)\")\n delta = trans @ self.coords.t() * -2j * 
torch.pi\n images_trans = torch.exp(delta) * images\n images_trans = einops.rearrange(images_trans, \"B T (NY NX) -> B T NY NX\", NY=self.D, NX=self.D)\n return images_trans" }, { "identifier": "CTFRelion", "path": "cryostar/utils/ctf_utils.py", "snippet": "class CTFRelion(CTFBase):\n \"\"\"\n BUG: There are two bugs in this file:\n 1. `self.angleFrequency` has some error for even-sized grid.\n 2. `local_defocus` in `get_ctf()` has some error, `angleAstigmatism` should be\n replaced with `defocusU - defocusV`.\n\n The bugs will not affect real-world data too much. But you may encounter some issues\n on simulated datasets. Use CTFCryoDRGN instead.\n \"\"\"\n\n def __init__(self,\n size=257,\n resolution=0.8,\n kV=300.0,\n valueNyquist=1.,\n defocusU=1.,\n defocusV=1.,\n angleAstigmatism=0.,\n cs=2.7,\n phasePlate=0.,\n amplitudeContrast=.1,\n bFactor=0.,\n num_particles=500,\n requires_grad=False,\n precompute=False,\n flip_images=False):\n super(CTFRelion, self).__init__(resolution, num_particles, requires_grad)\n self.requires_grad = requires_grad\n self.flip_images = flip_images\n\n self.size = size # in pixel\n self.resolution = resolution # in angstrom\n self.kV = kV # in kilovolt\n\n self.valueNyquist = valueNyquist\n self.phasePlate = phasePlate / 180. * np.pi # in radians (converted from degrees)\n self.amplitudeContrast = amplitudeContrast\n self.bFactor = bFactor\n\n self.frequency = 1. / self.resolution\n\n self.wavelength = self._get_ewavelength(self.kV * 1e3) # input in V (so we convert kv*1e3)\n\n angleAstigmatism = angleAstigmatism / 180. * np.pi # input in degree converted in radian\n cs = cs * 1e7 # input in mm converted in angstrom\n # the angleAstigmatism, defocusU, defocusV and cs are nn.Parameter of size (N, 1, 1)\n self.angleAstigmatism = nn.Parameter(angleAstigmatism * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n self.cs = nn.Parameter(cs * torch.ones((num_particles, 1, 1), dtype=torch.float32), requires_grad=requires_grad)\n self.defocusU = nn.Parameter(defocusU * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n self.defocusV = nn.Parameter(defocusV * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n\n self.precomputed_filters = precompute\n\n ax = torch.linspace(-1. / (2. * resolution), 1 / (2. * resolution), self.size)\n mx, my = torch.meshgrid(ax, ax, indexing=\"ij\")\n self.register_buffer(\"r2\", mx**2 + my**2)\n self.register_buffer(\"r\", torch.sqrt(self.r2))\n self.register_buffer(\"angleFrequency\", torch.atan2(my, mx))\n\n if not self.requires_grad and self.precomputed_filters:\n print(\"Precomputing hFourier in CTF\")\n self.register_buffer('hFourier', self.get_ctf(torch.arange(num_particles), num_particles))\n\n def _get_ewavelength(self, U):\n # assumes V as input, returns wavelength in angstrom\n h = scipy.constants.h\n e = scipy.constants.e\n c = scipy.constants.c\n m0 = scipy.constants.m_e\n\n return h / math.sqrt(2. * m0 * e * U) / math.sqrt(1 + e * U / (2 * m0 * c**2)) * 1e10\n\n def get_ctf(self, idcs, B, cpu_params={}, frequency_marcher=None):\n defocusU = self.defocusU[idcs, :, :]\n defocusV = self.defocusV[idcs, :, :]\n angleAstigmatism = self.angleAstigmatism[idcs, :, :]\n cs = self.cs[idcs, :, :]\n\n ac = self.amplitudeContrast\n pc = math.sqrt(1. - ac**2)\n K1 = np.pi / 2. 
* cs * self.wavelength**3\n K2 = np.pi * self.wavelength\n\n # Cut-off from frequency marcher\n if frequency_marcher is not None:\n self.size_after_fm = 2 * frequency_marcher.f + 1\n if self.size_after_fm > self.size:\n self.size_after_fm = self.size\n angleFrequency = frequency_marcher.cut_coords_plane(self.angleFrequency.reshape(\n self.size, self.size, 1)).reshape(self.size_after_fm, self.size_after_fm)\n r2 = frequency_marcher.cut_coords_plane(self.r2.reshape(self.size, self.size,\n 1)).reshape(self.size_after_fm, self.size_after_fm)\n else:\n self.size_after_fm = self.size\n angleFrequency = self.angleFrequency\n r2 = self.r2\n\n angle = angleFrequency - angleAstigmatism\n local_defocus = 1e4 * (defocusU + defocusV) / 2. + angleAstigmatism * torch.cos(2. * angle)\n\n gamma = K1 * r2**2 - K2 * r2 * local_defocus - self.phasePlate\n hFourier = -pc * torch.sin(gamma) + ac * torch.cos(gamma)\n\n if self.valueNyquist != 1:\n decay = np.sqrt(-np.log(self.valueNyquist)) * 2. * self.resolution\n envelope = torch.exp(-self.frequency * decay**2 * r2)\n hFourier *= envelope\n\n return hFourier\n\n def oversample_multiply_crop(self, x_fourier, hFourier):\n # we assume that the shape of the CTF is always going to be bigger\n # than the size of the input image\n input_sz = x_fourier.shape[-1]\n if input_sz != self.size_after_fm:\n x_primal = fourier_to_primal_2d(x_fourier)\n\n pad_len = (self.size_after_fm - x_fourier.shape[-1]) // 2 # here we assume even lengths\n p2d = (pad_len, pad_len, pad_len, pad_len)\n x_primal_padded = F.pad(x_primal, p2d, 'constant', 0)\n\n x_fourier_padded = primal_to_fourier_2d(x_primal_padded)\n\n x_fourier_padded_filtered = x_fourier_padded * hFourier[:, None, :, :]\n return x_fourier_padded_filtered[..., pad_len:-pad_len, pad_len:-pad_len]\n else:\n return x_fourier * hFourier[:, None, :, :]\n\n def get_cpu_params(self, idcs, ctf_params, flip=False):\n batch_size = idcs.shape[0]\n self.defocusU[idcs, :, :] = ctf_params['defocusU'][:batch_size] if not flip else\\\n ctf_params['defocusU'][batch_size:]\n self.defocusV[idcs, :, :] = ctf_params['defocusV'][:batch_size] if not flip else\\\n ctf_params['defocusV'][batch_size:]\n self.angleAstigmatism[idcs, :, :] = ctf_params['angleAstigmatism'][:batch_size] if not flip else\\\n ctf_params['angleAstigmatism'][batch_size:]\n cpu_params = {}\n return cpu_params\n\n def forward(self, x_fourier, idcs=0, ctf_params={}, mode='gt', frequency_marcher=None):\n # This is when we want to prescribe parameters for the CTF\n if x_fourier.dim() == 3:\n x_fourier = x_fourier[None, ...]\n # x_fourier: B, 1, S, S\n batch_size = len(idcs)\n cpu_params = {}\n if ctf_params:\n cpu_params = self.get_cpu_params(idcs, ctf_params, flip=False)\n\n # if new params for the CTF have been prescribed or we are optimizing it\n # then request the evaluation of the CTF\n if not ctf_params and self.precomputed_filters and not self.requires_grad:\n hFourier = self.hFourier[idcs, :, :]\n else:\n hFourier = self.get_ctf(idcs, batch_size, cpu_params=cpu_params, frequency_marcher=frequency_marcher)\n\n if self.flip_images:\n flipped_hFourier = torch.flip(hFourier, [1, 2])\n\n hFourier = torch.cat([hFourier, flipped_hFourier], dim=0)\n\n return self.oversample_multiply_crop(x_fourier, hFourier)" }, { "identifier": "CTFCryoDRGN", "path": "cryostar/utils/ctf_utils.py", "snippet": "class CTFCryoDRGN(CTFBase):\n\n def __init__(self,\n size,\n resolution,\n num_particles=None,\n kV=300,\n cs=2.0,\n amplitudeContrast=0.1,\n requires_grad=False):\n super(CTFBase, 
self).__init__()\n self.size = size\n self.resolution = resolution\n self.requires_grad = requires_grad\n self.kV = kV\n self.cs = cs\n self.ac = amplitudeContrast\n # ax = torch.linspace(-1. / (2. * resolution), 1 / (2. * resolution), self.size)\n # mx, my = torch.meshgrid(ax, ax, indexing=\"ij\")\n ax = torch.fft.fftshift(torch.fft.fftfreq(self.size, self.resolution))\n mx, my = torch.meshgrid(ax, ax, indexing=\"xy\")\n freqs = torch.stack([mx.flatten(), my.flatten()], 1)\n self.register_buffer(\"freqs\", freqs)\n\n def get_ctf(self, ctf_params={}):\n bsz = len(ctf_params[\"defocusU\"])\n device = self.freqs.device\n hFourier = compute_ctf(freqs=self.freqs.repeat(bsz, 1, 1),\n dfu=(ctf_params[\"defocusU\"] * 1e4).squeeze(1),\n dfv=(ctf_params[\"defocusV\"] * 1e4).squeeze(1),\n dfang=torch.rad2deg(ctf_params[\"angleAstigmatism\"]).squeeze(1),\n volt=torch.tensor(self.kV, device=device).repeat(bsz, 1),\n cs=torch.tensor(self.cs, device=device).repeat(bsz, 1),\n w=torch.tensor(self.ac, device=device).repeat(bsz,\n 1)).reshape(bsz, self.size, self.size)\n return hFourier\n\n def forward(self, x_fourier, idcs=0, ctf_params={}, mode='gt', frequency_marcher=None):\n hFourier = -self.get_ctf(ctf_params)\n return x_fourier * hFourier[:, None, :, :]" }, { "identifier": "fourier_to_primal_2d", "path": "cryostar/utils/fft_utils.py", "snippet": "def fourier_to_primal_2d(f: torch.Tensor) -> torch.Tensor:\n f = torch.fft.ifftshift(f, dim=(-2, -1))\n return torch.fft.fftshift(torch.fft.ifftn(f, s=(f.shape[-2], f.shape[-1]), dim=(-2, -1)), dim=(-2, -1))" }, { "identifier": "primal_to_fourier_2d", "path": "cryostar/utils/fft_utils.py", "snippet": "@torch.autocast(\"cuda\")\ndef primal_to_fourier_2d(r: torch.Tensor) -> torch.Tensor:\n with torch.autocast(\"cuda\", enabled=False):\n r = torch.fft.ifftshift(r.float(), dim=(-2, -1))\n f = torch.fft.fftshift(torch.fft.fftn(r, s=(r.shape[-2], r.shape[-1]), dim=(-2, -1)), dim=(-2, -1))\n return f" }, { "identifier": "sample_along_pca", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def sample_along_pca(z: np.ndarray, pca_dim=1, num=5) -> np.ndarray:\n assert isinstance(z, np.ndarray)\n pc, pca = run_pca(z)\n start = np.percentile(pc[:, pca_dim - 1], 5)\n stop = np.percentile(pc[:, pca_dim - 1], 95)\n z_pc_traj = get_pc_traj(pca, z.shape[1], num, pca_dim, start, stop)\n point, point_id = get_nearest_point(z, z_pc_traj)\n return point, point_id" }, { "identifier": "get_nearest_point", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def get_nearest_point(data: np.ndarray, query: np.ndarray) -> Tuple[npt.NDArray[np.float32], np.ndarray]:\n \"\"\"\n Find closest point in @data to @query\n Return datapoint, index\n \"\"\"\n ind = cdist(query, data).argmin(axis=1)\n return data[ind], ind" }, { "identifier": "cluster_kmeans", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def cluster_kmeans(z: np.ndarray, K: int, on_data: bool = True, reorder: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Cluster z by K means clustering\n Returns cluster labels, cluster centers\n If reorder=True, reorders clusters according to agglomerative clustering of cluster centers\n \"\"\"\n kmeans = KMeans(n_clusters=K, n_init=10, random_state=0, max_iter=10)\n labels = kmeans.fit_predict(z)\n centers = kmeans.cluster_centers_\n\n centers_ind = None\n if on_data:\n centers, centers_ind = get_nearest_point(z, centers)\n\n if reorder:\n # BUG from seaborn or scipy:\n # sns.clustermap only supports data with at least 2 dim\n if z.shape[1] == 1:\n 
centers = np.hstack([centers, np.zeros_like(centers)])\n g = sns.clustermap(centers)\n reordered = g.dendrogram_row.reordered_ind\n centers = centers[reordered]\n if centers_ind is not None:\n centers_ind = centers_ind[reordered]\n tmp = {k: i for i, k in enumerate(reordered)}\n labels = np.array([tmp[k] for k in labels])\n if z.shape[1] == 1:\n centers = centers[:, :1]\n return labels, centers" }, { "identifier": "pl_init_exp", "path": "cryostar/utils/misc.py", "snippet": "def set_seed(seed: int = 42):\ndef chain(arg, *funcs):\ndef convert_to_numpy(*args):\ndef CHECK_SHAPE(tensor, expected_shape):\ndef ASSERT_SHAPE(tensor, expected_shape):\ndef parse_mmengine_args(override_mode=\"default\"):\ndef flatten_nested_dict(nested: Union[dict, Config]) -> dict:\ndef warmup(warmup_step, lower=0.0, upper=1.0):\n def run(cur_step):\ndef init_mmengine_config(args):\ndef init_mmengine_exp(args,\n exp_prefix='',\n backup_list=None,\n inplace=True,\n work_dir_name=\"work_dirs\",\n project_name=\"cryostar\",\n tensorboard=False):\ndef _get_next_version(root_dir, dir_name_prefix):\ndef pl_init_exp(override_mode=\"default\",\n exp_prefix='',\n backup_list=None,\n inplace=False,\n work_dir_name=\"work_dirs\",\n project_name=\"cryostar\"):\ndef save_pdb(CAs, path, ref_pdb_path):\ndef load_CAs_from_pdb(file):\ndef load_NCaC_from_pdb(file):\ndef load_chain_A(pdb_path):\ndef points_to_pdb(path_to_save, points: np.ndarray):\ndef point_stack_to_pdb(path_to_save, point_stack: np.ndarray):\ndef find_rigid_alignment(A, B):\ndef batch_find_rigid_alignment(A, B):\ndef pretty_dict(x, precision=3):\ndef create_sphere_mask(d, h, w, center=None, radius=None) -> np.ndarray:\ndef create_circular_mask(h, w, center=None, radius=None) -> np.ndarray:\n H = A_c.T.mm(B_c)\n U, S, V = torch.svd(H)\n R = V.mm(U.T)\n H = einops.einsum(A_c, B_c, \"b n c1, b n c2 -> b c1 c2\")\n V = VmT.mT\n R = einops.einsum(V, U.transpose(2, 1), \"b c1 c2, b c2 c3 -> b c1 c3\")" }, { "identifier": "calc_kl_loss", "path": "cryostar/utils/losses.py", "snippet": "def calc_kl_loss(mu, log_var, free_bits, reduction=\"mean\"):\n kld_loss = -0.5 * (1 + log_var - mu.pow(2) - log_var.exp())\n # free bits\n kld_loss = torch.clamp(kld_loss, free_bits) # (bsz, z-dim)\n kld_loss = torch.mean(kld_loss, dim=1) # (bsz, )\n if reduction == \"mean\":\n kld_loss = torch.mean(kld_loss) # averaged over bsz x z-dim\n elif reduction == \"none\":\n kld_loss = kld_loss\n else:\n raise NotImplementedError\n return kld_loss" }, { "identifier": "VAEEncoder", "path": "cryostar/utils/ml_modules.py", "snippet": "class VAEEncoder(nn.Module):\n\n def __init__(self, in_dim: int, hidden_dim: Union[int, List[int]], out_dim: int, num_hidden_layers=3):\n super().__init__()\n self.in_dim = in_dim\n if isinstance(hidden_dim, int):\n self.hidden_dim = (hidden_dim, ) * num_hidden_layers\n elif isinstance(hidden_dim, (list, tuple)):\n assert len(hidden_dim) == num_hidden_layers\n self.hidden_dim = hidden_dim\n else:\n raise NotImplementedError\n self.out_dim = out_dim\n self.num_hidden_layers = num_hidden_layers\n\n self.input_layer = nn.Sequential(\n ResLinear(in_dim, self.hidden_dim[0]) if in_dim == self.hidden_dim[0] else Linear(\n in_dim, self.hidden_dim[0]), nn.ReLU(inplace=True))\n self.mlp = MLP(self.hidden_dim[:-1], self.hidden_dim[1:])\n\n self.mean_layer = Linear(self.hidden_dim[-1], out_dim)\n self.var_layer = Linear(self.hidden_dim[-1], out_dim)\n\n def forward(self, x):\n x = self.mlp(self.input_layer(x))\n mean = self.mean_layer(x)\n log_var = self.var_layer(x)\n return mean, 
log_var" }, { "identifier": "reparameterize", "path": "cryostar/utils/ml_modules.py", "snippet": "def reparameterize(mu, log_var):\n std = torch.exp(0.5 * log_var)\n eps = torch.randn_like(std)\n return mu + eps * std" }, { "identifier": "save_mrc", "path": "cryostar/utils/mrc_tools.py", "snippet": "def save_mrc(vol,\n path,\n voxel_size: Union[int, float, Tuple, np.recarray] = None,\n origin: Union[int, float, Tuple, np.recarray] = None):\n \"\"\"\n Save volumetric data to mrc file, set voxel_size, origin.\n See Also: https://mrcfile.readthedocs.io/en/stable/source/mrcfile.html#mrcfile.mrcobject.MrcObject.voxel_size\n Args:\n vol: density volume\n path: save path\n voxel_size: a single number, a 3-tuple (x, y ,z) or a modified version of the voxel_size array, default 1.\n origin: a single number, a 3-tuple (x, y ,z) or a modified version of the origin array, default 0.\n\n \"\"\"\n with mrcfile.new(path, overwrite=True) as m:\n m.set_data(vol)\n\n if voxel_size is not None:\n m.voxel_size = voxel_size\n\n if origin is not None:\n m.header.origin = origin" } ]
import os
import os.path as osp
import einops
import lightning.pytorch as pl
import numpy as np
import torch
from lightning.pytorch.strategies import DDPStrategy
from lightning.pytorch.utilities import rank_zero_only
from torch.utils.data import DataLoader
from tqdm import tqdm
from mmengine import mkdir_or_exist
from cryostar.utils.dataio import StarfileDataSet, StarfileDatasetConfig
from cryostar.nerf.volume_utils import ImplicitFourierVolume
from cryostar.utils.transforms import SpatialGridTranslate, FourierGridTranslate
from cryostar.utils.ctf_utils import CTFRelion, CTFCryoDRGN
from cryostar.utils.fft_utils import (fourier_to_primal_2d, primal_to_fourier_2d)
from cryostar.utils.latent_space_utils import sample_along_pca, get_nearest_point, cluster_kmeans
from cryostar.utils.misc import (pl_init_exp, create_circular_mask, log_to_current, pretty_dict)
from cryostar.utils.losses import calc_kl_loss
from cryostar.utils.ml_modules import VAEEncoder, reparameterize
from cryostar.utils.mrc_tools import save_mrc
from miscs import infer_ctf_params_from_config
10,998
log_to_current = rank_zero_only(log_to_current)

TASK_NAME = "density"


class CryoModel(pl.LightningModule):

    def __init__(self, cfg, dataset):
        super().__init__()
        self.cfg = cfg
        self.dataset = dataset
        self.z_dim = cfg.model.z_dim
        self.history_saved_dirs = []
        if cfg.extra_input_data_attr.given_z is None and self.z_dim != 0:
            if cfg.model.enc_space == "real":
                self.encoder = VAEEncoder(self.cfg.data_process.down_side_shape**2,
                                          cfg.model.hidden,
                                          self.z_dim,
                                          num_hidden_layers=4)
            elif cfg.model.enc_space == "fourier":
                self.encoder = VAEEncoder(2 * self.cfg.data_process.down_side_shape**2,
                                          cfg.model.hidden,
                                          self.z_dim,
                                          num_hidden_layers=4)
            else:
                raise NotImplementedError
        if cfg.model.shift_method == "interp":
            self.translate = SpatialGridTranslate(self.cfg.data_process.down_side_shape, )
            log_to_current("We will deprecate `model.shift_method=interp` in a future version, use `model.shift_method=fft` instead.")
        elif cfg.model.shift_method == "fft":
            self.f_translate = FourierGridTranslate(self.cfg.data_process.down_side_shape, )
        else:
            raise NotImplementedError

        ctf_params = infer_ctf_params_from_config(cfg)
        if cfg.model.ctf == "v1":
            self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset))
            log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.")
        elif cfg.model.ctf == "v2":
            self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset))
        else:
            raise NotImplementedError
        log_to_current(ctf_params)

        self.vol = ImplicitFourierVolume(
            self.z_dim, self.cfg.data_process.down_side_shape, self.cfg.loss.mask_rad_for_image_loss, {
                "net_type": cfg.model.net_type,
                "pe_dim": self.cfg.data_process.down_side_shape,
                "D": self.cfg.data_process.down_side_shape,
                "pe_type": cfg.model.pe_type,
                "force_symmetry": False,
                "hidden": cfg.model.hidden,
            })
        mask = create_circular_mask(self.cfg.data_process.down_side_shape,
                                    self.cfg.data_process.down_side_shape,
                                    None,
                                    self.cfg.data_process.down_side_shape // 2 * self.cfg.loss.mask_rad_for_image_loss, )
        self.register_buffer("mask", torch.from_numpy(mask))
        if cfg.extra_input_data_attr.given_z is not None:
            self.register_buffer("given_z", torch.from_numpy(np.load(cfg.extra_input_data_attr.given_z)))

        if getattr(self.cfg.extra_input_data_attr, "ckpt_path", None) is not None:
            log_to_current(f"load checkpoint from {self.cfg.extra_input_data_attr.ckpt_path}")
            state_dict = torch.load(self.cfg.extra_input_data_attr.ckpt_path, map_location=self.device)
            self.vol.load_state_dict(state_dict)

    def _get_save_dir(self):
        save_dir = os.path.join(self.cfg.work_dir, f"{self.current_epoch:04d}_{self.global_step:07d}")
        mkdir_or_exist(save_dir)
        return save_dir

    def process_image(self, batch):
        R = batch["rotmat"]
        bsz = len(R)
        trans = torch.cat([
            batch["shiftY"].float().reshape(bsz, 1, 1) / self.cfg.data_process.down_apix,
            batch["shiftX"].float().reshape(bsz, 1, 1) / self.cfg.data_process.down_apix
        ], dim=2)
        proj_in = batch["proj"].to(self.device)
        if self.cfg.model.shift_method == "interp":
            proj = self.translate.transform(proj_in.squeeze(1), trans.to(self.device))
        elif self.cfg.model.shift_method == "fft":
            fproj = primal_to_fourier_2d(proj_in)
            fproj = self.f_translate.transform(fproj.squeeze(1), trans.to(self.device))
            proj = fourier_to_primal_2d(fproj)
        if self.cfg.model.shift_data:
            return proj, proj
        else:
            return proj_in, proj

    def training_step(self, batch, batch_idx):
        R = batch["rotmat"]
        bsz = len(R)
        proj_in, proj_out = self.process_image(batch)
        f_proj_in = primal_to_fourier_2d(proj_in)
        if self.z_dim != 0:
            if self.cfg.extra_input_data_attr.given_z is not None:
                z = self.given_z[batch["idx"]].reshape(bsz, -1)
                kld_loss = 0.0
            else:
                if self.cfg.model.enc_space == "fourier":
                    enc_input = einops.rearrange(torch.view_as_real(f_proj_in), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2)
                elif self.cfg.model.enc_space == "real":
                    enc_input = einops.rearrange(proj_in, "b 1 ny nx -> b (1 ny nx)")
                mu, log_var = self.encoder(enc_input)
z = reparameterize(mu, log_var)
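For illustration, a minimal, self-contained sketch (assuming only `torch`; the toy tensors are hypothetical stand-ins for the encoder output `mu, log_var = self.encoder(enc_input)`, not part of the dataset record) of how this target line and the KL term fit together, using the `reparameterize` and `calc_kl_loss` helpers quoted in the context above:

import torch


def reparameterize(mu, log_var):
    # Reparameterization trick, as quoted from cryostar/utils/ml_modules.py above.
    std = torch.exp(0.5 * log_var)
    eps = torch.randn_like(std)
    return mu + eps * std


def calc_kl_loss(mu, log_var, free_bits, reduction="mean"):
    # KL divergence with a free-bits clamp, as quoted from cryostar/utils/losses.py above.
    kld_loss = -0.5 * (1 + log_var - mu.pow(2) - log_var.exp())
    kld_loss = torch.clamp(kld_loss, free_bits)  # (bsz, z_dim)
    kld_loss = torch.mean(kld_loss, dim=1)       # (bsz, )
    return torch.mean(kld_loss) if reduction == "mean" else kld_loss


# Toy stand-ins for the encoder output.
mu, log_var = torch.zeros(4, 8), torch.zeros(4, 8)
z = reparameterize(mu, log_var)                  # the target completion of this record
kld_loss = calc_kl_loss(mu, log_var, free_bits=0.01)
print(z.shape, kld_loss.item())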
15
2023-11-06 07:15:26+00:00
16k
UMass-Foundation-Model/CoVLM
transformers/src/transformers/models/sew/configuration_sew.py
[ { "identifier": "PretrainedConfig", "path": "transformers/src/transformers/configuration_utils.py", "snippet": "class PretrainedConfig(PushToHubMixin):\n r\"\"\"\n Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as\n methods for loading/downloading/saving configurations.\n\n <Tip>\n\n A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to\n initialize a model does **not** load the model weights. It only affects the model's configuration.\n\n </Tip>\n\n Class attributes (overridden by derived classes):\n\n - **model_type** (`str`) -- An identifier for the model type, serialized into the JSON file, and used to recreate\n the correct object in [`~transformers.AutoConfig`].\n - **is_composition** (`bool`) -- Whether the config class is composed of multiple sub-configs. In this case the\n config has to be initialized from two or more configs of type [`~transformers.PretrainedConfig`] like:\n [`~transformers.EncoderDecoderConfig`] or [`~RagConfig`].\n - **keys_to_ignore_at_inference** (`List[str]`) -- A list of keys to ignore by default when looking at dictionary\n outputs of the model during inference.\n - **attribute_map** (`Dict[str, str]`) -- A dict that maps model specific attribute names to the standardized\n naming of attributes.\n\n Common attributes (present in all subclasses):\n\n - **vocab_size** (`int`) -- The number of tokens in the vocabulary, which is also the first dimension of the\n embeddings matrix (this attribute may be missing for models that don't have a text modality like ViT).\n - **hidden_size** (`int`) -- The hidden size of the model.\n - **num_attention_heads** (`int`) -- The number of attention heads used in the multi-head attention layers of the\n model.\n - **num_hidden_layers** (`int`) -- The number of blocks in the model.\n\n Arg:\n name_or_path (`str`, *optional*, defaults to `\"\"`):\n Store the string that was passed to [`PreTrainedModel.from_pretrained`] or\n [`TFPreTrainedModel.from_pretrained`] as `pretrained_model_name_or_path` if the configuration was created\n with such a method.\n output_hidden_states (`bool`, *optional*, defaults to `False`):\n Whether or not the model should return all hidden-states.\n output_attentions (`bool`, *optional*, defaults to `False`):\n Whether or not the model should returns all attentions.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return a [`~transformers.utils.ModelOutput`] instead of a plain tuple.\n is_encoder_decoder (`bool`, *optional*, defaults to `False`):\n Whether the model is used as an encoder/decoder or not.\n is_decoder (`bool`, *optional*, defaults to `False`):\n Whether the model is used as decoder or not (in which case it's used as an encoder).\n cross_attention_hidden_size** (`bool`, *optional*):\n The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder\n setting and the cross-attention hidden dimension differs from `self.config.hidden_size`.\n add_cross_attention (`bool`, *optional*, defaults to `False`):\n Whether cross-attention layers should be added to the model. 
Note, this option is only relevant for models\n that can be used as decoder models within the [`EncoderDecoderModel`] class, which consists of all models\n in `AUTO_MODELS_FOR_CAUSAL_LM`.\n tie_encoder_decoder (`bool`, *optional*, defaults to `False`):\n Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder\n and decoder model to have the exact same parameter names.\n prune_heads (`Dict[int, List[int]]`, *optional*, defaults to `{}`):\n Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of\n heads to prune in said layer.\n\n For instance `{1: [0, 2], 2: [2, 3]}` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.\n chunk_size_feed_forward (`int`, *optional*, defaults to `0`):\n The chunk size of all feed forward layers in the residual attention blocks. A chunk size of `0` means that\n the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes `n` <\n sequence_length embeddings at a time. For more information on feed forward chunking, see [How does Feed\n Forward Chunking work?](../glossary.html#feed-forward-chunking).\n\n > Parameters for sequence generation\n\n max_length (`int`, *optional*, defaults to 20):\n Maximum length that will be used by default in the `generate` method of the model.\n min_length (`int`, *optional*, defaults to 0):\n Minimum length that will be used by default in the `generate` method of the model.\n do_sample (`bool`, *optional*, defaults to `False`):\n Flag that will be used by default in the `generate` method of the model. Whether or not to use sampling ;\n use greedy decoding otherwise.\n early_stopping (`bool`, *optional*, defaults to `False`):\n Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search\n when at least `num_beams` sentences are finished per batch or not.\n num_beams (`int`, *optional*, defaults to 1):\n Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means\n no beam search.\n num_beam_groups (`int`, *optional*, defaults to 1):\n Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams\n that will be used by default in the `generate` method of the model. 1 means no group beam search.\n diversity_penalty (`float`, *optional*, defaults to 0.0):\n Value to control diversity for group beam search. that will be used by default in the `generate` method of\n the model. 0 means no diversity penalty. The higher the penalty, the more diverse are the outputs.\n temperature (`float`, *optional*, defaults to 1.0):\n The value used to module the next token probabilities that will be used by default in the `generate` method\n of the model. Must be strictly positive.\n top_k (`int`, *optional*, defaults to 50):\n Number of highest probability vocabulary tokens to keep for top-k-filtering that will be used by default in\n the `generate` method of the model.\n top_p (`float`, *optional*, defaults to 1):\n Value that will be used by default in the `generate` method of the model for `top_p`. 
If set to float < 1,\n only the most probable tokens with probabilities that add up to `top_p` or higher are kept for generation.\n typical_p (`float`, *optional*, defaults to 1):\n Local typicality measures how similar the conditional probability of predicting a target token next is to\n the expected conditional probability of predicting a random token next, given the partial text already\n generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that\n add up to `typical_p` or higher are kept for generation. See [this\n paper](https://arxiv.org/pdf/2202.00666.pdf) for more details.\n repetition_penalty (`float`, *optional*, defaults to 1):\n Parameter for repetition penalty that will be used by default in the `generate` method of the model. 1.0\n means no penalty.\n length_penalty (`float`, *optional*, defaults to 1):\n Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to\n the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log\n likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while\n `length_penalty` < 0.0 encourages shorter sequences.\n no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by default in the\n `generate` method of the model for `no_repeat_ngram_size`. If set to int > 0, all ngrams of that size can\n only occur once.\n encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by\n default in the `generate` method of the model for `encoder_no_repeat_ngram_size`. If set to int > 0, all\n ngrams of that size that occur in the `encoder_input_ids` cannot occur in the `decoder_input_ids`.\n bad_words_ids (`List[int]`, *optional*):\n List of token ids that are not allowed to be generated that will be used by default in the `generate`\n method of the model. In order to get the tokens of the words that should not appear in the generated text,\n use `tokenizer.encode(bad_word, add_prefix_space=True)`.\n num_return_sequences (`int`, *optional*, defaults to 1):\n Number of independently computed returned sequences for each element in the batch that will be used by\n default in the `generate` method of the model.\n output_scores (`bool`, *optional*, defaults to `False`):\n Whether the model should return the logits when used for generation.\n return_dict_in_generate (`bool`, *optional*, defaults to `False`):\n Whether the model should return a [`~transformers.utils.ModelOutput`] instead of a `torch.LongTensor`.\n forced_bos_token_id (`int`, *optional*):\n The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for\n multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target\n language token.\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached.\n remove_invalid_values (`bool`, *optional*):\n Whether to remove possible _nan_ and _inf_ outputs of the model to prevent the generation method to crash.\n Note that using `remove_invalid_values` can slow down generation.\n\n > Parameters for fine-tuning tasks\n\n architectures (`List[str]`, *optional*):\n Model architectures that can be used with the model pretrained weights.\n finetuning_task (`str`, *optional*):\n Name of the task used to fine-tune the model. 
This can be used when converting from an original (TensorFlow\n or PyTorch) checkpoint.\n id2label (`Dict[int, str]`, *optional*):\n A map from index (for instance prediction index, or target index) to label.\n label2id (`Dict[str, int]`, *optional*): A map from label to index for the model.\n num_labels (`int`, *optional*):\n Number of labels to use in the last layer added to the model, typically for a classification task.\n task_specific_params (`Dict[str, Any]`, *optional*):\n Additional keyword arguments to store for the current task.\n problem_type (`str`, *optional*):\n Problem type for `XxxForSequenceClassification` models. Can be one of `\"regression\"`,\n `\"single_label_classification\"` or `\"multi_label_classification\"`.\n\n > Parameters linked to the tokenizer\n\n tokenizer_class (`str`, *optional*):\n The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated to the\n model by default).\n prefix (`str`, *optional*):\n A specific prompt that should be added at the beginning of each text before calling the model.\n bos_token_id (`int`, *optional*): The id of the _beginning-of-stream_ token.\n pad_token_id (`int`, *optional*): The id of the _padding_ token.\n eos_token_id (`int`, *optional*): The id of the _end-of-stream_ token.\n decoder_start_token_id (`int`, *optional*):\n If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token.\n sep_token_id (`int`, *optional*): The id of the _separation_ token.\n\n > PyTorch specific parameters\n\n torchscript (`bool`, *optional*, defaults to `False`):\n Whether or not the model should be used with Torchscript.\n tie_word_embeddings (`bool`, *optional*, defaults to `True`):\n Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the\n model has a output word embedding layer.\n torch_dtype (`str`, *optional*):\n The `dtype` of the weights. This attribute can be used to initialize the model to a non-default `dtype`\n (which is normally `float32`) and thus allow for optimal storage allocation. For example, if the saved\n model is `float16`, ideally we want to load it back using the minimal amount of memory needed to load\n `float16` weights. Since the config object is stored in plain text, this attribute contains just the\n floating type string without the `torch.` prefix. For example, for `torch.float16` ``torch_dtype` is the\n `\"float16\"` string.\n\n This attribute is currently not being used during model loading time, but this may change in the future\n versions. But we can already start preparing for the future by saving the dtype with save_pretrained.\n\n > TensorFlow specific parameters\n\n use_bfloat16 (`bool`, *optional*, defaults to `False`):\n Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models).\n tf_legacy_loss (`bool`, *optional*, defaults to `False`):\n Whether the model should use legacy TensorFlow losses. Legacy losses have variable output shapes and may\n not be XLA-compatible. 
This option is here for backward compatibility and will be removed in Transformers\n v5.\n \"\"\"\n model_type: str = \"\"\n is_composition: bool = False\n attribute_map: Dict[str, str] = {}\n _auto_class: Optional[str] = None\n\n def __setattr__(self, key, value):\n if key in super().__getattribute__(\"attribute_map\"):\n key = super().__getattribute__(\"attribute_map\")[key]\n super().__setattr__(key, value)\n\n def __getattribute__(self, key):\n if key != \"attribute_map\" and key in super().__getattribute__(\"attribute_map\"):\n key = super().__getattribute__(\"attribute_map\")[key]\n return super().__getattribute__(key)\n\n def __init__(self, **kwargs):\n # Attributes with defaults\n self.return_dict = kwargs.pop(\"return_dict\", True)\n self.output_hidden_states = kwargs.pop(\"output_hidden_states\", False)\n self.output_attentions = kwargs.pop(\"output_attentions\", False)\n self.torchscript = kwargs.pop(\"torchscript\", False) # Only used by PyTorch models\n self.torch_dtype = kwargs.pop(\"torch_dtype\", None) # Only used by PyTorch models\n self.use_bfloat16 = kwargs.pop(\"use_bfloat16\", False)\n self.tf_legacy_loss = kwargs.pop(\"tf_legacy_loss\", False) # Only used by TensorFlow models\n self.pruned_heads = kwargs.pop(\"pruned_heads\", {})\n self.tie_word_embeddings = kwargs.pop(\n \"tie_word_embeddings\", True\n ) # Whether input and output word embeddings should be tied for all MLM, LM and Seq2Seq models.\n\n # Is decoder is used in encoder-decoder models to differentiate encoder from decoder\n self.is_encoder_decoder = kwargs.pop(\"is_encoder_decoder\", False)\n self.is_decoder = kwargs.pop(\"is_decoder\", False)\n self.cross_attention_hidden_size = kwargs.pop(\"cross_attention_hidden_size\", None)\n self.add_cross_attention = kwargs.pop(\"add_cross_attention\", False)\n self.tie_encoder_decoder = kwargs.pop(\"tie_encoder_decoder\", False)\n\n # Parameters for sequence generation\n self.max_length = kwargs.pop(\"max_length\", 20)\n self.min_length = kwargs.pop(\"min_length\", 0)\n self.do_sample = kwargs.pop(\"do_sample\", False)\n self.early_stopping = kwargs.pop(\"early_stopping\", False)\n self.num_beams = kwargs.pop(\"num_beams\", 1)\n self.num_beam_groups = kwargs.pop(\"num_beam_groups\", 1)\n self.diversity_penalty = kwargs.pop(\"diversity_penalty\", 0.0)\n self.temperature = kwargs.pop(\"temperature\", 1.0)\n self.top_k = kwargs.pop(\"top_k\", 50)\n self.top_p = kwargs.pop(\"top_p\", 1.0)\n self.typical_p = kwargs.pop(\"typical_p\", 1.0)\n self.repetition_penalty = kwargs.pop(\"repetition_penalty\", 1.0)\n self.length_penalty = kwargs.pop(\"length_penalty\", 1.0)\n self.no_repeat_ngram_size = kwargs.pop(\"no_repeat_ngram_size\", 0)\n self.encoder_no_repeat_ngram_size = kwargs.pop(\"encoder_no_repeat_ngram_size\", 0)\n self.bad_words_ids = kwargs.pop(\"bad_words_ids\", None)\n self.num_return_sequences = kwargs.pop(\"num_return_sequences\", 1)\n self.chunk_size_feed_forward = kwargs.pop(\"chunk_size_feed_forward\", 0)\n self.output_scores = kwargs.pop(\"output_scores\", False)\n self.return_dict_in_generate = kwargs.pop(\"return_dict_in_generate\", False)\n self.forced_bos_token_id = kwargs.pop(\"forced_bos_token_id\", None)\n self.forced_eos_token_id = kwargs.pop(\"forced_eos_token_id\", None)\n self.remove_invalid_values = kwargs.pop(\"remove_invalid_values\", False)\n self.exponential_decay_length_penalty = kwargs.pop(\"exponential_decay_length_penalty\", None)\n self.suppress_tokens = kwargs.pop(\"suppress_tokens\", None)\n self.begin_suppress_tokens = 
kwargs.pop(\"begin_suppress_tokens\", None)\n\n # Fine-tuning task arguments\n self.architectures = kwargs.pop(\"architectures\", None)\n self.finetuning_task = kwargs.pop(\"finetuning_task\", None)\n self.id2label = kwargs.pop(\"id2label\", None)\n self.label2id = kwargs.pop(\"label2id\", None)\n if self.label2id is not None and not isinstance(self.label2id, dict):\n raise ValueError(\"Argument label2id should be a dictionary.\")\n if self.id2label is not None:\n if not isinstance(self.id2label, dict):\n raise ValueError(\"Argument id2label should be a dictionary.\")\n num_labels = kwargs.pop(\"num_labels\", None)\n if num_labels is not None and len(self.id2label) != num_labels:\n logger.warning(\n f\"You passed along `num_labels={num_labels}` with an incompatible id to label map: \"\n f\"{self.id2label}. The number of labels wil be overwritten to {self.num_labels}.\"\n )\n self.id2label = {int(key): value for key, value in self.id2label.items()}\n # Keys are always strings in JSON so convert ids to int here.\n else:\n self.num_labels = kwargs.pop(\"num_labels\", 2)\n\n if self.torch_dtype is not None and isinstance(self.torch_dtype, str):\n # we will start using self.torch_dtype in v5, but to be consistent with\n # from_pretrained's torch_dtype arg convert it to an actual torch.dtype object\n if is_torch_available():\n import torch\n\n self.torch_dtype = getattr(torch, self.torch_dtype)\n\n # Tokenizer arguments TODO: eventually tokenizer and models should share the same config\n self.tokenizer_class = kwargs.pop(\"tokenizer_class\", None)\n self.prefix = kwargs.pop(\"prefix\", None)\n self.bos_token_id = kwargs.pop(\"bos_token_id\", None)\n self.pad_token_id = kwargs.pop(\"pad_token_id\", None)\n self.eos_token_id = kwargs.pop(\"eos_token_id\", None)\n self.sep_token_id = kwargs.pop(\"sep_token_id\", None)\n\n self.decoder_start_token_id = kwargs.pop(\"decoder_start_token_id\", None)\n\n # task specific arguments\n self.task_specific_params = kwargs.pop(\"task_specific_params\", None)\n\n # regression / multi-label classification\n self.problem_type = kwargs.pop(\"problem_type\", None)\n allowed_problem_types = (\"regression\", \"single_label_classification\", \"multi_label_classification\")\n if self.problem_type is not None and self.problem_type not in allowed_problem_types:\n raise ValueError(\n f\"The config parameter `problem_type` was not understood: received {self.problem_type} \"\n \"but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid.\"\n )\n\n # TPU arguments\n if kwargs.pop(\"xla_device\", None) is not None:\n logger.warning(\n \"The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can \"\n \"safely remove it from your `config.json` file.\"\n )\n\n # Name or path to the pretrained checkpoint\n self._name_or_path = str(kwargs.pop(\"name_or_path\", \"\"))\n # Config hash\n self._commit_hash = kwargs.pop(\"_commit_hash\", None)\n\n # Drop the transformers version info\n self.transformers_version = kwargs.pop(\"transformers_version\", None)\n\n # Deal with gradient checkpointing\n if kwargs.get(\"gradient_checkpointing\", False):\n warnings.warn(\n \"Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 \"\n \"Transformers. 
Using `model.gradient_checkpointing_enable()` instead, or if you are using the \"\n \"`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`.\"\n )\n\n # Additional attributes without default values\n for key, value in kwargs.items():\n try:\n setattr(self, key, value)\n except AttributeError as err:\n logger.error(f\"Can't set {key} with value {value} for {self}\")\n raise err\n\n @property\n def name_or_path(self) -> str:\n return getattr(self, \"_name_or_path\", None)\n\n @name_or_path.setter\n def name_or_path(self, value):\n self._name_or_path = str(value) # Make sure that name_or_path is a string (for JSON encoding)\n\n @property\n def use_return_dict(self) -> bool:\n \"\"\"\n `bool`: Whether or not return [`~utils.ModelOutput`] instead of tuples.\n \"\"\"\n # If torchscript is set, force `return_dict=False` to avoid jit errors\n return self.return_dict and not self.torchscript\n\n @property\n def num_labels(self) -> int:\n \"\"\"\n `int`: The number of labels for classification models.\n \"\"\"\n return len(self.id2label)\n\n @num_labels.setter\n def num_labels(self, num_labels: int):\n if not hasattr(self, \"id2label\") or self.id2label is None or len(self.id2label) != num_labels:\n self.id2label = {i: f\"LABEL_{i}\" for i in range(num_labels)}\n self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))\n\n def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the\n [`~PretrainedConfig.from_pretrained`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face model hub after saving it. 
You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n self._set_token_in_kwargs(kwargs)\n\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = self._create_repo(repo_id, **kwargs)\n files_timestamps = self._get_files_timestamps(save_directory)\n\n # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be\n # loaded from the Hub.\n if self._auto_class is not None:\n custom_object_save(self, save_directory, config=self)\n\n # If we save using the predefined names, we can load using `from_pretrained`\n output_config_file = os.path.join(save_directory, CONFIG_NAME)\n\n self.to_json_file(output_config_file, use_diff=True)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n if push_to_hub:\n self._upload_modified_files(\n save_directory,\n repo_id,\n files_timestamps,\n commit_message=commit_message,\n token=kwargs.get(\"token\"),\n )\n\n @staticmethod\n def _set_token_in_kwargs(kwargs, token=None):\n \"\"\"Temporary method to deal with `token` and `use_auth_token`.\n\n This method is to avoid apply the same changes in all model config classes that overwrite `from_pretrained`.\n\n Need to clean up `use_auth_token` in a follow PR.\n \"\"\"\n # Some model config classes like CLIP define their own `from_pretrained` without the new argument `token` yet.\n if token is None:\n token = kwargs.pop(\"token\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n\n if use_auth_token is not None:\n warnings.warn(\n \"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers.\", FutureWarning\n )\n if token is not None:\n raise ValueError(\n \"`token` and `use_auth_token` are both specified. Please set only the argument `token`.\"\n )\n token = use_auth_token\n\n if token is not None:\n kwargs[\"token\"] = token\n\n @classmethod\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n cache_dir: Optional[Union[str, os.PathLike]] = None,\n force_download: bool = False,\n local_files_only: bool = False,\n token: Optional[Union[str, bool]] = None,\n revision: str = \"main\",\n **kwargs,\n ) -> \"PretrainedConfig\":\n r\"\"\"\n Instantiate a [`PretrainedConfig`] (or a derived class) from a pretrained model configuration.\n\n Args:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n This can be either:\n\n - a string, the *model id* of a pretrained model configuration hosted inside a model repo on\n huggingface.co. 
Valid model ids can be located at the root-level, like `bert-base-uncased`, or\n namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`.\n - a path to a *directory* containing a configuration file saved using the\n [`~PretrainedConfig.save_pretrained`] method, e.g., `./my_model_directory/`.\n - a path or url to a saved configuration JSON *file*, e.g., `./my_model_directory/configuration.json`.\n cache_dir (`str` or `os.PathLike`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force to (re-)download the configuration files and override the cached versions if\n they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to delete incompletely received file. Attempts to resume the download if such a file\n exists.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.\n token (`str` or `bool`, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use\n the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n\n <Tip>\n\n To test a pull request you made on the Hub, you can pass `revision=\"refs/pr/<pr_number>\".\n\n </Tip>\n\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n If `False`, then this function returns just the final configuration object.\n\n If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a\n dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the\n part of `kwargs` which has not been used to update `config` and is otherwise ignored.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can\n specify the folder name here.\n kwargs (`Dict[str, Any]`, *optional*):\n The values in kwargs of any keys which are configuration attributes will be used to override the loaded\n values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled\n by the `return_unused_kwargs` keyword parameter.\n\n Returns:\n [`PretrainedConfig`]: The configuration object instantiated from this pretrained model.\n\n Examples:\n\n ```python\n # We can't instantiate directly the base class *PretrainedConfig* so let's show the examples on a\n # derived class: BertConfig\n config = BertConfig.from_pretrained(\n \"bert-base-uncased\"\n ) # Download configuration from huggingface.co and cache.\n config = BertConfig.from_pretrained(\n \"./test/saved_model/\"\n ) # E.g. 
config (or model) was saved using *save_pretrained('./test/saved_model/')*\n config = BertConfig.from_pretrained(\"./test/saved_model/my_configuration.json\")\n config = BertConfig.from_pretrained(\"bert-base-uncased\", output_attentions=True, foo=False)\n assert config.output_attentions == True\n config, unused_kwargs = BertConfig.from_pretrained(\n \"bert-base-uncased\", output_attentions=True, foo=False, return_unused_kwargs=True\n )\n assert config.output_attentions == True\n assert unused_kwargs == {\"foo\": False}\n ```\"\"\"\n kwargs[\"cache_dir\"] = cache_dir\n kwargs[\"force_download\"] = force_download\n kwargs[\"local_files_only\"] = local_files_only\n kwargs[\"revision\"] = revision\n\n cls._set_token_in_kwargs(kwargs, token)\n\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)\n\n @classmethod\n def get_config_dict(\n cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n \"\"\"\n From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a\n [`PretrainedConfig`] using `from_dict`.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.\n\n Returns:\n `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.\n\n \"\"\"\n cls._set_token_in_kwargs(kwargs)\n\n original_kwargs = copy.deepcopy(kwargs)\n # Get config dict associated with the base config file\n config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)\n if \"_commit_hash\" in config_dict:\n original_kwargs[\"_commit_hash\"] = config_dict[\"_commit_hash\"]\n\n # That config file may point us toward another config file to use.\n if \"configuration_files\" in config_dict:\n configuration_file = get_configuration_file(config_dict[\"configuration_files\"])\n config_dict, kwargs = cls._get_config_dict(\n pretrained_model_name_or_path, _configuration_file=configuration_file, **original_kwargs\n )\n\n return config_dict, kwargs\n\n @classmethod\n def _get_config_dict(\n cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n cache_dir = kwargs.pop(\"cache_dir\", None)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n token = kwargs.pop(\"token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n trust_remote_code = kwargs.pop(\"trust_remote_code\", None)\n subfolder = kwargs.pop(\"subfolder\", \"\")\n from_pipeline = kwargs.pop(\"_from_pipeline\", None)\n from_auto_class = kwargs.pop(\"_from_auto\", False)\n commit_hash = kwargs.pop(\"_commit_hash\", None)\n\n if trust_remote_code is True:\n logger.warning(\n \"The argument `trust_remote_code` is to be used with Auto classes. 
It has no effect here and is\"\n \" ignored.\"\n )\n\n user_agent = {\"file_type\": \"config\", \"from_auto_class\": from_auto_class}\n if from_pipeline is not None:\n user_agent[\"using_pipeline\"] = from_pipeline\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n is_local = os.path.isdir(pretrained_model_name_or_path)\n if os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):\n # Special case when pretrained_model_name_or_path is a local file\n resolved_config_file = pretrained_model_name_or_path\n is_local = True\n elif is_remote_url(pretrained_model_name_or_path):\n configuration_file = pretrained_model_name_or_path\n resolved_config_file = download_url(pretrained_model_name_or_path)\n else:\n configuration_file = kwargs.pop(\"_configuration_file\", CONFIG_NAME)\n\n try:\n # Load from local folder or from cache or download from model Hub and cache\n resolved_config_file = cached_file(\n pretrained_model_name_or_path,\n configuration_file,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n token=token,\n user_agent=user_agent,\n revision=revision,\n subfolder=subfolder,\n _commit_hash=commit_hash,\n )\n commit_hash = extract_commit_hash(resolved_config_file, commit_hash)\n except EnvironmentError:\n # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to\n # the original exception.\n raise\n except Exception:\n # For any other exception, we throw a generic error.\n raise EnvironmentError(\n f\"Can't load the configuration of '{pretrained_model_name_or_path}'. If you were trying to load it\"\n \" from 'https://huggingface.co/models', make sure you don't have a local directory with the same\"\n f\" name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory\"\n f\" containing a {configuration_file} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(resolved_config_file)\n config_dict[\"_commit_hash\"] = commit_hash\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(\n f\"It looks like the config file at '{resolved_config_file}' is not a valid JSON file.\"\n )\n\n if is_local:\n logger.info(f\"loading configuration file {resolved_config_file}\")\n else:\n logger.info(f\"loading configuration file {configuration_file} from cache at {resolved_config_file}\")\n\n if \"auto_map\" in config_dict and not is_local:\n config_dict[\"auto_map\"] = add_model_info_to_auto_map(\n config_dict[\"auto_map\"], pretrained_model_name_or_path\n )\n return config_dict, kwargs\n\n @classmethod\n def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> \"PretrainedConfig\":\n \"\"\"\n Instantiates a [`PretrainedConfig`] from a Python dictionary of parameters.\n\n Args:\n config_dict (`Dict[str, Any]`):\n Dictionary that will be used to instantiate the configuration object. 
Such a dictionary can be\n retrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method.\n kwargs (`Dict[str, Any]`):\n Additional parameters from which to initialize the configuration object.\n\n Returns:\n [`PretrainedConfig`]: The configuration object instantiated from those parameters.\n \"\"\"\n return_unused_kwargs = kwargs.pop(\"return_unused_kwargs\", False)\n # Those arguments may be passed along for our internal telemetry.\n # We remove them so they don't appear in `return_unused_kwargs`.\n kwargs.pop(\"_from_auto\", None)\n kwargs.pop(\"_from_pipeline\", None)\n # The commit hash might have been updated in the `config_dict`, we don't want the kwargs to erase that update.\n if \"_commit_hash\" in kwargs and \"_commit_hash\" in config_dict:\n kwargs[\"_commit_hash\"] = config_dict[\"_commit_hash\"]\n\n config = cls(**config_dict)\n\n if hasattr(config, \"pruned_heads\"):\n config.pruned_heads = {int(key): value for key, value in config.pruned_heads.items()}\n\n # Update config with kwargs if needed\n if \"num_labels\" in kwargs and \"id2label\" in kwargs:\n num_labels = kwargs[\"num_labels\"]\n id2label = kwargs[\"id2label\"] if kwargs[\"id2label\"] is not None else []\n if len(id2label) != num_labels:\n raise ValueError(\n f\"You passed along `num_labels={num_labels }` with an incompatible id to label map: \"\n f\"{kwargs['id2label']}. Since those arguments are inconsistent with each other, you should remove \"\n \"one of them.\"\n )\n to_remove = []\n for key, value in kwargs.items():\n if hasattr(config, key):\n current_attr = getattr(config, key)\n # To authorize passing a custom subconfig as kwarg in models that have nested configs.\n if isinstance(current_attr, PretrainedConfig) and isinstance(value, dict):\n value = current_attr.__class__(**value)\n setattr(config, key, value)\n if key != \"torch_dtype\":\n to_remove.append(key)\n for key in to_remove:\n kwargs.pop(key, None)\n\n logger.info(f\"Model config {config}\")\n if return_unused_kwargs:\n return config, kwargs\n else:\n return config\n\n @classmethod\n def from_json_file(cls, json_file: Union[str, os.PathLike]) -> \"PretrainedConfig\":\n \"\"\"\n Instantiates a [`PretrainedConfig`] from the path to a JSON file of parameters.\n\n Args:\n json_file (`str` or `os.PathLike`):\n Path to the JSON file containing the parameters.\n\n Returns:\n [`PretrainedConfig`]: The configuration object instantiated from that JSON file.\n\n \"\"\"\n config_dict = cls._dict_from_json_file(json_file)\n return cls(**config_dict)\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __eq__(self, other):\n return isinstance(other, PretrainedConfig) and (self.__dict__ == other.__dict__)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n def to_diff_dict(self) -> Dict[str, Any]:\n \"\"\"\n Removes all attributes from config which correspond to the default config attributes for better readability and\n serializes to a Python dictionary.\n\n Returns:\n `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n config_dict = self.to_dict()\n\n # get the default config dict\n default_config_dict = PretrainedConfig().to_dict()\n\n # get class specific config dict\n class_config_dict = self.__class__().to_dict() if not self.is_composition else {}\n\n serializable_config_dict = 
{}\n\n # only serialize values that differ from the default config\n for key, value in config_dict.items():\n if (\n isinstance(getattr(self, key, None), PretrainedConfig)\n and key in class_config_dict\n and isinstance(class_config_dict[key], dict)\n ):\n # For nested configs we need to clean the diff recursively\n diff = recursive_diff_dict(value, class_config_dict[key], config_obj=getattr(self, key, None))\n if \"model_type\" in value:\n # Needs to be set even if it's not in the diff\n diff[\"model_type\"] = value[\"model_type\"]\n if len(diff) > 0:\n serializable_config_dict[key] = diff\n elif (\n key not in default_config_dict\n or key == \"transformers_version\"\n or value != default_config_dict[key]\n or (key in class_config_dict and value != class_config_dict[key])\n ):\n serializable_config_dict[key] = value\n\n if hasattr(self, \"quantization_config\"):\n serializable_config_dict[\"quantization_config\"] = (\n self.quantization_config.to_dict()\n if not isinstance(self.quantization_config, dict)\n else self.quantization_config\n )\n\n self.dict_torch_dtype_to_str(serializable_config_dict)\n\n return serializable_config_dict\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes this instance to a Python dictionary.\n\n Returns:\n `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n if hasattr(self.__class__, \"model_type\"):\n output[\"model_type\"] = self.__class__.model_type\n if \"_auto_class\" in output:\n del output[\"_auto_class\"]\n if \"_commit_hash\" in output:\n del output[\"_commit_hash\"]\n\n # Transformers version when serializing the model\n output[\"transformers_version\"] = __version__\n\n for key, value in output.items():\n # Deal with nested configs like CLIP\n if isinstance(value, PretrainedConfig):\n value = value.to_dict()\n del value[\"transformers_version\"]\n\n output[key] = value\n\n if hasattr(self, \"quantization_config\"):\n output[\"quantization_config\"] = (\n self.quantization_config.to_dict()\n if not isinstance(self.quantization_config, dict)\n else self.quantization_config\n )\n\n self.dict_torch_dtype_to_str(output)\n\n return output\n\n def to_json_string(self, use_diff: bool = True) -> str:\n \"\"\"\n Serializes this instance to a JSON string.\n\n Args:\n use_diff (`bool`, *optional*, defaults to `True`):\n If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`\n is serialized to JSON string.\n\n Returns:\n `str`: String containing all the attributes that make up this configuration instance in JSON format.\n \"\"\"\n if use_diff is True:\n config_dict = self.to_diff_dict()\n else:\n config_dict = self.to_dict()\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):\n \"\"\"\n Save this instance to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file in which this configuration instance's parameters will be saved.\n use_diff (`bool`, *optional*, defaults to `True`):\n If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`\n is serialized to JSON file.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string(use_diff=use_diff))\n\n def update(self, config_dict: Dict[str, Any]):\n \"\"\"\n Updates attributes of this class with attributes from `config_dict`.\n\n 
Args:\n config_dict (`Dict[str, Any]`): Dictionary of attributes that should be updated for this class.\n \"\"\"\n for key, value in config_dict.items():\n setattr(self, key, value)\n\n def update_from_string(self, update_str: str):\n \"\"\"\n Updates attributes of this class with attributes from `update_str`.\n\n The expected format is ints, floats and strings as is, and for booleans use `true` or `false`. For example:\n \"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index\"\n\n The keys to change have to already exist in the config object.\n\n Args:\n update_str (`str`): String with attributes that should be updated for this class.\n\n \"\"\"\n\n d = dict(x.split(\"=\") for x in update_str.split(\",\"))\n for k, v in d.items():\n if not hasattr(self, k):\n raise ValueError(f\"key {k} isn't in the original config dict\")\n\n old_v = getattr(self, k)\n if isinstance(old_v, bool):\n if v.lower() in [\"true\", \"1\", \"y\", \"yes\"]:\n v = True\n elif v.lower() in [\"false\", \"0\", \"n\", \"no\"]:\n v = False\n else:\n raise ValueError(f\"can't derive true or false from {v} (key {k})\")\n elif isinstance(old_v, int):\n v = int(v)\n elif isinstance(old_v, float):\n v = float(v)\n elif not isinstance(old_v, str):\n raise ValueError(\n f\"You can only update int, float, bool or string values in the config, got {v} for key {k}\"\n )\n\n setattr(self, k, v)\n\n def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:\n \"\"\"\n Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None,\n converts torch.dtype to a string of just the type. For example, `torch.float32` get converted into *\"float32\"*\n string, which can then be stored in the json format.\n \"\"\"\n if d.get(\"torch_dtype\", None) is not None and not isinstance(d[\"torch_dtype\"], str):\n d[\"torch_dtype\"] = str(d[\"torch_dtype\"]).split(\".\")[1]\n for value in d.values():\n if isinstance(value, dict):\n self.dict_torch_dtype_to_str(value)\n\n @classmethod\n def register_for_auto_class(cls, auto_class=\"AutoConfig\"):\n \"\"\"\n Register this class with a given auto class. 
This should only be used for custom configurations as the ones in\n the library are already mapped with `AutoConfig`.\n\n <Tip warning={true}>\n\n This API is experimental and may have some slight breaking changes in the next releases.\n\n </Tip>\n\n Args:\n auto_class (`str` or `type`, *optional*, defaults to `\"AutoConfig\"`):\n The auto class to register this new configuration with.\n \"\"\"\n if not isinstance(auto_class, str):\n auto_class = auto_class.__name__\n\n import transformers.models.auto as auto_module\n\n if not hasattr(auto_module, auto_class):\n raise ValueError(f\"{auto_class} is not a valid auto class.\")\n\n cls._auto_class = auto_class" }, { "identifier": "logging", "path": "transformers/src/transformers/utils/logging.py", "snippet": "def _get_default_logging_level():\ndef _get_library_name() -> str:\ndef _get_library_root_logger() -> logging.Logger:\ndef _configure_library_root_logger() -> None:\ndef _reset_library_root_logger() -> None:\ndef get_log_levels_dict():\ndef get_logger(name: Optional[str] = None) -> logging.Logger:\ndef get_verbosity() -> int:\ndef set_verbosity(verbosity: int) -> None:\ndef set_verbosity_info():\ndef set_verbosity_warning():\ndef set_verbosity_debug():\ndef set_verbosity_error():\ndef disable_default_handler() -> None:\ndef enable_default_handler() -> None:\ndef add_handler(handler: logging.Handler) -> None:\ndef remove_handler(handler: logging.Handler) -> None:\ndef disable_propagation() -> None:\ndef enable_propagation() -> None:\ndef enable_explicit_format() -> None:\ndef reset_format() -> None:\ndef warning_advice(self, *args, **kwargs):\ndef warning_once(self, *args, **kwargs):\n def __init__(self, *args, **kwargs): # pylint: disable=unused-argument\n def __iter__(self):\n def __getattr__(self, _):\n def empty_fn(*args, **kwargs): # pylint: disable=unused-argument\n def __enter__(self):\n def __exit__(self, type_, value, traceback):\n def __call__(self, *args, **kwargs):\n def set_lock(self, *args, **kwargs):\n def get_lock(self):\ndef is_progress_bar_enabled() -> bool:\ndef enable_progress_bar():\ndef disable_progress_bar():\nclass EmptyTqdm:\nclass _tqdm_cls:" } ]
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging
12,428
# coding=utf-8
# Copyright 2021 ASAPP Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" SEW model configuration"""

logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}
# coding=utf-8
# Copyright 2021 ASAPP Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" SEW model configuration"""

logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
0
2023-11-07 04:23:57+00:00
16k
HKU-BAL/ClairS-TO
src/realign_reads.py
[ { "identifier": "subprocess_popen", "path": "shared/utils.py", "snippet": "BASIC_BASES = set(\"ACGTU\")\nWARNING = '\\033[93m'\nERROR = '\\033[91m'\nENDC = '\\033[0m'\ndef log_error(log):\ndef log_warning(log):\ndef is_file_exists(file_name, suffix=\"\"):\ndef is_folder_exists(folder_name, suffix=\"\"):\ndef legal_range_from(param_name, x, min_num=None, max_num=None, exit_out_of_range=False):\ndef file_path_from(file_name, suffix=\"\", exit_on_not_found=False, sep=\"\", allow_none=False, is_directory=False):\ndef folder_path_from(folder_name, create_not_found=True, exit_on_not_found=False):\ndef is_command_exists(command):\ndef executable_command_string_from(command_to_execute, exit_on_not_found=False):\ndef subprocess_popen(args, stdin=None, stdout=PIPE, stderr=stderr, bufsize=8388608):\ndef str_none(v):\ndef str2bool(v):\ndef region_from(ctg_name, ctg_start=None, ctg_end=None):\ndef reference_sequence_from(samtools_execute_command, fasta_file_path, regions):\ndef vcf_candidates_from(vcf_fn, contig_name=None):\ndef candidate_position_generator_from(\n candidate,\n flanking_base_num,\n begin_to_end\n):\ndef samtools_mpileup_generator_from(\n candidate,\n flanking_base_num,\n begin_to_end\n):\ndef samtools_view_process_from(\n ctg_name,\n ctg_start,\n ctg_end,\n samtools,\n bam_file_path\n):\n def __init__(self, ctg_name=None,\n genotype1=None,\n genotype2=None,\n pos=None,\n ref_base=None,\n alt_base=None,\n candidate=False,\n cigar_count=None,\n confident_variant=False,\n depth=None,\n alt_list=None,\n af=None,\n filter=None,\n af_list=None,\n alt_type_mapping_dict=None,\n extra_infos=\"\",\n qual=None,\n row_str=None):\n def update_info(self, ref_base, alt_base, genotype, extra_infos=\"\"):\n def __init__(self, pos, ref_base, depth, af_list, alt_dict, tumor_alt_dict, extra_infos=\"\"):\n def __init__(self, handle):\n def __del__(self):\nclass Position(object):\nclass AltInfos(object):\nclass TensorStdout(object):" }, { "identifier": "bed_tree_from", "path": "shared/interval_tree.py", "snippet": "def bed_tree_from(bed_file_path,\n expand_region=None,\n contig_name=None,\n bed_ctg_start=None,\n bed_ctg_end=None,\n return_bed_region=False,\n padding=None,\n region=None):\n \"\"\"\n 0-based interval tree [start, end)\n \"\"\"\n\n tree = {}\n if region is not None:\n try:\n ctg_name, start_end = region.split(':')\n ctg_start, ctg_end = int(start_end.split('-')[0]) - 1, int(start_end.split('-')[1]) - 1 # bed format\n except:\n sys.exit(\"[ERROR] Please input the correct format for --region ctg_name:start-end, your input is {}\".format(region))\n if ctg_end < ctg_start or ctg_start < 0 or ctg_end < 0:\n sys.exit(\"[ERROR] Invalid region input: {}\".format(region))\n\n if ctg_name not in tree:\n tree[ctg_name] = IntervalTree()\n tree[ctg_name].addi(ctg_start, ctg_end)\n if return_bed_region:\n return tree, None, None\n return tree\n\n if bed_file_path is None or bed_file_path == \"\":\n if return_bed_region:\n return tree, None, None\n return tree\n\n bed_start, bed_end = float('inf'), 0\n unzip_process = subprocess_popen(shlex.split(\"gzip -fdc %s\" % (bed_file_path)))\n for row_id, row in enumerate(unzip_process.stdout):\n if row[0] == '#':\n continue\n columns = row.strip().split()\n\n ctg_name = columns[0]\n if contig_name != None and ctg_name != contig_name:\n continue\n if ctg_name not in tree:\n tree[ctg_name] = IntervalTree()\n\n ctg_start, ctg_end = int(columns[1]), int(columns[2])\n\n if ctg_end < ctg_start or ctg_start < 0 or ctg_end < 0:\n sys.exit(\"[ERROR] Invalid bed input in 
{}-th row {} {} {}\".format(row_id+1, ctg_name, ctg_start, ctg_end))\n\n if bed_ctg_start and bed_ctg_end:\n if ctg_end < bed_ctg_start or ctg_start > bed_ctg_end:\n continue\n if padding:\n ctg_start += padding\n ctg_end -= padding\n bed_start = min(ctg_start, bed_start)\n bed_end = max(ctg_end, bed_end)\n if ctg_start == ctg_end:\n ctg_end += 1\n\n tree[ctg_name].addi(ctg_start, ctg_end)\n\n unzip_process.stdout.close()\n unzip_process.wait()\n if return_bed_region:\n return tree, bed_start, bed_end\n return tree" }, { "identifier": "IntervalTree", "path": "shared/intervaltree/intervaltree.py", "snippet": "class IntervalTree(MutableSet):\n \"\"\"\n A binary lookup tree of intervals.\n The intervals contained in the tree are represented using ``Interval(a, b, data)`` objects.\n Each such object represents a half-open interval ``[a, b)`` with optional data.\n\n Examples:\n ---------\n\n Initialize a blank tree::\n\n >>> tree = IntervalTree()\n >>> tree\n IntervalTree()\n\n Initialize a tree from an iterable set of Intervals in O(n * log n)::\n\n >>> tree = IntervalTree([Interval(-10, 10), Interval(-20.0, -10.0)])\n >>> tree\n IntervalTree([Interval(-20.0, -10.0), Interval(-10, 10)])\n >>> len(tree)\n 2\n\n Note that this is a set, i.e. repeated intervals are ignored. However,\n Intervals with different data fields are regarded as different::\n\n >>> tree = IntervalTree([Interval(-10, 10), Interval(-10, 10), Interval(-10, 10, \"x\")])\n >>> tree\n IntervalTree([Interval(-10, 10), Interval(-10, 10, 'x')])\n >>> len(tree)\n 2\n\n Insertions::\n >>> tree = IntervalTree()\n >>> tree[0:1] = \"data\"\n >>> tree.add(Interval(10, 20))\n >>> tree.addi(19.9, 20)\n >>> tree\n IntervalTree([Interval(0, 1, 'data'), Interval(10, 20), Interval(19.9, 20)])\n >>> tree.update([Interval(19.9, 20.1), Interval(20.1, 30)])\n >>> len(tree)\n 5\n\n Inserting the same Interval twice does nothing::\n >>> tree = IntervalTree()\n >>> tree[-10:20] = \"arbitrary data\"\n >>> tree[-10:20] = None # Note that this is also an insertion\n >>> tree\n IntervalTree([Interval(-10, 20), Interval(-10, 20, 'arbitrary data')])\n >>> tree[-10:20] = None # This won't change anything\n >>> tree[-10:20] = \"arbitrary data\" # Neither will this\n >>> len(tree)\n 2\n\n Deletions::\n >>> tree = IntervalTree(Interval(b, e) for b, e in [(-10, 10), (-20, -10), (10, 20)])\n >>> tree\n IntervalTree([Interval(-20, -10), Interval(-10, 10), Interval(10, 20)])\n >>> tree.remove(Interval(-10, 10))\n >>> tree\n IntervalTree([Interval(-20, -10), Interval(10, 20)])\n >>> tree.remove(Interval(-10, 10))\n Traceback (most recent call last):\n ...\n ValueError\n >>> tree.discard(Interval(-10, 10)) # Same as remove, but no exception on failure\n >>> tree\n IntervalTree([Interval(-20, -10), Interval(10, 20)])\n\n Delete intervals, overlapping a given point::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> tree.remove_overlap(1.1)\n >>> tree\n IntervalTree([Interval(-1.1, 1.1)])\n\n Delete intervals, overlapping an interval::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> tree.remove_overlap(0, 0.5)\n >>> tree\n IntervalTree([Interval(0.5, 1.7)])\n >>> tree.remove_overlap(1.7, 1.8)\n >>> tree\n IntervalTree([Interval(0.5, 1.7)])\n >>> tree.remove_overlap(1.6, 1.6) # Null interval does nothing\n >>> tree\n IntervalTree([Interval(0.5, 1.7)])\n >>> tree.remove_overlap(1.6, 1.5) # Ditto\n >>> tree\n IntervalTree([Interval(0.5, 1.7)])\n\n Delete intervals, 
enveloped in the range::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> tree.remove_envelop(-1.0, 1.5)\n >>> tree\n IntervalTree([Interval(-1.1, 1.1), Interval(0.5, 1.7)])\n >>> tree.remove_envelop(-1.1, 1.5)\n >>> tree\n IntervalTree([Interval(0.5, 1.7)])\n >>> tree.remove_envelop(0.5, 1.5)\n >>> tree\n IntervalTree([Interval(0.5, 1.7)])\n >>> tree.remove_envelop(0.5, 1.7)\n >>> tree\n IntervalTree()\n\n Point queries::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> assert tree[-1.1] == set([Interval(-1.1, 1.1)])\n >>> assert tree.at(1.1) == set([Interval(-0.5, 1.5), Interval(0.5, 1.7)]) # Same as tree[1.1]\n >>> assert tree.at(1.5) == set([Interval(0.5, 1.7)]) # Same as tree[1.5]\n\n Interval overlap queries\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> assert tree.overlap(1.7, 1.8) == set()\n >>> assert tree.overlap(1.5, 1.8) == set([Interval(0.5, 1.7)])\n >>> assert tree[1.5:1.8] == set([Interval(0.5, 1.7)]) # same as previous\n >>> assert tree.overlap(1.1, 1.8) == set([Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> assert tree[1.1:1.8] == set([Interval(-0.5, 1.5), Interval(0.5, 1.7)]) # same as previous\n\n Interval envelop queries::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> assert tree.envelop(-0.5, 0.5) == set()\n >>> assert tree.envelop(-0.5, 1.5) == set([Interval(-0.5, 1.5)])\n\n Membership queries::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> Interval(-0.5, 0.5) in tree\n False\n >>> Interval(-1.1, 1.1) in tree\n True\n >>> Interval(-1.1, 1.1, \"x\") in tree\n False\n >>> tree.overlaps(-1.1)\n True\n >>> tree.overlaps(1.7)\n False\n >>> tree.overlaps(1.7, 1.8)\n False\n >>> tree.overlaps(-1.2, -1.1)\n False\n >>> tree.overlaps(-1.2, -1.0)\n True\n\n Sizing::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> len(tree)\n 3\n >>> tree.is_empty()\n False\n >>> IntervalTree().is_empty()\n True\n >>> not tree\n False\n >>> not IntervalTree()\n True\n >>> print(tree.begin()) # using print() because of floats in Python 2.6\n -1.1\n >>> print(tree.end()) # ditto\n 1.7\n\n Iteration::\n\n >>> tree = IntervalTree([Interval(-11, 11), Interval(-5, 15), Interval(5, 17)])\n >>> [iv.begin for iv in sorted(tree)]\n [-11, -5, 5]\n >>> assert tree.items() == set([Interval(-5, 15), Interval(-11, 11), Interval(5, 17)])\n\n Copy- and typecasting, pickling::\n\n >>> tree0 = IntervalTree([Interval(0, 1, \"x\"), Interval(1, 2, [\"x\"])])\n >>> tree1 = IntervalTree(tree0) # Shares Interval objects\n >>> tree2 = tree0.copy() # Shallow copy (same as above, as Intervals are singletons)\n >>> import pickle\n >>> tree3 = pickle.loads(pickle.dumps(tree0)) # Deep copy\n >>> list(tree0[1])[0].data[0] = \"y\" # affects shallow copies, but not deep copies\n >>> tree0\n IntervalTree([Interval(0, 1, 'x'), Interval(1, 2, ['y'])])\n >>> tree1\n IntervalTree([Interval(0, 1, 'x'), Interval(1, 2, ['y'])])\n >>> tree2\n IntervalTree([Interval(0, 1, 'x'), Interval(1, 2, ['y'])])\n >>> tree3\n IntervalTree([Interval(0, 1, 'x'), Interval(1, 2, ['x'])])\n\n Equality testing::\n\n >>> IntervalTree([Interval(0, 1)]) == IntervalTree([Interval(0, 1)])\n True\n >>> IntervalTree([Interval(0, 1)]) == IntervalTree([Interval(0, 1, \"x\")])\n False\n \"\"\"\n @classmethod\n def from_tuples(cls, tups):\n \"\"\"\n Create a new 
IntervalTree from an iterable of 2- or 3-tuples,\n where the tuple lists begin, end, and optionally data.\n \"\"\"\n ivs = [Interval(*t) for t in tups]\n return IntervalTree(ivs)\n\n def __init__(self, intervals=None):\n \"\"\"\n Set up a tree. If intervals is provided, add all the intervals\n to the tree.\n\n Completes in O(n*log n) time.\n \"\"\"\n intervals = set(intervals) if intervals is not None else set()\n for iv in intervals:\n if iv.is_null():\n raise ValueError(\n \"IntervalTree: Null Interval objects not allowed in IntervalTree:\"\n \" {0}\".format(iv)\n )\n self.all_intervals = intervals\n self.top_node = Node.from_intervals(self.all_intervals)\n self.boundary_table = SortedDict()\n for iv in self.all_intervals:\n self._add_boundaries(iv)\n\n def copy(self):\n \"\"\"\n Construct a new IntervalTree using shallow copies of the\n intervals in the source tree.\n\n Completes in O(n*log n) time.\n :rtype: IntervalTree\n \"\"\"\n return IntervalTree(iv.copy() for iv in self)\n\n def _add_boundaries(self, interval):\n \"\"\"\n Records the boundaries of the interval in the boundary table.\n \"\"\"\n begin = interval.begin\n end = interval.end\n if begin in self.boundary_table:\n self.boundary_table[begin] += 1\n else:\n self.boundary_table[begin] = 1\n\n if end in self.boundary_table:\n self.boundary_table[end] += 1\n else:\n self.boundary_table[end] = 1\n\n def _remove_boundaries(self, interval):\n \"\"\"\n Removes the boundaries of the interval from the boundary table.\n \"\"\"\n begin = interval.begin\n end = interval.end\n if self.boundary_table[begin] == 1:\n del self.boundary_table[begin]\n else:\n self.boundary_table[begin] -= 1\n\n if self.boundary_table[end] == 1:\n del self.boundary_table[end]\n else:\n self.boundary_table[end] -= 1\n\n def add(self, interval):\n \"\"\"\n Adds an interval to the tree, if not already present.\n\n Completes in O(log n) time.\n \"\"\"\n if interval in self:\n return\n\n if interval.is_null():\n raise ValueError(\n \"IntervalTree: Null Interval objects not allowed in IntervalTree:\"\n \" {0}\".format(interval)\n )\n\n if not self.top_node:\n self.top_node = Node.from_interval(interval)\n else:\n self.top_node = self.top_node.add(interval)\n self.all_intervals.add(interval)\n self._add_boundaries(interval)\n append = add\n\n def addi(self, begin, end, data=None):\n \"\"\"\n Shortcut for add(Interval(begin, end, data)).\n\n Completes in O(log n) time.\n \"\"\"\n return self.add(Interval(begin, end, data))\n appendi = addi\n\n def update(self, intervals):\n \"\"\"\n Given an iterable of intervals, add them to the tree.\n\n Completes in O(m*log(n+m), where m = number of intervals to\n add.\n \"\"\"\n for iv in intervals:\n self.add(iv)\n\n def remove(self, interval):\n \"\"\"\n Removes an interval from the tree, if present. If not, raises\n ValueError.\n\n Completes in O(log n) time.\n \"\"\"\n #self.verify()\n if interval not in self:\n #print(self.all_intervals)\n raise ValueError\n self.top_node = self.top_node.remove(interval)\n self.all_intervals.remove(interval)\n self._remove_boundaries(interval)\n #self.verify()\n\n def removei(self, begin, end, data=None):\n \"\"\"\n Shortcut for remove(Interval(begin, end, data)).\n\n Completes in O(log n) time.\n \"\"\"\n return self.remove(Interval(begin, end, data))\n\n def discard(self, interval):\n \"\"\"\n Removes an interval from the tree, if present. 
If not, does\n nothing.\n\n Completes in O(log n) time.\n \"\"\"\n if interval not in self:\n return\n self.all_intervals.discard(interval)\n self.top_node = self.top_node.discard(interval)\n self._remove_boundaries(interval)\n\n def discardi(self, begin, end, data=None):\n \"\"\"\n Shortcut for discard(Interval(begin, end, data)).\n\n Completes in O(log n) time.\n \"\"\"\n return self.discard(Interval(begin, end, data))\n\n def difference(self, other):\n \"\"\"\n Returns a new tree, comprising all intervals in self but not\n in other.\n \"\"\"\n ivs = set()\n for iv in self:\n if iv not in other:\n ivs.add(iv)\n return IntervalTree(ivs)\n\n def difference_update(self, other):\n \"\"\"\n Removes all intervals in other from self.\n \"\"\"\n for iv in other:\n self.discard(iv)\n\n def union(self, other):\n \"\"\"\n Returns a new tree, comprising all intervals from self\n and other.\n \"\"\"\n return IntervalTree(set(self).union(other))\n\n def intersection(self, other):\n \"\"\"\n Returns a new tree of all intervals common to both self and\n other.\n \"\"\"\n ivs = set()\n shorter, longer = sorted([self, other], key=len)\n for iv in shorter:\n if iv in longer:\n ivs.add(iv)\n return IntervalTree(ivs)\n\n def intersection_update(self, other):\n \"\"\"\n Removes intervals from self unless they also exist in other.\n \"\"\"\n ivs = list(self)\n for iv in ivs:\n if iv not in other:\n self.remove(iv)\n\n def symmetric_difference(self, other):\n \"\"\"\n Return a tree with elements only in self or other but not\n both.\n \"\"\"\n if not isinstance(other, set): other = set(other)\n me = set(self)\n ivs = me.difference(other).union(other.difference(me))\n return IntervalTree(ivs)\n\n def symmetric_difference_update(self, other):\n \"\"\"\n Throws out all intervals except those only in self or other,\n not both.\n \"\"\"\n other = set(other)\n ivs = list(self)\n for iv in ivs:\n if iv in other:\n self.remove(iv)\n other.remove(iv)\n self.update(other)\n\n def remove_overlap(self, begin, end=None):\n \"\"\"\n Removes all intervals overlapping the given point or range.\n\n Completes in O((r+m)*log n) time, where:\n * n = size of the tree\n * m = number of matches\n * r = size of the search range (this is 1 for a point)\n \"\"\"\n hitlist = self.at(begin) if end is None else self.overlap(begin, end)\n for iv in hitlist:\n self.remove(iv)\n\n def remove_envelop(self, begin, end):\n \"\"\"\n Removes all intervals completely enveloped in the given range.\n\n Completes in O((r+m)*log n) time, where:\n * n = size of the tree\n * m = number of matches\n * r = size of the search range\n \"\"\"\n hitlist = self.envelop(begin, end)\n for iv in hitlist:\n self.remove(iv)\n\n def chop(self, begin, end, datafunc=None):\n \"\"\"\n Like remove_envelop(), but trims back Intervals hanging into\n the chopped area so that nothing overlaps.\n \"\"\"\n insertions = set()\n begin_hits = [iv for iv in self.at(begin) if iv.begin < begin]\n end_hits = [iv for iv in self.at(end) if iv.end > end]\n\n if datafunc:\n for iv in begin_hits:\n insertions.add(Interval(iv.begin, begin, datafunc(iv, True)))\n for iv in end_hits:\n insertions.add(Interval(end, iv.end, datafunc(iv, False)))\n else:\n for iv in begin_hits:\n insertions.add(Interval(iv.begin, begin, iv.data))\n for iv in end_hits:\n insertions.add(Interval(end, iv.end, iv.data))\n\n self.remove_envelop(begin, end)\n self.difference_update(begin_hits)\n self.difference_update(end_hits)\n self.update(insertions)\n\n def slice(self, point, datafunc=None):\n \"\"\"\n Split 
Intervals that overlap point into two new Intervals. if\n specified, uses datafunc(interval, islower=True/False) to\n set the data field of the new Intervals.\n :param point: where to slice\n :param datafunc(interval, isupper): callable returning a new\n value for the interval's data field\n \"\"\"\n hitlist = set(iv for iv in self.at(point) if iv.begin < point)\n insertions = set()\n if datafunc:\n for iv in hitlist:\n insertions.add(Interval(iv.begin, point, datafunc(iv, True)))\n insertions.add(Interval(point, iv.end, datafunc(iv, False)))\n else:\n for iv in hitlist:\n insertions.add(Interval(iv.begin, point, iv.data))\n insertions.add(Interval(point, iv.end, iv.data))\n self.difference_update(hitlist)\n self.update(insertions)\n\n def clear(self):\n \"\"\"\n Empties the tree.\n\n Completes in O(1) tine.\n \"\"\"\n self.__init__()\n\n def find_nested(self):\n \"\"\"\n Returns a dictionary mapping parent intervals to sets of\n intervals overlapped by and contained in the parent.\n\n Completes in O(n^2) time.\n :rtype: dict of [Interval, set of Interval]\n \"\"\"\n result = {}\n\n def add_if_nested():\n if parent.contains_interval(child):\n if parent not in result:\n result[parent] = set()\n result[parent].add(child)\n\n long_ivs = sorted(self.all_intervals, key=Interval.length, reverse=True)\n for i, parent in enumerate(long_ivs):\n for child in long_ivs[i + 1:]:\n add_if_nested()\n return result\n\n def overlaps(self, begin, end=None):\n \"\"\"\n Returns whether some interval in the tree overlaps the given\n point or range.\n\n Completes in O(r*log n) time, where r is the size of the\n search range.\n :rtype: bool\n \"\"\"\n if end is not None:\n return self.overlaps_range(begin, end)\n elif isinstance(begin, Number):\n return self.overlaps_point(begin)\n else:\n return self.overlaps_range(begin.begin, begin.end)\n\n def overlaps_point(self, p):\n \"\"\"\n Returns whether some interval in the tree overlaps p.\n\n Completes in O(log n) time.\n :rtype: bool\n \"\"\"\n if self.is_empty():\n return False\n return bool(self.top_node.contains_point(p))\n\n def overlaps_range(self, begin, end):\n \"\"\"\n Returns whether some interval in the tree overlaps the given\n range. Returns False if given a null interval over which to\n test.\n\n Completes in O(r*log n) time, where r is the range length and n\n is the table size.\n :rtype: bool\n \"\"\"\n if self.is_empty():\n return False\n elif begin >= end:\n return False\n elif self.overlaps_point(begin):\n return True\n return any(\n self.overlaps_point(bound)\n for bound in self.boundary_table\n if begin < bound < end\n )\n\n def split_overlaps(self):\n \"\"\"\n Finds all intervals with overlapping ranges and splits them\n along the range boundaries.\n\n Completes in worst-case O(n^2*log n) time (many interval\n boundaries are inside many intervals), best-case O(n*log n)\n time (small number of overlaps << n per interval).\n \"\"\"\n if not self:\n return\n if len(self.boundary_table) == 2:\n return\n\n bounds = sorted(self.boundary_table) # get bound locations\n\n new_ivs = set()\n for lbound, ubound in zip(bounds[:-1], bounds[1:]):\n for iv in self[lbound]:\n new_ivs.add(Interval(lbound, ubound, iv.data))\n\n self.__init__(new_ivs)\n\n def merge_overlaps(self, data_reducer=None, data_initializer=None, strict=True):\n \"\"\"\n Finds all intervals with overlapping ranges and merges them\n into a single interval. 
If provided, uses data_reducer and\n data_initializer with similar semantics to Python's built-in\n reduce(reducer_func[, initializer]), as follows:\n\n If data_reducer is set to a function, combines the data\n fields of the Intervals with\n current_reduced_data = data_reducer(current_reduced_data, new_data)\n If data_reducer is None, the merged Interval's data\n field will be set to None, ignoring all the data fields\n of the merged Intervals.\n\n On encountering the first Interval to merge, if\n data_initializer is None (default), uses the first\n Interval's data field as the first value for\n current_reduced_data. If data_initializer is not None,\n current_reduced_data is set to a shallow copy of\n data_initializer created with copy.copy(data_initializer).\n\n If strict is True (default), intervals are only merged if\n their ranges actually overlap; adjacent, touching intervals\n will not be merged. If strict is False, intervals are merged\n even if they are only end-to-end adjacent.\n\n Completes in O(n*logn).\n \"\"\"\n if not self:\n return\n\n sorted_intervals = sorted(self.all_intervals) # get sorted intervals\n merged = []\n # use mutable object to allow new_series() to modify it\n current_reduced = [None]\n higher = None # iterating variable, which new_series() needs access to\n\n def new_series():\n if data_initializer is None:\n current_reduced[0] = higher.data\n merged.append(higher)\n return\n else: # data_initializer is not None\n current_reduced[0] = copy(data_initializer)\n current_reduced[0] = data_reducer(current_reduced[0], higher.data)\n merged.append(Interval(higher.begin, higher.end, current_reduced[0]))\n\n for higher in sorted_intervals:\n if merged: # series already begun\n lower = merged[-1]\n if (higher.begin < lower.end or\n not strict and higher.begin == lower.end): # should merge\n upper_bound = max(lower.end, higher.end)\n if data_reducer is not None:\n current_reduced[0] = data_reducer(current_reduced[0], higher.data)\n else: # annihilate the data, since we don't know how to merge it\n current_reduced[0] = None\n merged[-1] = Interval(lower.begin, upper_bound, current_reduced[0])\n else:\n new_series()\n else: # not merged; is first of Intervals to merge\n new_series()\n\n self.__init__(merged)\n\n def merge_equals(self, data_reducer=None, data_initializer=None):\n \"\"\"\n Finds all intervals with equal ranges and merges them\n into a single interval. If provided, uses data_reducer and\n data_initializer with similar semantics to Python's built-in\n reduce(reducer_func[, initializer]), as follows:\n\n If data_reducer is set to a function, combines the data\n fields of the Intervals with\n current_reduced_data = data_reducer(current_reduced_data, new_data)\n If data_reducer is None, the merged Interval's data\n field will be set to None, ignoring all the data fields\n of the merged Intervals.\n\n On encountering the first Interval to merge, if\n data_initializer is None (default), uses the first\n Interval's data field as the first value for\n current_reduced_data. 
If data_initializer is not None,\n current_reduced_data is set to a shallow copy of\n data_initiazer created with\n copy.copy(data_initializer).\n\n Completes in O(n*logn).\n \"\"\"\n if not self:\n return\n\n sorted_intervals = sorted(self.all_intervals) # get sorted intervals\n merged = []\n # use mutable object to allow new_series() to modify it\n current_reduced = [None]\n higher = None # iterating variable, which new_series() needs access to\n\n def new_series():\n if data_initializer is None:\n current_reduced[0] = higher.data\n merged.append(higher)\n return\n else: # data_initializer is not None\n current_reduced[0] = copy(data_initializer)\n current_reduced[0] = data_reducer(current_reduced[0], higher.data)\n merged.append(Interval(higher.begin, higher.end, current_reduced[0]))\n\n for higher in sorted_intervals:\n if merged: # series already begun\n lower = merged[-1]\n if higher.range_matches(lower): # should merge\n upper_bound = max(lower.end, higher.end)\n if data_reducer is not None:\n current_reduced[0] = data_reducer(current_reduced[0], higher.data)\n else: # annihilate the data, since we don't know how to merge it\n current_reduced[0] = None\n merged[-1] = Interval(lower.begin, upper_bound, current_reduced[0])\n else:\n new_series()\n else: # not merged; is first of Intervals to merge\n new_series()\n\n self.__init__(merged)\n\n def items(self):\n \"\"\"\n Constructs and returns a set of all intervals in the tree.\n\n Completes in O(n) time.\n :rtype: set of Interval\n \"\"\"\n return set(self.all_intervals)\n\n def is_empty(self):\n \"\"\"\n Returns whether the tree is empty.\n\n Completes in O(1) time.\n :rtype: bool\n \"\"\"\n return 0 == len(self)\n\n def at(self, p):\n \"\"\"\n Returns the set of all intervals that contain p.\n\n Completes in O(m + log n) time, where:\n * n = size of the tree\n * m = number of matches\n :rtype: set of Interval\n \"\"\"\n root = self.top_node\n if not root:\n return set()\n return root.search_point(p, set())\n\n def envelop(self, begin, end=None):\n \"\"\"\n Returns the set of all intervals fully contained in the range\n [begin, end).\n\n Completes in O(m + k*log n) time, where:\n * n = size of the tree\n * m = number of matches\n * k = size of the search range\n :rtype: set of Interval\n \"\"\"\n root = self.top_node\n if not root:\n return set()\n if end is None:\n iv = begin\n return self.envelop(iv.begin, iv.end)\n elif begin >= end:\n return set()\n result = root.search_point(begin, set()) # bound_begin might be greater\n boundary_table = self.boundary_table\n bound_begin = boundary_table.bisect_left(begin)\n bound_end = boundary_table.bisect_left(end) # up to, but not including end\n result.update(root.search_overlap(\n # slice notation is slightly slower\n boundary_table.keys()[index] for index in xrange(bound_begin, bound_end)\n ))\n\n # TODO: improve envelop() to use node info instead of less-efficient filtering\n result = set(\n iv for iv in result\n if iv.begin >= begin and iv.end <= end\n )\n return result\n\n def overlap(self, begin, end=None):\n \"\"\"\n Returns a set of all intervals overlapping the given range.\n\n Completes in O(m + k*log n) time, where:\n * n = size of the tree\n * m = number of matches\n * k = size of the search range\n :rtype: set of Interval\n \"\"\"\n root = self.top_node\n if not root:\n return set()\n if end is None:\n iv = begin\n return self.overlap(iv.begin, iv.end)\n elif begin >= end:\n return set()\n result = root.search_point(begin, set()) # bound_begin might be greater\n boundary_table 
= self.boundary_table\n bound_begin = boundary_table.bisect_left(begin)\n bound_end = boundary_table.bisect_left(end) # up to, but not including end\n result.update(root.search_overlap(\n # slice notation is slightly slower\n boundary_table.keys()[index] for index in xrange(bound_begin, bound_end)\n ))\n return result\n\n def begin(self):\n \"\"\"\n Returns the lower bound of the first interval in the tree.\n\n Completes in O(1) time.\n \"\"\"\n if not self.boundary_table:\n return 0\n return self.boundary_table.keys()[0]\n\n def end(self):\n \"\"\"\n Returns the upper bound of the last interval in the tree.\n\n Completes in O(1) time.\n \"\"\"\n if not self.boundary_table:\n return 0\n return self.boundary_table.keys()[-1]\n\n def range(self):\n \"\"\"\n Returns a minimum-spanning Interval that encloses all the\n members of this IntervalTree. If the tree is empty, returns\n null Interval.\n :rtype: Interval\n \"\"\"\n return Interval(self.begin(), self.end())\n\n def span(self):\n \"\"\"\n Returns the length of the minimum-spanning Interval that\n encloses all the members of this IntervalTree. If the tree\n is empty, return 0.\n \"\"\"\n if not self:\n return 0\n return self.end() - self.begin()\n\n def print_structure(self, tostring=False):\n \"\"\"\n ## FOR DEBUGGING ONLY ##\n Pretty-prints the structure of the tree.\n If tostring is true, prints nothing and returns a string.\n :rtype: None or str\n \"\"\"\n if self.top_node:\n return self.top_node.print_structure(tostring=tostring)\n else:\n result = \"<empty IntervalTree>\"\n if not tostring:\n print(result)\n else:\n return result\n\n def verify(self):\n \"\"\"\n ## FOR DEBUGGING ONLY ##\n Checks the table to ensure that the invariants are held.\n \"\"\"\n if self.all_intervals:\n ## top_node.all_children() == self.all_intervals\n try:\n assert self.top_node.all_children() == self.all_intervals\n except AssertionError as e:\n print(\n 'Error: the tree and the membership set are out of sync!'\n )\n tivs = set(self.top_node.all_children())\n print('top_node.all_children() - all_intervals:')\n try:\n pprint\n except NameError:\n from pprint import pprint\n pprint(tivs - self.all_intervals)\n print('all_intervals - top_node.all_children():')\n pprint(self.all_intervals - tivs)\n raise e\n\n ## All members are Intervals\n for iv in self:\n assert isinstance(iv, Interval), (\n \"Error: Only Interval objects allowed in IntervalTree:\"\n \" {0}\".format(iv)\n )\n\n ## No null intervals\n for iv in self:\n assert not iv.is_null(), (\n \"Error: Null Interval objects not allowed in IntervalTree:\"\n \" {0}\".format(iv)\n )\n\n ## Reconstruct boundary_table\n bound_check = {}\n for iv in self:\n if iv.begin in bound_check:\n bound_check[iv.begin] += 1\n else:\n bound_check[iv.begin] = 1\n if iv.end in bound_check:\n bound_check[iv.end] += 1\n else:\n bound_check[iv.end] = 1\n\n ## Reconstructed boundary table (bound_check) ==? 
boundary_table\n assert set(self.boundary_table.keys()) == set(bound_check.keys()),\\\n 'Error: boundary_table is out of sync with ' \\\n 'the intervals in the tree!'\n\n # For efficiency reasons this should be iteritems in Py2, but we\n # don't care much for efficiency in debug methods anyway.\n for key, val in self.boundary_table.items():\n assert bound_check[key] == val, \\\n 'Error: boundary_table[{0}] should be {1},' \\\n ' but is {2}!'.format(\n key, bound_check[key], val)\n\n ## Internal tree structure\n self.top_node.verify(set())\n else:\n ## Verify empty tree\n assert not self.boundary_table, \\\n \"Error: boundary table should be empty!\"\n assert self.top_node is None, \\\n \"Error: top_node isn't None!\"\n\n def score(self, full_report=False):\n \"\"\"\n Returns a number between 0 and 1, indicating how suboptimal the tree\n is. The lower, the better. Roughly, this number represents the\n fraction of flawed Intervals in the tree.\n :rtype: float\n \"\"\"\n if len(self) <= 2:\n return 0.0\n\n n = len(self)\n m = self.top_node.count_nodes()\n\n def s_center_score():\n \"\"\"\n Returns a normalized score, indicating roughly how many times\n intervals share s_center with other intervals. Output is full-scale\n from 0 to 1.\n :rtype: float\n \"\"\"\n raw = n - m\n maximum = n - 1\n return raw / float(maximum)\n\n report = {\n \"depth\": self.top_node.depth_score(n, m),\n \"s_center\": s_center_score(),\n }\n cumulative = max(report.values())\n report[\"_cumulative\"] = cumulative\n if full_report:\n return report\n return cumulative\n\n\n def __getitem__(self, index):\n \"\"\"\n Returns a set of all intervals overlapping the given index or\n slice.\n\n Completes in O(k * log(n) + m) time, where:\n * n = size of the tree\n * m = number of matches\n * k = size of the search range (this is 1 for a point)\n :rtype: set of Interval\n \"\"\"\n try:\n start, stop = index.start, index.stop\n if start is None:\n start = self.begin()\n if stop is None:\n return set(self)\n if stop is None:\n stop = self.end()\n return self.overlap(start, stop)\n except AttributeError:\n return self.at(index)\n\n def __setitem__(self, index, value):\n \"\"\"\n Adds a new interval to the tree. 
A shortcut for\n add(Interval(index.start, index.stop, value)).\n\n If an identical Interval object with equal range and data\n already exists, does nothing.\n\n Completes in O(log n) time.\n \"\"\"\n self.addi(index.start, index.stop, value)\n\n def __delitem__(self, point):\n \"\"\"\n Delete all items overlapping point.\n \"\"\"\n self.remove_overlap(point)\n\n def __contains__(self, item):\n \"\"\"\n Returns whether item exists as an Interval in the tree.\n This method only returns True for exact matches; for\n overlaps, see the overlaps() method.\n\n Completes in O(1) time.\n :rtype: bool\n \"\"\"\n # Removed point-checking code; it might trick the user into\n # thinking that this is O(1), which point-checking isn't.\n #if isinstance(item, Interval):\n return item in self.all_intervals\n #else:\n # return self.contains_point(item)\n\n def containsi(self, begin, end, data=None):\n \"\"\"\n Shortcut for (Interval(begin, end, data) in tree).\n\n Completes in O(1) time.\n :rtype: bool\n \"\"\"\n return Interval(begin, end, data) in self\n\n def __iter__(self):\n \"\"\"\n Returns an iterator over all the intervals in the tree.\n\n Completes in O(1) time.\n :rtype: collections.Iterable[Interval]\n \"\"\"\n return self.all_intervals.__iter__()\n iter = __iter__\n\n def __len__(self):\n \"\"\"\n Returns how many intervals are in the tree.\n\n Completes in O(1) time.\n :rtype: int\n \"\"\"\n return len(self.all_intervals)\n\n def __eq__(self, other):\n \"\"\"\n Whether two IntervalTrees are equal.\n\n Completes in O(n) time if sizes are equal; O(1) time otherwise.\n :rtype: bool\n \"\"\"\n return (\n isinstance(other, IntervalTree) and\n self.all_intervals == other.all_intervals\n )\n\n def __repr__(self):\n \"\"\"\n :rtype: str\n \"\"\"\n ivs = sorted(self)\n if not ivs:\n return \"IntervalTree()\"\n else:\n return \"IntervalTree({0})\".format(ivs)\n\n __str__ = __repr__\n\n def __reduce__(self):\n \"\"\"\n For pickle-ing.\n :rtype: tuple\n \"\"\"\n return IntervalTree, (sorted(self.all_intervals),)" } ]
import sys
import os
import shlex
import ctypes
import re
import subprocess
import shared.param as param
from subprocess import PIPE
from argparse import ArgumentParser, SUPPRESS
from collections import defaultdict
from shared.utils import subprocess_popen, reference_sequence_from, IUPAC_base_to_ACGT_base_dict as BASE2ACGT, log_error
from shared.interval_tree import bed_tree_from
from shared.intervaltree.intervaltree import IntervalTree
14,016
start = reference_position end = reference_position + advance for ins_idx in range(start, end): pileup[ins_idx]["X"] += 1 # deletion consumes reference reference_position += advance # reset advance advance = 0 yield chunk_start, chunk_end yield None, None def reads_realignment(args): POS = args.pos args.ctg_start = POS - args.realign_flanking_window args.ctg_end = POS + args.realign_flanking_window bed_file_path = args.bed_fn extend_bed = args.extend_bed fasta_file_path = args.ref_fn ctg_name = args.ctg_name ctg_start = args.ctg_start ctg_end = args.ctg_end samtools_execute_command = args.samtools bam_file_path = args.bam_fn min_mq = args.min_mq min_coverage = args.min_coverage is_bed_file_given = bed_file_path is not None is_ctg_name_given = ctg_name is not None read_fn = args.read_fn global test_pos test_pos = None is_ctg_range_given = is_ctg_name_given and ctg_start is not None and ctg_end is not None ref_regions = [] reads_regions = [] reference_start, reference_end = None, None if is_ctg_range_given: extend_start = ctg_start - max_window_size extend_end = ctg_end + max_window_size reads_regions.append(region_from(ctg_name=ctg_name, ctg_start=extend_start, ctg_end=extend_end)) reference_start, reference_end = ctg_start - expandReferenceRegion, ctg_end + expandReferenceRegion reference_start = 1 if reference_start < 1 else reference_start ref_regions.append(region_from(ctg_name=ctg_name, ctg_start=reference_start, ctg_end=reference_end)) elif is_ctg_name_given: reads_regions.append(region_from(ctg_name=ctg_name)) ref_regions.append(region_from(ctg_name=ctg_name)) reference_start = 1 reference_sequence = reference_sequence_from( samtools_execute_command=samtools_execute_command, fasta_file_path=fasta_file_path, regions=ref_regions ) if reference_sequence is None or len(reference_sequence) == 0: sys.exit("[ERROR] Failed to load reference sequence from file ({}).".format(fasta_file_path)) tree = bed_tree_from(bed_file_path=bed_file_path) if is_bed_file_given and ctg_name not in tree: sys.exit("[ERROR] ctg_name({}) not exists in bed file({}).".format(ctg_name, bed_file_path)) bed_option = ' -L {}'.format(extend_bed) if extend_bed else "" bed_option = ' -L {}'.format(bed_file_path) if is_bed_file_given else bed_option mq_option = ' -q {}'.format(min_mq) if min_mq > 0 else "" samtools_view_command = "{} view -h {} {}".format(samtools_execute_command, bam_file_path, " ".join(reads_regions)) + mq_option + bed_option samtools_view_process = subprocess_popen( shlex.split(samtools_view_command) ) if read_fn and read_fn == 'PIPE': save_file_fp = TensorStdout(sys.stdout) elif read_fn: save_file_fp = subprocess_popen(shlex.split("{} view -bh - -o {}".format(samtools_execute_command, read_fn + ( '.{}_{}'.format(ctg_start, ctg_end) if is_ctg_range_given and not test_pos else ""))), stdin=PIPE, stdout=PIPE) reference_start_0_based = 0 if reference_start is None else (reference_start - 1) header = [] add_header = False aligned_reads = defaultdict() pileup = defaultdict(lambda: {"X": 0}) samtools_view_generator = samtools_view_generator_from(samtools_view_process=samtools_view_process, aligned_reads=aligned_reads, pileup=pileup, ctg_name=ctg_name, reference_sequence=reference_sequence, reference_start_0_based=reference_start_0_based, header=header, center_pos=POS) pre_aligned_reads = defaultdict() while True: chunk_start, chunk_end = next(samtools_view_generator) if chunk_start is None: break if not add_header: save_file_fp.stdin.write(''.join(header)) add_header = True variant_allele_list = [[position, 
pileup[position]["X"]] for position in list(pileup.keys())] candidate_position_list = [(position, support_allele_count) for position, support_allele_count in variant_allele_list if support_allele_count >= min_coverage and position >= chunk_start - region_expansion_in_bp - 1 and position <= chunk_end + region_expansion_in_bp - 1] candidate_position_list.sort(key=(lambda x: x[0])) candidate_position_list = [item for item in candidate_position_list if item[0] >= POS - args.max_distance and item[0] < POS + args.max_distance] if not len(aligned_reads) or not len(candidate_position_list): continue if len(pre_aligned_reads): # update the read in previous chunk for read_name, read in pre_aligned_reads.items(): aligned_reads[read_name] = read region_dict = {} split_region_size = max_window_size
# BSD 3-Clause License # # Copyright 2023 The University of Hong Kong, Department of Computer Science # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. realign_chunk_size = 5000 min_dbg_mapping_quality = min_dbg_base_quality = 20 region_expansion_in_bp = expand_align_ref_region = 20 min_windows_distance = expand_align_ref_region * 4 max_window_size = max_region_reads_num = 1000 expandReferenceRegion = 100000 realigner_mod = os.path.join(*(os.path.split(__file__)[:-1] + ('realign/realigner',))) dbg_mod = os.path.join(*(os.path.split(__file__)[:-1] + ('realign/debruijn_graph',))) if not os.path.exists(realigner_mod) or not os.path.exists(dbg_mod): # try to find modules in clair3 python_path = subprocess.run('which python', stdout=subprocess.PIPE, shell=True).stdout.decode().rstrip() conda_prefix = os.path.dirname(os.path.dirname(python_path)) clair3_realign_path = os.path.join(conda_prefix, 'bin', 'preprocess', 'realign') clair3_realigner_mod = os.path.join(clair3_realign_path, 'realigner') clair3_dbg_mod = os.path.join(clair3_realign_path, 'debruijn_graph') if os.path.exists(clair3_realigner_mod) and os.path.exists(clair3_dbg_mod): realigner_mod = clair3_realigner_mod dbg_mod = clair3_dbg_mod else: print(log_error("[ERROR] `realigner` or `debruijn_graph` submodule not found in conda environment, pls install clair3-illumina package!")) sys.exit(1) realigner = ctypes.cdll.LoadLibrary(realigner_mod) dbg = ctypes.cdll.LoadLibrary(dbg_mod) class StructPointer(ctypes.Structure): _fields_ = [("position", ctypes.c_int * max_region_reads_num), ("cigar_string", ctypes.c_char_p * max_region_reads_num), ] class DBGPointer(ctypes.Structure): _fields_ = [("consensus_size", ctypes.c_int), ("consensus", ctypes.c_char_p * 200), ] # Read class for storing read information cigar_indel_re = r"(\d+)(D)" cigarRe = r"(\d+)([MIDNSHP=X])" graph_min_mapping_quality = 14 def get_len(seq, cigar): if 'D' not in cigar: return len(seq) indel_length = 0 for m in re.finditer(cigar_indel_re, cigar): indel_length += int(m.group(1)) return len(seq) + indel_length def print_ed(s1, s2): match_str = "" for x, 
y in zip(s1, s2): if x == y: match_str += "|" else: match_str += " " print(s1) print(match_str) print(s2) class Read(object): def __init__(self, read_start, seq, cigar, mapping_quality, base_quality, strand, raw_base_quality=None, unalign=False, read_name=None, read_id=None, flag=None, RNEXT=0, PNEXT=0, TLEN=0, phasing=None): self.read_start = read_start self.cigar = cigar self.mapping_quality = mapping_quality self.seq = seq self.base_quality = base_quality self.read_id = read_id self.read_end = self.read_start + get_len(seq, cigar) self.strand = strand self.graph_mq = True if self.mapping_quality >= graph_min_mapping_quality else False self.raw_base_quality = raw_base_quality self.read_name = read_name self.region = {} self.region_cigar = None self.region_start = None self.flag = str(flag) self.RNEXT = RNEXT self.PNEXT = PNEXT self.TLEN = PNEXT self.test_pos = None self.best_cigar = cigar self.best_pos = read_start self.best_align_score = None self.phasing = phasing def set_realign_flag(self): self.unalign = True def count_align_score(self, cigar): score = 0 for m in re.finditer(cigarRe, cigar): l, op, = int(m.group(1)), m.group(2) if op in 'MX=S': continue elif op in 'ID': score += l return score def set_realignment_info(self, region_start, realignment_cigar, realignment_start): realignment_cigar = realignment_cigar.replace('X', 'M') if realignment_cigar == self.cigar and realignment_start == self.read_start: return if self.best_align_score and realignment_cigar == self.best_cigar and realignment_start == self.best_pos: return realignment_align_score = self.count_align_score(realignment_cigar) if not self.best_align_score or realignment_align_score >= self.best_align_score: self.best_cigar = realignment_cigar self.best_pos = realignment_start self.best_align_score = realignment_align_score def decode_region(self, region_str): if region_str == '-' or '-' not in region_str: return region_str = region_str.rstrip().split('_') for region in region_str: region, cigar, pos = region.split('-') region, pos = int(region), int(pos) self.region[region] = [cigar, pos] def byte(x): return bytes(x, encoding="utf8") def find_max_overlap_index(query_region, search_regions): def overlap_length(region1, region2): return max(0, (min(region1[1], region2[1]) - max(region1[0], region2[0]))) overlap_lengths = [overlap_length(query_region, search_region) for search_region in search_regions] argmax = max(range(len(search_regions)), key=lambda idx: overlap_lengths[idx]) return None if overlap_lengths[argmax] == 0 else argmax def get_reference_seq(sequence, start, end, reference_start_0_based): if end < start: end, start = start, end return sequence[start - reference_start_0_based: end - reference_start_0_based] def phredscore2raw_score(qual): return ord(qual) - 33 def evc_base_from(base): return base if base == "N" else BASE2ACGT[base] def region_from(ctg_name, ctg_start=None, ctg_end=None): """ 1-based region string [start, end] """ if ctg_name is None: return "" if (ctg_start is None) != (ctg_end is None): return "" if ctg_start is None and ctg_end is None: return "{}".format(ctg_name) return "{}:{}-{}".format(ctg_name, ctg_start, ctg_end) class TensorStdout(object): def __init__(self, handle): self.stdin = handle def __del__(self): self.stdin.close() def get_halpotype_tag(samtools_view_columns): found_hp_tag = False tag = [c for c in samtools_view_columns if 'HP:i:' in c] if not len(tag) or len(tag[0]) < 6 or not tag[0][5].isdigit(): return None return tag[0][5] def 
is_too_many_soft_clipped_bases_for_a_read_from(CIGAR): soft_clipped_bases = 0 total_alignment_positions = 0 advance = 0 for c in str(CIGAR): if c.isdigit(): advance = advance * 10 + int(c) continue if c == "S": soft_clipped_bases += advance total_alignment_positions += advance advance = 0 # skip a read less than 55% aligned return 1.0 - float(soft_clipped_bases) / (total_alignment_positions + 1) < 0.55 def samtools_view_generator_from(samtools_view_process, aligned_reads, pileup, ctg_name, reference_sequence, reference_start_0_based, header, center_pos=None): CHUNK_SIZE = realign_chunk_size chunk_start, chunk_end = None, None for row_id, row in enumerate(samtools_view_process.stdout): if row[0] == '@': header.append(row) continue columns = row.strip().split() RNAME = columns[2] if RNAME != ctg_name: continue read_name = columns[0] FLAG = int(columns[1]) POS = int(columns[3]) - 1 # switch from 1-base to 0-base to match sequence index MAPQ = int(columns[4]) CIGAR = columns[5] SEQ = columns[9].upper() # uppercase for SEQ (regexp is \*|[A-Za-z=.]+) RNEXT = columns[6] PNEXT = columns[7] TLEN = columns[8] reference_position = POS query_position = 0 raw_base_quality = columns[10] QUAL = [phredscore2raw_score(item) for item in raw_base_quality] STRAND = (16 == (FLAG & 16)) HP_TAG = get_halpotype_tag(columns[11:]) read_name += "_" + str(int(STRAND)) # distinguish two strand if chunk_start is None: chunk_start = POS chunk_end = chunk_start + CHUNK_SIZE if POS >= chunk_end + region_expansion_in_bp: yield chunk_start, chunk_end chunk_start += CHUNK_SIZE chunk_end += CHUNK_SIZE read = Read(read_start=POS, seq=SEQ, cigar=CIGAR, mapping_quality=MAPQ, base_quality=QUAL, strand=STRAND, raw_base_quality=raw_base_quality, read_name=read_name, flag=FLAG, PNEXT=PNEXT, RNEXT=RNEXT, TLEN=TLEN, phasing=HP_TAG) if CIGAR == "*" or is_too_many_soft_clipped_bases_for_a_read_from(CIGAR): continue aligned_reads[read_name] = read if MAPQ < min_dbg_mapping_quality: continue advance = 0 for c in str(CIGAR): if c.isdigit(): advance = advance * 10 + int(c) continue if c == '=': reference_position += advance query_position += advance elif c == "M" or c == 'X': for _ in range(advance): if QUAL[query_position] >= min_dbg_base_quality: reference_base = reference_sequence[reference_position - reference_start_0_based] # 0 base query_base = SEQ[query_position] if reference_base in 'ACGT' and query_base != reference_base: pileup[reference_position]['X'] += 1 reference_position += 1 query_position += 1 elif c == "I" or c == 'S': pre_base = reference_sequence[reference_position - reference_start_0_based - 1] ins_base_quality = QUAL[query_position: query_position + advance] out_of_region = reference_position < chunk_start - region_expansion_in_bp or reference_position > chunk_end + region_expansion_in_bp if not out_of_region and pre_base in 'ACGT' and ( sum([True for bq in ins_base_quality if bq < min_dbg_base_quality]) == 0): # skip the bad seq start = reference_position - advance end = reference_position + advance for ins_idx in range(start, end): pileup[ins_idx]["X"] += 1 # insertion consumes query query_position += advance elif c == "D": out_of_region = reference_position < chunk_start - region_expansion_in_bp or reference_position > chunk_end + region_expansion_in_bp pre_base = reference_sequence[reference_position - reference_start_0_based - 1] # 0-base if not out_of_region and pre_base in 'ACGT': start = reference_position end = reference_position + advance for ins_idx in range(start, end): pileup[ins_idx]["X"] += 1 # deletion 
consumes reference
                reference_position += advance

            # reset advance
            advance = 0

        yield chunk_start, chunk_end
    yield None, None


def reads_realignment(args):
    POS = args.pos
    args.ctg_start = POS - args.realign_flanking_window
    args.ctg_end = POS + args.realign_flanking_window
    bed_file_path = args.bed_fn
    extend_bed = args.extend_bed
    fasta_file_path = args.ref_fn
    ctg_name = args.ctg_name
    ctg_start = args.ctg_start
    ctg_end = args.ctg_end
    samtools_execute_command = args.samtools
    bam_file_path = args.bam_fn
    min_mq = args.min_mq
    min_coverage = args.min_coverage
    is_bed_file_given = bed_file_path is not None
    is_ctg_name_given = ctg_name is not None
    read_fn = args.read_fn
    global test_pos
    test_pos = None

    is_ctg_range_given = is_ctg_name_given and ctg_start is not None and ctg_end is not None

    ref_regions = []
    reads_regions = []
    reference_start, reference_end = None, None
    if is_ctg_range_given:
        extend_start = ctg_start - max_window_size
        extend_end = ctg_end + max_window_size
        reads_regions.append(region_from(ctg_name=ctg_name, ctg_start=extend_start, ctg_end=extend_end))
        reference_start, reference_end = ctg_start - expandReferenceRegion, ctg_end + expandReferenceRegion
        reference_start = 1 if reference_start < 1 else reference_start
        ref_regions.append(region_from(ctg_name=ctg_name, ctg_start=reference_start, ctg_end=reference_end))
    elif is_ctg_name_given:
        reads_regions.append(region_from(ctg_name=ctg_name))
        ref_regions.append(region_from(ctg_name=ctg_name))
        reference_start = 1

    reference_sequence = reference_sequence_from(
        samtools_execute_command=samtools_execute_command,
        fasta_file_path=fasta_file_path,
        regions=ref_regions
    )
    if reference_sequence is None or len(reference_sequence) == 0:
        sys.exit("[ERROR] Failed to load reference sequence from file ({}).".format(fasta_file_path))

    tree = bed_tree_from(bed_file_path=bed_file_path)
    if is_bed_file_given and ctg_name not in tree:
        sys.exit("[ERROR] ctg_name({}) not exists in bed file({}).".format(ctg_name, bed_file_path))

    bed_option = ' -L {}'.format(extend_bed) if extend_bed else ""
    bed_option = ' -L {}'.format(bed_file_path) if is_bed_file_given else bed_option
    mq_option = ' -q {}'.format(min_mq) if min_mq > 0 else ""
    samtools_view_command = "{} view -h {} {}".format(
        samtools_execute_command, bam_file_path, " ".join(reads_regions)) + mq_option + bed_option
    samtools_view_process = subprocess_popen(
        shlex.split(samtools_view_command)
    )

    if read_fn and read_fn == 'PIPE':
        save_file_fp = TensorStdout(sys.stdout)
    elif read_fn:
        save_file_fp = subprocess_popen(shlex.split("{} view -bh - -o {}".format(samtools_execute_command, read_fn + (
            '.{}_{}'.format(ctg_start, ctg_end) if is_ctg_range_given and not test_pos else ""))), stdin=PIPE, stdout=PIPE)

    reference_start_0_based = 0 if reference_start is None else (reference_start - 1)

    header = []
    add_header = False
    aligned_reads = defaultdict()
    pileup = defaultdict(lambda: {"X": 0})
    samtools_view_generator = samtools_view_generator_from(
        samtools_view_process=samtools_view_process,
        aligned_reads=aligned_reads,
        pileup=pileup,
        ctg_name=ctg_name,
        reference_sequence=reference_sequence,
        reference_start_0_based=reference_start_0_based,
        header=header,
        center_pos=POS)
    pre_aligned_reads = defaultdict()
    while True:
        chunk_start, chunk_end = next(samtools_view_generator)
        if chunk_start is None:
            break
        if not add_header:
            save_file_fp.stdin.write(''.join(header))
            add_header = True

        variant_allele_list = [[position, pileup[position]["X"]] for position in list(pileup.keys())]
        candidate_position_list = [(position, support_allele_count) for position, support_allele_count in variant_allele_list
                                   if support_allele_count >= min_coverage
                                   and position >= chunk_start - region_expansion_in_bp - 1
                                   and position <= chunk_end + region_expansion_in_bp - 1]
        candidate_position_list.sort(key=(lambda x: x[0]))
        candidate_position_list = [item for item in candidate_position_list
                                   if item[0] >= POS - args.max_distance and item[0] < POS + args.max_distance]

        if not len(aligned_reads) or not len(candidate_position_list):
            continue

        if len(pre_aligned_reads):
            # update the read in previous chunk
            for read_name, read in pre_aligned_reads.items():
                aligned_reads[read_name] = read

        region_dict = {}
        split_region_size = max_window_size
next_line: region_tree = IntervalTree()
gold_snippet_index: 2
created_at: 2023-11-07 04:39:16+00:00
level: 16k
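The realignment code in the record above streams reads by piping samtools view through a subprocess and scanning the SAM lines it emits. Below is a minimal standalone sketch of that pattern using only the Python standard library; the BAM path, region, and per-read handling are illustrative placeholders, and the snippet's own helpers (subprocess_popen, region_from, TensorStdout) are project-specific and not reproduced here.

import shlex
import subprocess

# Placeholder inputs; the record's code derives these from argparse values.
bam_path = "sample.bam"
region = "chr20:100000-101000"
min_mq = 20

# Command assembled the same way as in the snippet: "samtools view -h" plus
# optional filters appended as plain text (samtools must be on PATH).
cmd = "samtools view -h {} {}".format(bam_path, region) + " -q {}".format(min_mq)

proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, text=True)
header_lines = []
for line in proc.stdout:
    if line.startswith("@"):        # SAM header records come first
        header_lines.append(line)
        continue
    columns = line.rstrip("\n").split("\t")
    read_name, flag, ctg, pos = columns[0], columns[1], columns[2], columns[3]
    # ...per-read bookkeeping (pileup counts, candidate positions) would go here...
proc.stdout.close()
proc.wait()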
repo_name: the-siesta-group/edfio
file_path: tests/test_edfplus_header.py
[ { "identifier": "AnonymizedDateError", "path": "edfio/edf.py", "snippet": "class AnonymizedDateError(ValueError):\n \"\"\"Raised when trying to access an anonymized startdate or birthdate.\"\"\"" }, { "identifier": "Edf", "path": "edfio/edf.py", "snippet": "class Edf:\n \"\"\"Python representation of an EDF file.\n\n EDF header fields are exposed as properties with appropriate data types (i.e.,\n string, numeric, date, or time objects). Fields that might break the file on\n modification (i.e., `version`, `bytes_in_header_record`, `reserved`,\n `num_data_records`, `data_record_duration`, and `num_signals`) can not be set after\n instantiation.\n\n Note that the startdate has to be set via the parameter `recording`.\n\n For writing an EDF file with a non-integer seconds duration, currently an\n appropriate value for `data_record_duration` has to be provided manually.\n\n Parameters\n ----------\n signals : Sequence[EdfSignal]\n The (non-annotation) signals to be contained in the EDF file.\n patient : Patient | None, default: None\n The \"local patient identification\", containing patient code, sex, birthdate,\n name, and optional additional fields. If `None`, the field is set to `X X X X`\n in accordance with EDF+ specs.\n recording : Recording | None, default: None\n The \"local recording identification\", containing recording startdate, hospital\n administration code, investigator/technical code, equipment code, and optional\n additional fields. If `None`, the field is set to `Startdate X X X X` in\n accordance with EDF+ specs.\n starttime : datetime.time | None, default: None\n The starttime of the recording. If `None`, `00.00.00` is used. If `starttime`\n contains microseconds, an EDF+C file is created.\n data_record_duration : float | None, default: None\n The duration of each data record in seconds. If `None`, an appropriate value is\n chosen automatically.\n annotations : Iterable[EdfAnnotation] | None, default: None\n The annotations, consisting of onset, duration (optional), and text. 
If not\n `None`, an EDF+C file is created.\n \"\"\"\n\n version = RawHeaderFieldInt(8)\n \"\"\"EDF version, always `0`\"\"\"\n local_patient_identification = RawHeaderFieldStr(80, is_settable=True)\n \"\"\"\n Unparsed string representation of the legacy local patient identification.\n\n See also\n --------\n patient: Parsed representation, as a :class:`Patient` object.\n \"\"\"\n local_recording_identification = RawHeaderFieldStr(80, is_settable=True)\n \"\"\"\n Unparsed string representation of the legacy local recording identification.\n\n See also\n --------\n recording: Parsed representation, as a :class:`Recording` object.\n \"\"\"\n _startdate = RawHeaderFieldDate(8, is_settable=True)\n _starttime = RawHeaderFieldTime(8, is_settable=True)\n bytes_in_header_record = RawHeaderFieldInt(8)\n \"\"\"Number of bytes in the header record.\"\"\"\n reserved = RawHeaderFieldStr(44)\n \"\"\"`\"EDF+C\"` for an EDF+C file, else `\"\"`.\"\"\"\n num_data_records = RawHeaderFieldInt(8)\n \"\"\"Number of data records in the recording.\"\"\"\n _data_record_duration = RawHeaderFieldFloat(8, is_settable=True)\n _num_signals = RawHeaderFieldInt(4, is_settable=True)\n\n def __init__(\n self,\n signals: Sequence[EdfSignal],\n *,\n patient: Patient | None = None,\n recording: Recording | None = None,\n starttime: datetime.time | None = None,\n data_record_duration: float | None = None,\n annotations: Iterable[EdfAnnotation] | None = None,\n ):\n if not signals and not annotations:\n raise ValueError(\"Edf must contain either signals or annotations\")\n if patient is None:\n patient = Patient()\n if recording is None:\n recording = Recording()\n if starttime is None:\n starttime = datetime.time(0, 0, 0)\n if data_record_duration is None:\n data_record_duration = _calculate_data_record_duration(signals)\n elif len(signals) == 0 and data_record_duration != 0:\n raise ValueError(\n \"Data record duration must be zero for annotation-only files\"\n )\n\n self._data_record_duration = data_record_duration\n self._set_num_data_records_with_signals(signals)\n self._version = Edf.version.encode(0)\n self.local_patient_identification = patient._to_str()\n self.local_recording_identification = recording._to_str()\n self._set_startdate_with_recording(recording)\n self._starttime = starttime.replace(microsecond=0)\n self._reserved = Edf.reserved.encode(\"\")\n if starttime.microsecond and annotations is None:\n warnings.warn(\"Creating EDF+C to store microsecond starttime.\")\n if annotations is not None or starttime.microsecond:\n signals = (\n *signals,\n _create_annotations_signal(\n annotations if annotations is not None else (),\n num_data_records=self.num_data_records,\n data_record_duration=self.data_record_duration,\n subsecond_offset=starttime.microsecond / 1_000_000,\n ),\n )\n self._reserved = Edf.reserved.encode(\"EDF+C\")\n self._set_signals(signals)\n\n def __repr__(self) -> str:\n signals_text = f\"{len(self.signals)} signal\"\n if len(self.signals) != 1:\n signals_text += \"s\"\n annotations_text = f\"{len(self.annotations)} annotation\"\n if len(self.annotations) != 1:\n annotations_text += \"s\"\n return f\"<Edf {signals_text} {annotations_text}>\"\n\n def _load_data(self, file: Path | io.BufferedReader | io.BytesIO) -> None:\n lens = [signal.samples_per_data_record for signal in self._signals]\n datarecord_len = sum(lens)\n if not isinstance(file, Path):\n datarecords = np.frombuffer(file.read(), dtype=np.int16)\n else:\n datarecords = np.memmap(\n file,\n dtype=np.int16,\n mode=\"r\",\n 
offset=self.bytes_in_header_record,\n )\n datarecords.shape = (self.num_data_records, datarecord_len)\n ends = np.cumsum(lens)\n starts = ends - lens\n\n for signal, start, end in zip(self._signals, starts, ends):\n signal._digital = datarecords[:, start:end].flatten()\n\n def _read_header(self, buffer: io.BufferedReader | io.BytesIO) -> None:\n for header_name, length in get_header_fields(Edf):\n setattr(self, \"_\" + header_name, buffer.read(length))\n self._signals = self._parse_signal_headers(buffer.read(256 * self._num_signals))\n\n @property\n def signals(self) -> tuple[EdfSignal, ...]:\n \"\"\"\n Ordinary signals contained in the recording.\n\n Annotation signals are excluded. Individual signals can not be removed, added,\n or replaced by modifying this property. Use :meth:`Edf.append_signals`,\n :meth:`Edf.drop_signals`, or :attr:`EdfSignal.data`, respectively.\n \"\"\"\n return tuple(s for s in self._signals if s.label != \"EDF Annotations\")\n\n def _set_signals(self, signals: Sequence[EdfSignal]) -> None:\n signals = tuple(signals)\n self._set_num_data_records_with_signals(signals)\n self._signals = signals\n self._bytes_in_header_record = Edf.bytes_in_header_record.encode(\n 256 * (len(signals) + 1)\n )\n self._num_signals = len(signals)\n if all(s.label == \"EDF Annotations\" for s in signals):\n self._data_record_duration = 0\n\n def _set_num_data_records_with_signals(\n self,\n signals: Sequence[EdfSignal],\n ) -> None:\n if not signals:\n num_data_records = 1\n else:\n signal_durations = [\n round(len(s._digital) / s.sampling_frequency, 12) for s in signals\n ]\n if any(v != signal_durations[0] for v in signal_durations[1:]):\n raise ValueError(\n f\"Inconsistent signal durations (in seconds): {signal_durations}\"\n )\n num_data_records = _calculate_num_data_records(\n signal_durations[0],\n self.data_record_duration,\n )\n signal_lengths = [len(s._digital) for s in signals]\n if any(l % num_data_records for l in signal_lengths):\n raise ValueError(\n f\"Not all signal lengths can be split into {num_data_records} data records: {signal_lengths}\"\n )\n self._num_data_records = Edf.num_data_records.encode(num_data_records)\n\n def _parse_signal_headers(self, raw_signal_headers: bytes) -> tuple[EdfSignal, ...]:\n raw_headers_split: dict[str, list[bytes]] = {}\n start = 0\n for header_name, length in get_header_fields(EdfSignal):\n end = start + length * self._num_signals\n raw_header = raw_signal_headers[start:end]\n raw_headers_split[header_name] = [\n raw_header[i : length + i] for i in range(0, len(raw_header), length)\n ]\n start = end\n signals = []\n for i in range(self._num_signals):\n raw_signal_header = {\n key: raw_headers_split[key][i] for key in raw_headers_split\n }\n try:\n sampling_frequency = (\n int(raw_signal_header[\"samples_per_data_record\"])\n / self.data_record_duration\n )\n except ZeroDivisionError:\n if raw_signal_header[\"_label\"].rstrip() == b\"EDF Annotations\":\n sampling_frequency = 0\n signals.append(\n EdfSignal._from_raw_header(sampling_frequency, **raw_signal_header)\n )\n return tuple(signals)\n\n def write(self, target: Path | str | io.BufferedWriter | io.BytesIO) -> None:\n \"\"\"\n Write an Edf to a file or file-like object.\n\n Parameters\n ----------\n target : Path | str | io.BufferedWriter | io.BytesIO\n The file location (path object or string) or file-like object to write to.\n \"\"\"\n if self.num_data_records == -1:\n warnings.warn(\"num_data_records=-1, determining correct value from data\")\n num_data_records = 
_calculate_num_data_records(\n len(self._signals[0]._digital) * self._signals[0].sampling_frequency,\n self.data_record_duration,\n )\n else:\n num_data_records = self.num_data_records\n for signal in self._signals:\n signal._samples_per_data_record = EdfSignal.samples_per_data_record.encode( # type: ignore[attr-defined]\n len(signal._digital) // num_data_records\n )\n header_records = []\n for header_name, _ in get_header_fields(Edf):\n header_records.append(getattr(self, \"_\" + header_name))\n for header_name, _ in get_header_fields(EdfSignal):\n for signal in self._signals:\n header_records.append(getattr(signal, \"_\" + header_name))\n header_record = b\"\".join(header_records)\n\n lens = [signal.samples_per_data_record for signal in self._signals]\n ends = np.cumsum(lens)\n starts = ends - lens\n data_record = np.empty((num_data_records, sum(lens)), dtype=np.int16)\n for signal, start, end in zip(self._signals, starts, ends):\n data_record[:, start:end] = signal._digital.reshape((-1, end - start))\n\n if isinstance(target, str):\n target = Path(target)\n if isinstance(target, io.BufferedWriter):\n target.write(header_record)\n data_record.tofile(target)\n elif isinstance(target, io.BytesIO):\n target.write(header_record)\n target.write(data_record.tobytes())\n else:\n with target.expanduser().open(\"wb\") as file:\n file.write(header_record)\n data_record.tofile(file)\n\n @property\n def labels(self) -> tuple[str, ...]:\n \"\"\"\n The labels of all signals contained in the Edf.\n\n Returns\n -------\n tuple[str, ...]\n The labels, in order of the signals.\n \"\"\"\n return tuple(s.label for s in self.signals)\n\n def get_signal(self, label: str) -> EdfSignal:\n \"\"\"\n Retrieve a single signal by its label.\n\n The label has to be unique - a ValueError is raised if it is ambiguous or does\n not exist.\n\n Parameters\n ----------\n label : str\n A label identifying a single signal\n\n Returns\n -------\n EdfSignal\n The signal corresponding to the given label.\n \"\"\"\n count = self.labels.count(label)\n if count == 0:\n raise ValueError(\n f\"No signal with label {label!r}, possible options: {self.labels}\"\n )\n if count > 1:\n indices = [i for i, l in enumerate(self.labels) if l == label]\n raise ValueError(f\"Ambiguous label {label!r} identifies indices {indices}\")\n return self.signals[self.labels.index(label)]\n\n @property\n def patient(self) -> Patient:\n \"\"\"\n Parsed object representation of the local patient identification.\n\n See :class:`Patient` for information on its attributes.\n \"\"\"\n return Patient._from_str(self.local_patient_identification)\n\n @patient.setter\n def patient(self, patient: Patient) -> None:\n self.local_patient_identification = patient._to_str()\n\n @property\n def recording(self) -> Recording:\n \"\"\"\n Parsed object representation of the local recording identification.\n\n See :class:`Recording` for information on its attributes.\n \"\"\"\n return Recording._from_str(self.local_recording_identification)\n\n @recording.setter\n def recording(self, recording: Recording) -> None:\n self._set_startdate_with_recording(recording)\n self.local_recording_identification = recording._to_str()\n\n @property\n def startdate(self) -> datetime.date:\n \"\"\"\n Recording startdate.\n\n If the :attr:`local_recording_identification` conforms to the EDF+ standard, the\n startdate provided there is used. If not, this falls back to the legacy\n :attr:`startdate` field. If both differ, a warning is issued and the EDF+ field\n is preferred. 
Raises an `AnonymizedDateError` if the EDF+ field is anonymized\n (i.e., begins with `Startdate X`).\n \"\"\"\n with contextlib.suppress(Exception):\n if self._startdate != self.recording.startdate:\n warnings.warn(\n f\"Different values in startdate fields: {self._startdate}, {self.recording.startdate}\"\n )\n try:\n return self.recording.startdate\n except AnonymizedDateError:\n raise\n except ValueError:\n return self._startdate\n\n @startdate.setter\n def startdate(self, startdate: datetime.date) -> None:\n self._startdate = startdate\n try:\n self.recording.startdate # noqa: B018\n except AnonymizedDateError:\n pass\n except Exception:\n return\n recording_subfields = self.local_recording_identification.split()\n recording_subfields[1] = encode_edfplus_date(startdate)\n self.local_recording_identification = \" \".join(recording_subfields)\n\n @property\n def _subsecond_offset(self) -> float:\n try:\n timekeeping_raw = self._timekeeping_signal._digital.tobytes()\n first_data_record = timekeeping_raw[: timekeeping_raw.find(b\"\\x00\") + 1]\n return _EdfAnnotationsDataRecord.from_bytes(first_data_record).tals[0].onset\n except StopIteration:\n return 0\n\n @property\n def starttime(self) -> datetime.time:\n \"\"\"\n Recording starttime.\n\n In EDF+ files, microsecond accuracy is supported.\n \"\"\"\n subsecond_offset = self._subsecond_offset\n try:\n return self._starttime.replace(\n microsecond=round(subsecond_offset * 1000000)\n )\n except ValueError as e:\n raise ValueError(\n f\"Subsecond offset in first annotation must be 0.X, is {subsecond_offset}\"\n ) from e\n\n @starttime.setter\n def starttime(self, starttime: datetime.time) -> None:\n onset_change = starttime.microsecond / 1000000 - self._subsecond_offset\n self._starttime = starttime.replace(microsecond=0)\n if starttime.microsecond != self.starttime.microsecond:\n timekeeping_signal = self._timekeeping_signal\n data_records = []\n for data_record in timekeeping_signal._digital.reshape(\n (-1, timekeeping_signal.samples_per_data_record)\n ):\n annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())\n for tal in annot_dr.tals:\n tal.onset = round(tal.onset + onset_change, 12)\n data_records.append(annot_dr.to_bytes())\n maxlen = max(len(data_record) for data_record in data_records)\n if maxlen % 2:\n maxlen += 1\n raw = b\"\".join(dr.ljust(maxlen, b\"\\x00\") for dr in data_records)\n timekeeping_signal._samples_per_data_record = EdfSignal.samples_per_data_record.encode( # type: ignore[attr-defined]\n maxlen // 2\n )\n timekeeping_signal._sampling_frequency = (\n maxlen // 2 * self.data_record_duration\n )\n timekeeping_signal._digital = np.frombuffer(raw, dtype=np.int16)\n\n def _set_startdate_with_recording(self, recording: Recording) -> None:\n try:\n self._startdate = recording.startdate\n except AnonymizedDateError:\n self._startdate = datetime.date(1985, 1, 1)\n\n @property\n def data_record_duration(self) -> float:\n \"\"\"Duration of each data record in seconds.\"\"\"\n return self._data_record_duration\n\n def update_data_record_duration(\n self,\n data_record_duration: float,\n method: Literal[\"strict\", \"pad\", \"truncate\"] = \"strict\",\n ) -> None:\n \"\"\"\n Update the data record duration.\n\n This operation will fail if the new duration is incompatible with the current\n sampling frequencies.\n\n Parameters\n ----------\n data_record_duration : float\n The new data record duration in seconds.\n method : `{\"strict\", \"pad\", \"truncate\"}`, default: `\"strict\"`\n How to handle the case where 
the new duration does not divide the Edf\n duration evenly\n\n - \"strict\": Raise a ValueError\n - \"pad\": Pad the data with zeros to the next compatible duration. If zero\n is outside the physical range, data is padded with the physical minimum.\n - \"truncate\": Truncate the data to the previous compatible duration (might\n lead to loss of data)\n \"\"\"\n if data_record_duration == self.data_record_duration:\n return\n if data_record_duration <= 0:\n raise ValueError(\n f\"Data record duration must be positive, got {data_record_duration}\"\n )\n if not self.signals:\n raise ValueError(\n \"Data record duration must be zero for annotation-only files\"\n )\n for signal in self.signals:\n spr = signal.sampling_frequency * data_record_duration\n if spr % 1:\n raise ValueError(\n f\"Cannot set data record duration to {data_record_duration}: Incompatible sampling frequency {signal.sampling_frequency} Hz\"\n )\n\n num_data_records = self._pad_or_truncate_signals(data_record_duration, method)\n self._update_record_duration_in_annotation_signals(\n data_record_duration, num_data_records\n )\n self._data_record_duration = data_record_duration\n self._num_data_records = Edf.num_data_records.encode(num_data_records)\n\n @property\n def num_signals(self) -> int:\n \"\"\"Return the number of signals, excluding annotation signals for EDF+.\"\"\"\n return len(self.signals)\n\n def _pad_or_truncate_signals(\n self, data_record_duration: float, method: Literal[\"strict\", \"pad\", \"truncate\"]\n ) -> int:\n if method == \"pad\":\n new_duration = (\n ceil(self.duration / data_record_duration) * data_record_duration\n )\n self._pad_or_truncate_data(new_duration)\n return round(new_duration / data_record_duration)\n if method == \"truncate\":\n new_duration = (\n floor(self.duration / data_record_duration) * data_record_duration\n )\n self._pad_or_truncate_data(new_duration)\n return round(new_duration / data_record_duration)\n return _calculate_num_data_records(self.duration, data_record_duration)\n\n def _update_record_duration_in_annotation_signals(\n self, data_record_duration: float, num_data_records: int\n ) -> None:\n signals = list(self._signals)\n for idx, signal in enumerate(self._signals):\n if signal not in self._annotation_signals:\n continue\n annotations = []\n for data_record in signal._digital.reshape(\n (-1, signal.samples_per_data_record)\n ):\n annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())\n if signal is self._timekeeping_signal:\n annotations.extend(annot_dr.annotations[1:])\n else:\n annotations.extend(annot_dr.annotations)\n signals[idx] = _create_annotations_signal(\n [\n EdfAnnotation(a.onset - self._subsecond_offset, a.duration, a.text)\n for a in annotations\n ],\n num_data_records=num_data_records,\n data_record_duration=data_record_duration,\n with_timestamps=signal is self._timekeeping_signal,\n subsecond_offset=self._subsecond_offset,\n )\n self._signals = tuple(signals)\n\n def _pad_or_truncate_data(self, new_duration: float) -> None:\n for signal in self.signals:\n n_samples = round(new_duration * signal.sampling_frequency)\n diff = n_samples - len(signal._digital)\n if diff > 0:\n physical_pad_value = 0.0\n if signal.physical_min > 0 or signal.physical_max < 0:\n physical_pad_value = signal.physical_min\n signal._set_data(\n np.pad(signal.data, (0, diff), constant_values=physical_pad_value)\n )\n elif diff < 0:\n signal._set_data(signal.data[:diff])\n\n def anonymize(self) -> None:\n \"\"\"\n Anonymize a recording.\n\n Header fields are modified 
as follows:\n - local patient identification is set to `X X X X`\n - local recording identification is set to `Startdate X X X X`\n - startdate is set to `01.01.85`\n - starttime is set to `00.00.00`\n\n For EDF+ files, subsecond starttimes specified via an annotations signal are\n removed.\n \"\"\"\n self.patient = Patient()\n self.recording = Recording()\n self.starttime = datetime.time(0, 0, 0)\n\n def drop_signals(self, drop: Iterable[int | str]) -> None:\n \"\"\"\n Drop signals by index or label.\n\n Signal indices (int) and labels (str) can be provided in the same iterable. For\n ambiguous labels, all corresponding signals are dropped. Raises a ValueError if\n at least one of the provided identifiers does not correspond to a signal.\n\n Parameters\n ----------\n drop : Iterable[int | str]\n The signals to drop, identified by index or label.\n \"\"\"\n if isinstance(drop, str):\n drop = [drop]\n selected: list[EdfSignal] = []\n dropped: list[int | str] = []\n i = 0\n for signal in self._signals:\n if signal.label == \"EDF Annotations\":\n selected.append(signal)\n continue\n if i in drop or signal.label in drop:\n dropped.append(i)\n dropped.append(signal.label)\n else:\n selected.append(signal)\n i += 1\n if not_dropped := set(drop) - set(dropped):\n raise ValueError(f\"No signal found with index/label {not_dropped}\")\n self._signals = tuple(selected)\n self._bytes_in_header_record = Edf.bytes_in_header_record.encode(\n 256 * (len(selected) + 1)\n )\n self._num_signals = len(selected)\n\n def append_signals(self, new_signals: EdfSignal | Iterable[EdfSignal]) -> None:\n \"\"\"\n Append one or more signal(s) to the Edf recording.\n\n Every signal must be compatible with the current `data_record_duration` and all\n signal durations must match the overall recording duration. For recordings\n containing EDF+ annotation signals, the new signals are inserted after the last\n ordinary (i.e. 
non-annotation) signal.\n\n Parameters\n ----------\n new_signals : EdfSignal | Iterable[EdfSignal]\n The signal(s) to add.\n \"\"\"\n if isinstance(new_signals, EdfSignal):\n new_signals = [new_signals]\n last_ordinary_index = 0\n for i, signal in enumerate(self._signals):\n if signal.label != \"EDF Annotations\":\n last_ordinary_index = i\n self._set_signals(\n [\n *self._signals[: last_ordinary_index + 1],\n *new_signals,\n *self._signals[last_ordinary_index + 1 :],\n ]\n )\n\n @property\n def _annotation_signals(self) -> Iterable[EdfSignal]:\n return (signal for signal in self._signals if signal.label == \"EDF Annotations\")\n\n @property\n def _timekeeping_signal(self) -> EdfSignal:\n return next(iter(self._annotation_signals))\n\n @property\n def duration(self) -> float:\n \"\"\"Recording duration in seconds.\"\"\"\n return self.num_data_records * self.data_record_duration\n\n @property\n def annotations(self) -> tuple[EdfAnnotation, ...]:\n \"\"\"\n All annotations contained in the Edf, sorted chronologically.\n\n Does not include timekeeping annotations.\n \"\"\"\n annotations: list[EdfAnnotation] = []\n for i, signal in enumerate(self._annotation_signals):\n for data_record in signal._digital.reshape(\n (-1, signal.samples_per_data_record)\n ):\n annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())\n if i == 0:\n # from https://www.edfplus.info/specs/edfplus.html#timekeeping:\n # The first annotation of the first 'EDF Annotations' signal in each\n # data record is empty, but its timestamp specifies how many seconds\n # after the file startdate/time that data record starts.\n annotations.extend(annot_dr.annotations[1:])\n else:\n annotations.extend(annot_dr.annotations)\n subsecond_offset = self._subsecond_offset\n annotations = [\n EdfAnnotation(\n round(ann.onset - subsecond_offset, 12), ann.duration, ann.text\n )\n for ann in annotations\n ]\n return tuple(sorted(annotations))\n\n def drop_annotations(self, text: str) -> None:\n \"\"\"\n Drop annotations with a given text.\n\n Parameters\n ----------\n text : str\n All annotations whose text exactly matches this parameter are removed.\n \"\"\"\n for signal in self._annotation_signals:\n for data_record in signal._digital.reshape(\n (-1, signal.samples_per_data_record)\n ):\n annotations = _EdfAnnotationsDataRecord.from_bytes(\n data_record.tobytes()\n )\n annotations.drop_annotations_with_text(text)\n data_record[:] = np.frombuffer(\n annotations.to_bytes().ljust(len(data_record) * 2, b\"\\x00\"),\n dtype=np.int16,\n )\n\n def to_bytes(self) -> bytes:\n \"\"\"\n Convert an Edf to a `bytes` object.\n\n Returns\n -------\n bytes\n The binary representation of the Edf object (i.e., what a file created with\n `Edf.write` would contain).\n \"\"\"\n stream = io.BytesIO()\n self.write(stream)\n stream.seek(0)\n return stream.read()\n\n def slice_between_seconds(\n self,\n start: float,\n stop: float,\n *,\n keep_all_annotations: bool = False,\n ) -> None:\n \"\"\"\n Slice to the interval between two times.\n\n The sample point corresponding to `stop` is excluded. 
`start` and `stop` are\n given in seconds from recording start and have to correspond exactly to a sample\n time in all non-annotation signals.\n\n Parameters\n ----------\n start : float\n Start time in seconds from recording start.\n stop : float\n Stop time in seconds from recording start.\n keep_all_annotations : bool, default: False\n If set to `True`, annotations outside the selected time interval are kept.\n \"\"\"\n signals: list[EdfSignal] = []\n self._verify_seconds_inside_recording_time(start)\n self._verify_seconds_inside_recording_time(stop)\n self._verify_seconds_coincide_with_sample_time(start)\n self._verify_seconds_coincide_with_sample_time(stop)\n self._num_data_records = Edf.num_data_records.encode(\n int((stop - start) / self.data_record_duration)\n )\n for signal in self._signals:\n if signal.label == \"EDF Annotations\":\n signals.append(\n self._slice_annotations_signal(\n signal,\n start=start,\n stop=stop,\n keep_all_annotations=keep_all_annotations,\n )\n )\n else:\n start_index = start * signal.sampling_frequency\n stop_index = stop * signal.sampling_frequency\n signal._digital = signal._digital[int(start_index) : int(stop_index)]\n signals.append(signal)\n self._set_signals(signals)\n self._shift_startdatetime(int(start))\n\n def slice_between_annotations(\n self,\n start_text: str,\n stop_text: str,\n *,\n keep_all_annotations: bool = False,\n ) -> None:\n \"\"\"\n Slice to the interval between two EDF+ annotations.\n\n The sample point corresponding to the onset of the annotation identified by\n `stop_text` is excluded. `start_text` and `stop_text` each have to uniquely\n identify a single annotation, whose onset corresponds exactly to a sample time\n in all non-annotation signals.\n\n Parameters\n ----------\n start_text : str\n Text identifying the start annotation.\n stop_text : str\n Text identifying the stop annotation.\n keep_all_annotations : bool, default: False\n If set to `True`, annotations outside the selected time interval are kept.\n \"\"\"\n self.slice_between_seconds(\n self._get_annotation_by_text(start_text).onset,\n self._get_annotation_by_text(stop_text).onset,\n keep_all_annotations=keep_all_annotations,\n )\n\n def _get_annotation_by_text(self, text: str) -> EdfAnnotation:\n matches = []\n for annotation in self.annotations:\n if annotation.text == text:\n matches.append(annotation)\n if len(matches) == 1:\n return matches[0]\n if len(matches) > 1:\n raise ValueError(\n f\"Ambiguous annotation text {text!r}, found {len(matches)} matches\"\n )\n raise ValueError(f\"No annotation found with text {text!r}\")\n\n def _verify_seconds_inside_recording_time(self, seconds: float) -> None:\n if not 0 <= seconds <= self.duration:\n raise ValueError(\n f\"{seconds} is an invalid slice time for recording duration {self.duration}\"\n )\n\n def _verify_seconds_coincide_with_sample_time(self, seconds: float) -> None:\n for i, signal in enumerate(self.signals):\n index = seconds * signal.sampling_frequency\n if index != int(index):\n raise ValueError(\n f\"{seconds}s is not a sample time of signal {i} ({signal.label}) with fs={signal.sampling_frequency}Hz\"\n )\n\n def _shift_startdatetime(self, seconds: float) -> None:\n timedelta = datetime.timedelta(seconds=seconds)\n try:\n startdate = self.startdate\n startdate_anonymized = False\n except AnonymizedDateError:\n startdate = datetime.date.fromtimestamp(0)\n startdate_anonymized = True\n startdatetime = datetime.datetime.combine(startdate, self.starttime)\n startdatetime += timedelta\n if not 
startdate_anonymized:\n self.startdate = startdatetime.date()\n self.starttime = startdatetime.time()\n\n def copy(self) -> Edf:\n \"\"\"\n Create a deep copy of the Edf.\n\n Returns\n -------\n Edf\n The copied Edf object.\n \"\"\"\n return copy.deepcopy(self)\n\n def _slice_annotations_signal(\n self,\n signal: EdfSignal,\n *,\n start: float,\n stop: float,\n keep_all_annotations: bool,\n ) -> EdfSignal:\n is_timekeeping_signal = signal == self._timekeeping_signal\n annotations: list[EdfAnnotation] = []\n for data_record in signal._digital.reshape(\n (-1, signal.samples_per_data_record)\n ):\n annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())\n if is_timekeeping_signal:\n annotations.extend(annot_dr.annotations[1:])\n else:\n annotations.extend(annot_dr.annotations)\n annotations = [\n EdfAnnotation(round(a.onset - start, 12), a.duration, a.text)\n for a in annotations\n if keep_all_annotations or start <= a.onset < stop\n ]\n return _create_annotations_signal(\n annotations,\n num_data_records=self.num_data_records,\n data_record_duration=self.data_record_duration,\n with_timestamps=is_timekeeping_signal,\n subsecond_offset=self._subsecond_offset + start - int(start),\n )" }, { "identifier": "EdfSignal", "path": "edfio/edf.py", "snippet": "class EdfSignal:\n \"\"\"A single EDF signal.\n\n Attributes that might break the signal or file on modification (i.e.,\n `sampling_frequency`, `physical_range`, `digital_range`, `samples_per_data_record`,\n and `reserved`) can not be set after instantiation.\n\n To reduce memory consumption, signal data is always stored as a 16-bit integer array\n containing the digital values that would be written to the corresponding EDF file.\n Therefore, it is expected that `EdfSignal.data` does not match the physical\n values passed during instantiation exactly.\n\n Parameters\n ----------\n data : npt.NDArray[np.float64]\n The signal data (physical values).\n sampling_frequency : float\n The sampling frequency in Hz.\n label : str, default: `\"\"`\n The signal's label, e.g., `\"EEG Fpz-Cz\"` or `\"Body temp\"`.\n transducer_type : str, default: `\"\"`\n The transducer type, e.g., `\"AgAgCl electrode\"`.\n physical_dimension : str, default: `\"\"`\n The physical dimension, e.g., `\"uV\"` or `\"degreeC\"`\n physical_range : tuple[float, float] | None, default: None\n The physical range given as a tuple of `(physical_min, physical_max)`. If\n `None`, this is determined from the data.\n digital_range : tuple[int, int], default: `(-32768, 32767)`\n The digital range given as a tuple of `(digital_min, digital_max)`. 
Uses the\n maximum resolution of 16-bit integers by default.\n prefiltering : str, default: `\"\"`\n The signal prefiltering, e.g., `\"HP:0.1Hz LP:75Hz\"`.\n \"\"\"\n\n _label = RawHeaderFieldStr(16, is_settable=True)\n transducer_type = RawHeaderFieldStr(80, is_settable=True)\n \"\"\"Transducer type, e.g., `\"AgAgCl electrode\"`.\"\"\"\n physical_dimension = RawHeaderFieldStr(8, is_settable=True)\n \"\"\"Physical dimension, e.g., `\"uV\"` or `\"degreeC\"`.\"\"\"\n physical_min = RawHeaderFieldFloat(8)\n \"\"\"Physical minimum, e.g., `-500` or `34`.\"\"\"\n physical_max = RawHeaderFieldFloat(8)\n \"\"\"Physical maximum, e.g., `500` or `40`.\"\"\"\n digital_min = RawHeaderFieldInt(8)\n \"\"\"Digital minimum, e.g., `-2048`.\"\"\"\n digital_max = RawHeaderFieldInt(8)\n \"\"\"Digital maximum, e.g., `2047`.\"\"\"\n prefiltering = RawHeaderFieldStr(80, is_settable=True)\n \"\"\"Signal prefiltering, e.g., `\"HP:0.1Hz LP:75Hz\"`.\"\"\"\n samples_per_data_record = RawHeaderFieldInt(8)\n \"\"\"\n Number of samples in each data record.\n\n For newly instantiated :class:`EdfSignal` objects, this is only set once\n :meth:`Edf.write` is called.\n \"\"\"\n reserved = RawHeaderFieldStr(32)\n \"\"\"Reserved signal header field, always `\"\"`\"\"\"\n\n def __init__(\n self,\n data: npt.NDArray[np.float64],\n sampling_frequency: float,\n *,\n label: str = \"\",\n transducer_type: str = \"\",\n physical_dimension: str = \"\",\n physical_range: tuple[float, float] | None = None,\n digital_range: tuple[int, int] = (-32768, 32767),\n prefiltering: str = \"\",\n ):\n self._sampling_frequency = sampling_frequency\n self.label = label\n self.transducer_type = transducer_type\n self.physical_dimension = physical_dimension\n self.prefiltering = prefiltering\n self._reserved = EdfSignal.reserved.encode(\"\")\n if not np.all(np.isfinite(data)):\n raise ValueError(\"Signal data must contain only finite values\")\n self._set_physical_range(physical_range, data)\n self._set_digital_range(digital_range)\n self._set_data(data)\n\n def __repr__(self) -> str:\n info = f\"{self.sampling_frequency:g}Hz\"\n if self.label:\n info = f\"{self.label} \" + info\n return f\"<EdfSignal {info}>\"\n\n @classmethod\n def _from_raw_header(\n cls,\n sampling_frequency: float,\n *,\n _label: bytes,\n transducer_type: bytes,\n physical_dimension: bytes,\n physical_min: bytes,\n physical_max: bytes,\n digital_min: bytes,\n digital_max: bytes,\n prefiltering: bytes,\n samples_per_data_record: bytes,\n reserved: bytes,\n ) -> EdfSignal:\n sig = object.__new__(cls)\n sig._sampling_frequency = sampling_frequency\n sig._label = EdfSignal._label.decode(_label) # type: ignore[attr-defined]\n sig._transducer_type = transducer_type # type: ignore[attr-defined]\n sig._physical_dimension = physical_dimension # type: ignore[attr-defined]\n sig._physical_min = physical_min # type: ignore[attr-defined]\n sig._physical_max = physical_max # type: ignore[attr-defined]\n sig._digital_min = digital_min # type: ignore[attr-defined]\n sig._digital_max = digital_max # type: ignore[attr-defined]\n sig._prefiltering = prefiltering # type: ignore[attr-defined]\n sig._samples_per_data_record = samples_per_data_record # type: ignore[attr-defined]\n sig._reserved = reserved # type: ignore[attr-defined]\n return sig\n\n @classmethod\n def from_hypnogram(\n cls,\n stages: npt.NDArray[np.float64],\n stage_duration: float = 30,\n *,\n label: str = \"\",\n ) -> EdfSignal:\n \"\"\"Create an EDF signal from a hypnogram, with scaling according to EDF specs.\n\n According to the 
EDF FAQ [1]_, use integer numbers 0, 1, 2, 3, 4, 5, 6, and 9\n for sleep stages W, 1, 2, 3, 4, R, MT, und unscored, respectively. The digital\n range is set to `(0, 9)`.\n\n Parameters\n ----------\n stages : npt.NDArray[np.float64]\n The sleep stages, coded as integer numbers.\n stage_duration : float, default: `30`\n The duration of each sleep stage in seconds, used to set the sampling\n frequency to its inverse.\n label : str, default: `\"\"`\n The signal's label.\n\n Returns\n -------\n EdfSignal\n The resulting :class:`EdfSignal` object.\n\n References\n ----------\n .. [1] EDF FAQ, https://www.edfplus.info/specs/edffaq.html\n \"\"\"\n allowed_stages = {0, 1, 2, 3, 4, 5, 6, 9}\n if invalid_stages := set(stages) - allowed_stages:\n raise ValueError(f\"stages contains invalid values: {invalid_stages}\")\n return EdfSignal(\n data=stages,\n sampling_frequency=1 / stage_duration,\n label=label,\n physical_range=(0, 9),\n digital_range=(0, 9),\n )\n\n @property\n def label(self) -> str:\n \"\"\"Signal label, e.g., `\"EEG Fpz-Cz\"` or `\"Body temp\"`.\"\"\"\n return self._label\n\n @label.setter\n def label(self, label: str) -> None:\n if label == \"EDF Annotations\":\n raise ValueError(\"Ordinary signal label must not be 'EDF Annotations'.\")\n self._label = label\n\n @property\n def physical_range(self) -> FloatRange:\n \"\"\"The physical range as a tuple of `(physical_min, physical_max)`.\"\"\"\n return FloatRange(self.physical_min, self.physical_max)\n\n @property\n def digital_range(self) -> IntRange:\n \"\"\"The digital range as a tuple of `(digital_min, digital_max)`.\"\"\"\n return IntRange(self.digital_min, self.digital_max)\n\n @property\n def sampling_frequency(self) -> float:\n \"\"\"The sampling frequency in Hz.\"\"\"\n return self._sampling_frequency\n\n @property\n def data(self) -> npt.NDArray[np.float64]:\n \"\"\"\n Numpy array containing the physical signal values as floats.\n\n To simplify avoiding inconsistencies between signal data and header fields,\n individual values in the returned array can not be modified. Use\n :meth:`EdfSignal.update_data` to overwrite with new physical data.\n \"\"\"\n try:\n gain, offset = calculate_gain_and_offset(\n self.digital_min,\n self.digital_max,\n self.physical_min,\n self.physical_max,\n )\n except ZeroDivisionError:\n data = self._digital.astype(np.float64)\n warnings.warn(\n f\"Digital minimum equals digital maximum ({self.digital_min}) for {self.label}, returning uncalibrated signal.\"\n )\n except ValueError:\n data = self._digital.astype(np.float64)\n else:\n data = (self._digital + offset) * gain\n data.setflags(write=False)\n return data\n\n def update_data(\n self,\n data: npt.NDArray[np.float64],\n *,\n keep_physical_range: bool = False,\n sampling_frequency: float | None = None,\n ) -> None:\n \"\"\"\n Overwrite physical signal values with an array of equal length.\n\n Parameters\n ----------\n data : npt.NDArray[np.float64]\n The new physical data.\n keep_physical_range : bool, default: False\n If `True`, the `physical_range` is not modified to accomodate the new data.\n sampling_frequency : float | None, default: None\n If not `None`, the `sampling_frequency` is updated to the new value. 
The new\n data must match the expected length for the new sampling frequency.\n \"\"\"\n expected_length = len(self._digital)\n if (\n sampling_frequency is not None\n and sampling_frequency != self._sampling_frequency\n ):\n expected_length = self._get_expected_new_length(sampling_frequency)\n if len(data) != expected_length:\n raise ValueError(\n f\"Signal lengths must match: got {len(data)}, expected {len(self._digital)}.\"\n )\n physical_range = self.physical_range if keep_physical_range else None\n self._set_physical_range(physical_range, data)\n if sampling_frequency is not None:\n self._sampling_frequency = sampling_frequency\n self._set_data(data)\n\n def _get_expected_new_length(self, sampling_frequency: float) -> int:\n if sampling_frequency <= 0:\n raise ValueError(\n f\"Sampling frequency must be positive, got {sampling_frequency}\"\n )\n current_length = len(self._digital)\n expected_length_f = (\n sampling_frequency / self._sampling_frequency * current_length\n )\n if not math.isclose(expected_length_f, round(expected_length_f), rel_tol=1e-10):\n raise ValueError(\n f\"Sampling frequency of {sampling_frequency} results in non-integer number of samples ({expected_length_f})\"\n )\n return round(expected_length_f)\n\n def _set_digital_range(self, digital_range: tuple[int, int]) -> None:\n digital_range = IntRange(*digital_range)\n if digital_range.min == digital_range.max:\n raise ValueError(\n f\"Digital minimum ({digital_range.min}) must differ from digital maximum ({digital_range.max}).\"\n )\n self._digital_min = EdfSignal.digital_min.encode(digital_range.min)\n self._digital_max = EdfSignal.digital_max.encode(digital_range.max)\n\n def _set_physical_range(\n self,\n physical_range: tuple[float, float] | None,\n data: npt.NDArray[np.float64],\n ) -> None:\n if physical_range is None:\n physical_range = FloatRange(data.min(), data.max())\n if physical_range.min == physical_range.max:\n physical_range = FloatRange(physical_range.min, physical_range.max + 1)\n else:\n physical_range = FloatRange(*physical_range)\n if physical_range.min == physical_range.max:\n raise ValueError(\n f\"Physical minimum ({physical_range.min}) must differ from physical maximum ({physical_range.max}).\"\n )\n data_min = data.min()\n data_max = data.max()\n if data_min < physical_range.min or data_max > physical_range.max:\n raise ValueError(\n f\"Signal range [{data_min}, {data_max}] out of physical range: [{physical_range.min}, {physical_range.max}]\"\n )\n self._physical_min = EdfSignal.physical_min.encode(\n round_float_to_8_characters(physical_range.min, math.floor)\n )\n self._physical_max = EdfSignal.physical_max.encode(\n round_float_to_8_characters(physical_range.max, math.ceil)\n )\n\n def _set_data(self, data: npt.NDArray[np.float64]) -> None:\n gain, offset = calculate_gain_and_offset(\n self.digital_min,\n self.digital_max,\n self.physical_min,\n self.physical_max,\n )\n self._digital = np.round(data / gain - offset).astype(np.int16)" }, { "identifier": "Patient", "path": "edfio/edf.py", "snippet": "class Patient:\n \"\"\"\n Object representation of the local patient identification.\n\n Parsing from/to the string containing the local_patient_identification header field\n is done according to the EDF+ specs. 
Subfields must be ASCII (32..126) and may not\n contain spaces.\n\n Parameters\n ----------\n code : str, default: `\"X\"`\n The code by which the patient is known in the hospital administration.\n sex : `{\"X\", \"F\", \"M\"}`, default: `\"X\"`\n Sex, `F` for female, `M` for male, `X` if anonymized.\n birthdate : datetime.date | None, default: None\n Patient birthdate, stored as `X` if `None`.\n name : str, default: `\"X\"`\n The patient's name, stored as `X` if `None`.\n additional : Sequence[str], default: `()`\n Optional additional subfields. Will be stored in the header field separated by\n spaces.\n \"\"\"\n\n def __init__(\n self,\n *,\n code: str = \"X\",\n sex: Literal[\"F\", \"M\", \"X\"] = \"X\",\n birthdate: datetime.date | None = None,\n name: str = \"X\",\n additional: Sequence[str] = (),\n ) -> None:\n if sex not in (\"F\", \"M\", \"X\"):\n raise ValueError(f\"Invalid sex: {sex}, must be one of F, M, X\")\n if birthdate is None:\n birthdate_field = \"X\"\n else:\n birthdate_field = encode_edfplus_date(birthdate)\n subfields = {\n \"code\": code,\n \"sex\": sex,\n \"birthdate\": birthdate_field,\n \"name\": name,\n **{f\"additional[{i}]\": v for i, v in enumerate(additional)},\n }\n validate_subfields(subfields)\n local_patient_identification = \" \".join(subfields.values())\n encode_str(local_patient_identification, 80)\n self._local_patient_identification = local_patient_identification\n\n def __repr__(self) -> str:\n try:\n return repr_from_init(self)\n except Exception:\n return repr(self._local_patient_identification)\n\n @classmethod\n def _from_str(cls, string: str) -> Patient:\n encode_str(string, 80)\n obj = object.__new__(cls)\n obj._local_patient_identification = string\n return obj\n\n def _to_str(self) -> str:\n return self._local_patient_identification\n\n @property\n def code(self) -> str:\n \"\"\"The code by which the patient is known in the hospital administration.\"\"\"\n return self.get_subfield(0)\n\n @property\n def sex(self) -> str:\n \"\"\"Sex, `F` for female, `M` for male, `X` if anonymized.\"\"\"\n return self.get_subfield(1)\n\n @property\n def birthdate(self) -> datetime.date:\n \"\"\"Patient birthdate.\"\"\"\n birthdate_field = self.get_subfield(2)\n if birthdate_field == \"X\":\n raise AnonymizedDateError(\"Patient birthdate is not available ('X').\")\n return decode_edfplus_date(birthdate_field)\n\n @property\n def name(self) -> str:\n \"\"\"The patient's name.\"\"\"\n return self.get_subfield(3)\n\n @property\n def additional(self) -> tuple[str, ...]:\n \"\"\"Optional additional subfields.\"\"\"\n return tuple(self._local_patient_identification.split()[4:])\n\n def get_subfield(self, idx: int) -> str:\n \"\"\"\n Access a subfield of the local patient identification field by index.\n\n Parameters\n ----------\n idx : int\n The index of the subfield to access.\n\n Returns\n -------\n str\n The subfield at the specified index. If the index exceeds the actually\n available number of subfields, the return value is `\"X\"`.\n \"\"\"\n subfields = self._local_patient_identification.split()\n if len(subfields) <= idx:\n return \"X\"\n return subfields[idx]" }, { "identifier": "Recording", "path": "edfio/edf.py", "snippet": "class Recording:\n \"\"\"\n Object representation of the local recording identification.\n\n Parsing from/to the string containing the local_recording_identification header\n field is done according to the EDF+ specs. 
Subfields must be ASCII (32..126) and may\n not contain spaces.\n\n Parameters\n ----------\n startdate : datetime.date | None, default: None\n The recording startdate.\n hospital_administration_code : str, default: `\"X\"`\n The hospital administration code of the investigation, e.g., EEG number or PSG\n number.\n investigator_technician_code : str, default: `\"X\"`\n A code specifying the responsible investigator or technician.\n equipment_code : str, default: `\"X\"`\n A code specifying the used equipment.\n additional : Sequence[str], default: `()`\n Optional additional subfields. Will be stored in the header field separated by\n spaces.\n \"\"\"\n\n def __init__(\n self,\n *,\n startdate: datetime.date | None = None,\n hospital_administration_code: str = \"X\",\n investigator_technician_code: str = \"X\",\n equipment_code: str = \"X\",\n additional: Sequence[str] = (),\n ) -> None:\n if startdate is None:\n startdate_field = \"X\"\n else:\n startdate_field = encode_edfplus_date(startdate)\n subfields = {\n \"startdate\": startdate_field,\n \"hospital_administration_code\": hospital_administration_code,\n \"investigator_technician_code\": investigator_technician_code,\n \"equipment_code\": equipment_code,\n **{f\"additional[{i}]\": v for i, v in enumerate(additional)},\n }\n validate_subfields(subfields)\n local_recording_identification = \" \".join((\"Startdate\", *subfields.values()))\n encode_str(local_recording_identification, 80)\n self._local_recording_identification = local_recording_identification\n\n def __repr__(self) -> str:\n try:\n return repr_from_init(self)\n except Exception:\n return repr(self._local_recording_identification)\n\n @classmethod\n def _from_str(cls, string: str) -> Recording:\n encode_str(string, 80)\n obj = object.__new__(cls)\n obj._local_recording_identification = string\n return obj\n\n def _to_str(self) -> str:\n return self._local_recording_identification\n\n @property\n def startdate(self) -> datetime.date:\n \"\"\"The recording startdate.\"\"\"\n if not self._local_recording_identification.startswith(\"Startdate \"):\n raise ValueError(\n f\"Local recording identification field {self._local_recording_identification!r} does not follow EDF+ standard.\"\n )\n startdate_field = self.get_subfield(1)\n if startdate_field == \"X\":\n raise AnonymizedDateError(\"Recording startdate is not available ('X').\")\n return decode_edfplus_date(startdate_field)\n\n @property\n def hospital_administration_code(self) -> str:\n \"\"\"The hospital administration code of the investigation.\"\"\"\n return self.get_subfield(2)\n\n @property\n def investigator_technician_code(self) -> str:\n \"\"\"A code specifying the responsible investigator or technician.\"\"\"\n return self.get_subfield(3)\n\n @property\n def equipment_code(self) -> str:\n \"\"\"A code specifying the used equipment.\"\"\"\n return self.get_subfield(4)\n\n @property\n def additional(self) -> tuple[str, ...]:\n \"\"\"Optional additional subfields.\"\"\"\n return tuple(self._local_recording_identification.split()[5:])\n\n def get_subfield(self, idx: int) -> str:\n \"\"\"\n Access a subfield of the local recording identification field by index.\n\n Parameters\n ----------\n idx : int\n The index of the subfield to access. The first subfield (starting at\n index 0) should always be \"Startdate\" according to the EDF+ spedification.\n\n Returns\n -------\n str\n The subfield at the specified index. 
If the index exceeds the actually\n available number of subfields, the return value is `\"X\"`.\n \"\"\"\n subfields = self._local_recording_identification.split()\n if len(subfields) <= idx:\n return \"X\"\n return subfields[idx]" } ]
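The context snippets above document edfio's parsed header objects; as a small illustration of the constructor-style API those docstrings describe (a sketch based only on the snippets in this record, not code from the dataset itself):

import datetime

from edfio import Patient, Recording

# Keyword-only constructors as described in the Patient/Recording docstrings above.
patient = Patient(code="MCH-0234567", sex="F", birthdate=datetime.date(1951, 5, 2), name="Haagse_Harry")
recording = Recording(startdate=datetime.date(2002, 3, 2), equipment_code="EMG561")

# The parsed subfields are exposed as properties.
print(patient.sex)               # "F"
print(patient.birthdate)         # datetime.date(1951, 5, 2)
print(recording.startdate)       # datetime.date(2002, 3, 2)
print(recording.equipment_code)  # "EMG561"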
import_statement:
import datetime
import numpy as np
import pytest
from edfio import AnonymizedDateError, Edf, EdfSignal, Patient, Recording

token_num: 13,683

cropped_code:
@pytest.fixture()
def patient():
    return Patient._from_str("MCH-0234567 F 02-MAY-1951 Haagse_Harry")


@pytest.fixture()
def recording():
    return Recording._from_str(
        "Startdate 02-MAR-2002 EMG561 BK/JOP Sony. MNC R Median Nerve."
    )


@pytest.fixture()
def edf(patient, recording):

all_code:
@pytest.fixture()
def patient():
    return Patient._from_str("MCH-0234567 F 02-MAY-1951 Haagse_Harry")


@pytest.fixture()
def recording():
    return Recording._from_str(
        "Startdate 02-MAR-2002 EMG561 BK/JOP Sony. MNC R Median Nerve."
    )


@pytest.fixture()
def edf(patient, recording):

next_line: return Edf([EdfSignal(np.arange(10), 1)], patient=patient, recording=recording)
gold_snippet_index: 1
created_at: 2023-11-09 09:53:27+00:00
level: 16k
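Putting this record together, the import statement, the fixtures, and the expected next line correspond to the following usage of the edfio API (a sketch assembled from the record's own snippets):

import numpy as np

from edfio import Edf, EdfSignal, Patient, Recording

# Header strings taken verbatim from the fixtures above.
patient = Patient._from_str("MCH-0234567 F 02-MAY-1951 Haagse_Harry")
recording = Recording._from_str(
    "Startdate 02-MAR-2002 EMG561 BK/JOP Sony. MNC R Median Nerve."
)

# One 10-sample signal at 1 Hz, matching the record's expected next line.
edf = Edf([EdfSignal(np.arange(10), 1)], patient=patient, recording=recording)

# The EDF+ header fields are then available as parsed objects.
print(edf.patient.name)         # "Haagse_Harry"
print(edf.patient.sex)          # "F"
print(edf.recording.startdate)  # datetime.date(2002, 3, 2)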
repo_name: sb-ai-lab/HypEx
file_path: hypex/matcher.py
[ { "identifier": "FaissMatcher", "path": "hypex/algorithms/faiss_matcher.py", "snippet": "class FaissMatcher:\n \"\"\"A class used to match instances using Faiss library.\"\"\"\n\n def __init__(\n self,\n df: pd.DataFrame,\n outcomes: str,\n treatment: str,\n info_col: list,\n features: [list, pd.DataFrame] = None,\n group_col: str = None,\n weights: dict = None,\n sigma: float = 1.96,\n validation: bool = None,\n n_neighbors: int = 10,\n silent: bool = True,\n pbar: bool = True,\n ):\n \"\"\"Construct all the necessary attributes.\n\n Args:\n df:\n The input dataframe\n outcomes:\n The target column name\n treatment:\n The column name with treatment\n info_col:\n A list with informational column names\n features:\n A list with names of feature using to matching. Defaults to None\n group_col:\n The column for stratification. Defaults to None\n weights:\n Dict with wight of features to matching. If you would like that matching will be more for\n 1 feature and less for another one\n sigma:\n The significant level for confidence interval calculation Defaults to 1.96\n validation:\n The flag for validation of estimated ATE with default method `random_feature`\n n_neighbors:\n The number of neighbors to find for each object. Defaults to 10\n silent:\n Write logs in debug mode\n pbar:\n Display progress bar while get index\n \"\"\"\n self.n_neighbors = n_neighbors\n if group_col is None:\n self.df = df\n else:\n self.df = df.sort_values([treatment, group_col])\n self.columns_del = [outcomes]\n if info_col:\n self.info_col = info_col\n else:\n self.info_col = []\n\n if self.info_col is not None:\n self.columns_del = self.columns_del + [x for x in self.info_col if x in self.df.columns]\n self.outcomes = outcomes if type(outcomes) == list else [outcomes]\n self.treatment = treatment\n\n if features is None:\n self.columns_match = list(\n set([x for x in list(self.df.columns) if x not in self.info_col] + [self.treatment] + self.outcomes)\n )\n else:\n try:\n self.columns_match = features[\"Feature\"].tolist() + [self.treatment] + self.outcomes\n except TypeError:\n self.columns_match = features + [self.treatment] + self.outcomes\n\n self.features_quality = (\n self.df.drop(columns=[self.treatment] + self.outcomes + self.info_col)\n .select_dtypes(include=[\"int16\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\"])\n .columns\n )\n self.dict_outcome_untreated = {}\n self.dict_outcome_treated = {}\n self.group_col = group_col\n self.weights = weights\n self.treated_index = None\n self.untreated_index = None\n self.orig_treated_index = None\n self.orig_untreated_index = None\n self.results = {}\n self.ATE = None\n self.sigma = sigma\n self.quality_dict = {}\n self.rep_dict = None\n self.validation = validation\n self.silent = silent\n self.pbar = pbar\n self.tqdm = None\n self.results = pd.DataFrame()\n\n def __getstate__(self) -> dict:\n \"\"\"Prepare the object for serialization.\n\n This method is called when the object is about to be serialized.\n It removes the `tqdm` attribute from the object's dictionary\n because `tqdm` objects cannot be serialized.\n\n Returns:\n A copy of the object's dictionary with the `tqdm` attribute removed.\n \"\"\"\n state = self.__dict__.copy()\n if \"tqdm\" in state:\n del state[\"tqdm\"]\n return state\n\n def __setstate__(self, state: dict):\n \"\"\"Restore the object after deserialization.\n\n This method is called when the object is deserialized.\n It adds the `tqdm` attribute back to the object's dictionary\n if the `pbar` attribute is True.\n\n 
Args:\n state:\n The deserialized state of the object\n \"\"\"\n if \"pbar\" in state and state[\"pbar\"]:\n state[\"tqdm\"] = None\n self.__dict__.update(state)\n\n def _get_split(self, df: pd.DataFrame) -> (pd.DataFrame, pd.DataFrame):\n \"\"\"Creates split data by treatment column.\n\n Separate treatment column with 1 (treated) an 0 (untreated),\n scales and transforms treatment column\n\n Args:\n df:\n The input dataframe\n\n Returns:\n Tuple of dataframes - one for treated (df[self.treatment] == 1]) and\n one for untreated (df[self.treatment] == 0]). Drops self.outcomes and\n `self.treatment` columns\n\n \"\"\"\n logger.debug(\"Creating split data by treatment column\")\n\n treated = df[df[self.treatment] == 1].drop([self.treatment] + self.outcomes, axis=1)\n untreated = df[df[self.treatment] == 0].drop([self.treatment] + self.outcomes, axis=1)\n\n return treated, untreated\n\n def _predict_outcome(self, std_treated: pd.DataFrame, std_untreated: pd.DataFrame):\n \"\"\"Applies LinearRegression to input arrays.\n\n Calculate biases of treated and untreated values,\n creates dict of y - regular, matched and without bias.\n\n Args:\n std_treated:\n The dataframe of treated data\n std_untreated:\n The dataframe of untreated data\n\n \"\"\"\n logger.debug(\"Predicting target by Linear Regression\")\n\n start_time = dt.datetime.now()\n logger.debug(\"start --\")\n\n self.dict_outcome_untreated = {}\n self.dict_outcome_treated = {}\n df = self.df.drop(columns=self.info_col)\n\n for outcome in self.outcomes:\n y_untreated = df[df[self.treatment] == 0][outcome].to_numpy()\n y_treated = df[df[self.treatment] == 1][outcome].to_numpy()\n\n x_treated = std_treated.to_numpy()\n x_untreated = std_untreated.to_numpy()\n y_match_treated = np.array([y_untreated[idx].mean() for idx in self.treated_index])\n y_match_untreated = np.array([y_treated[idx].mean() for idx in self.untreated_index])\n x_match_treated = np.array([x_untreated[idx].mean(0) for idx in self.treated_index])\n x_match_untreated = np.array([x_treated[idx].mean(0) for idx in self.untreated_index])\n bias_coefs_c = bias_coefs(self.untreated_index, y_treated, x_treated)\n bias_coefs_t = bias_coefs(self.treated_index, y_untreated, x_untreated)\n bias_c = bias(x_untreated, x_match_untreated, bias_coefs_c)\n bias_t = bias(x_treated, x_match_treated, bias_coefs_t)\n\n y_match_treated_bias = y_treated - y_match_treated + bias_t\n y_match_untreated_bias = y_match_untreated - y_untreated - bias_c\n\n self.dict_outcome_untreated[outcome] = y_untreated\n self.dict_outcome_untreated[outcome + POSTFIX] = y_match_untreated\n self.dict_outcome_untreated[outcome + POSTFIX_BIAS] = y_match_untreated_bias\n\n self.dict_outcome_treated[outcome] = y_treated\n self.dict_outcome_treated[outcome + POSTFIX] = y_match_treated\n self.dict_outcome_treated[outcome + POSTFIX_BIAS] = y_match_treated_bias\n\n end_time = dt.datetime.now()\n total = dt.datetime.strptime(str(end_time - start_time), \"%H:%M:%S.%f\").strftime(\"%H:%M:%S\")\n logger.debug(f\"end -- [work time{total}]\")\n\n def _create_outcome_matched_df(self, dict_outcome: dict, is_treated: bool) -> pd.DataFrame:\n \"\"\"Creates dataframe with outcomes values and treatment.\n\n Args:\n dict_outcome:\n A dictionary containing outcomes\n is_treated:\n A boolean value indicating whether the outcome is treated or not\n\n Returns:\n A dataframe with matched outcome and treatment columns\n\n \"\"\"\n df_pred = pd.DataFrame(dict_outcome)\n df_pred[self.treatment] = int(is_treated)\n df_pred[self.treatment + 
POSTFIX] = int(not is_treated)\n\n return df_pred\n\n def _create_features_matched_df(self, index: np.ndarray, is_treated: bool) -> pd.DataFrame:\n \"\"\"Creates matched dataframe with features.\n\n Args:\n index:\n An array of indices\n is_treated:\n A boolean value indicating whether the outcome is treated or not\n\n\n Returns:\n A dataframe of matched features\n\n \"\"\"\n df = self.df.drop(columns=self.outcomes + self.info_col)\n\n if self.group_col is None:\n untreated_index = df[df[self.treatment] == int(not is_treated)].index.to_numpy()\n converted_index = [untreated_index[i] for i in index]\n filtered = df.loc[df[self.treatment] == int(not is_treated)].values\n untreated_df = pd.DataFrame(\n data=np.array([filtered[idx].mean(axis=0) for idx in index]), columns=df.columns\n ) # добавить дату в данные и пофиксить баги с этим (тут ломалось)\n if self.info_col is not None and len(self.info_col) != 1:\n untreated_df[\"index\"] = pd.Series(converted_index)\n treated_df = df[df[self.treatment] == int(is_treated)].reset_index()\n else:\n ids = self.df[df[self.treatment] == int(not is_treated)][self.info_col].values.ravel()\n converted_index = [ids[i] for i in index]\n untreated_df[\"index\"] = pd.Series(converted_index)\n treated_df = df[df[self.treatment] == int(is_treated)].reset_index()\n treated_df[\"index\"] = self.df[self.df[self.treatment] == int(is_treated)][self.info_col].values.ravel()\n else:\n df = df.sort_values([self.treatment, self.group_col])\n untreated_index = df[df[self.treatment] == int(not is_treated)].index.to_numpy()\n converted_index = [untreated_index[i] for i in index]\n filtered = df.loc[df[self.treatment] == int(not is_treated)]\n cols_untreated = [col for col in filtered.columns if col != self.group_col]\n filtered = filtered.drop(columns=self.group_col).to_numpy()\n untreated_df = pd.DataFrame(\n data=np.array([filtered[idx].mean(axis=0) for idx in index]), columns=cols_untreated\n )\n treated_df = df[df[self.treatment] == int(is_treated)].reset_index()\n grp = treated_df[self.group_col]\n untreated_df[self.group_col] = grp\n if self.info_col is not None and len(self.info_col) != 1:\n untreated_df[\"index\"] = pd.Series(converted_index)\n else:\n ids = (\n self.df[df[self.treatment] == int(not is_treated)]\n .sort_values([self.treatment, self.group_col])[self.info_col]\n .values.ravel()\n )\n converted_index = [ids[i] for i in index]\n untreated_df[\"index\"] = pd.Series(converted_index)\n treated_df[\"index\"] = self.df[self.df[self.treatment] == int(is_treated)][self.info_col].values.ravel()\n untreated_df.columns = [col + POSTFIX for col in untreated_df.columns]\n\n x = pd.concat([treated_df, untreated_df], axis=1).drop(\n columns=[self.treatment, self.treatment + POSTFIX], axis=1\n )\n return x\n\n def _create_matched_df(self) -> pd.DataFrame:\n \"\"\"Creates matched df of features and outcome.\n\n Returns:\n Matched dataframe\n \"\"\"\n df_pred_treated = self._create_outcome_matched_df(self.dict_outcome_treated, True)\n df_pred_untreated = self._create_outcome_matched_df(self.dict_outcome_untreated, False)\n\n df_matched = pd.concat([df_pred_treated, df_pred_untreated])\n\n treated_x = self._create_features_matched_df(self.treated_index, True)\n untreated_x = self._create_features_matched_df(self.untreated_index, False)\n\n untreated_x = pd.concat([treated_x, untreated_x])\n\n columns = list(untreated_x.columns) + list(df_matched.columns)\n\n df_matched = pd.concat([untreated_x, df_matched], axis=1, ignore_index=True)\n df_matched.columns = columns\n\n 
return df_matched\n\n def calc_atc(self, df: pd.DataFrame, outcome: str) -> tuple:\n \"\"\"Calculates Average Treatment Effect for the control group (ATC).\n\n Effect on control group if it was affected\n\n Args:\n df:\n Input dataframe\n outcome:\n The outcome to be considered for treatment effect\n\n Returns:\n Contains ATC, scaled counts, and variances as numpy arrays\n\n \"\"\"\n logger.debug(\"Calculating ATC\")\n\n df = df[df[self.treatment] == 0]\n N_c = len(df)\n ITT_c = df[outcome + POSTFIX_BIAS]\n scaled_counts_c = scaled_counts(N_c, self.treated_index, self.silent)\n\n vars_c = np.repeat(ITT_c.var(), N_c) # conservative\n atc = ITT_c.mean()\n\n return atc, scaled_counts_c, vars_c\n\n def calc_att(self, df: pd.DataFrame, outcome: str) -> tuple:\n \"\"\"Calculates Average Treatment Effect for the treated (ATT).\n\n Args:\n df:\n Input dataframe\n outcome:\n The outcome to be considered for treatment effect\n\n Returns:\n Contains ATT, scaled counts, and variances as numpy arrays\n\n \"\"\"\n logger.debug(\"Calculating ATT\")\n\n df = df[df[self.treatment] == 1]\n N_t = len(df)\n ITT_t = df[outcome + POSTFIX_BIAS]\n scaled_counts_t = scaled_counts(N_t, self.untreated_index, self.silent)\n\n vars_t = np.repeat(ITT_t.var(), N_t) # conservative\n att = ITT_t.mean()\n\n return att, scaled_counts_t, vars_t\n\n def _calculate_ate_all_target(self, df: pd.DataFrame):\n \"\"\"Creates dictionaries of all effect: ATE, ATC, ATT.\n\n Args:\n df:\n Input dataframe\n\n \"\"\"\n logger.debug(\"Creating dicts of all effects: ATE, ATC, ATT\")\n\n att_dict = {}\n atc_dict = {}\n ate_dict = {}\n N = len(df)\n N_t = df[self.treatment].sum()\n N_c = N - N_t\n\n for outcome in self.outcomes:\n att, scaled_counts_t, vars_t = self.calc_att(df, outcome)\n atc, scaled_counts_c, vars_c = self.calc_atc(df, outcome)\n ate = (N_c / N) * atc + (N_t / N) * att\n\n att_se = calc_att_se(vars_c, vars_t, scaled_counts_c)\n atc_se = calc_atc_se(vars_c, vars_t, scaled_counts_t)\n ate_se = calc_ate_se(vars_c, vars_t, scaled_counts_c, scaled_counts_t)\n\n ate_dict[outcome] = [\n ate,\n ate_se,\n pval_calc(ate / ate_se),\n ate - self.sigma * ate_se,\n ate + self.sigma * ate_se,\n ]\n atc_dict[outcome] = [\n atc,\n atc_se,\n pval_calc(atc / atc_se),\n atc - self.sigma * atc_se,\n atc + self.sigma * atc_se,\n ]\n att_dict[outcome] = [\n att,\n att_se,\n pval_calc(att / att_se),\n att - self.sigma * att_se,\n att + self.sigma * att_se,\n ]\n\n self.ATE, self.ATC, self.ATT = ate_dict, atc_dict, att_dict\n self.val_dict = ate_dict\n\n def matching_quality(self, df_matched) -> Dict[str, Union[Dict[str, float], float]]:\n \"\"\"Estimated the quality of covariates balance and repeat fraction.\n\n Calculates population stability index,Standardized mean difference\n and Kolmogorov-Smirnov test for numeric values. 
Returns a dictionary of reports.\n\n Args:\n df_matched:\n Matched DataFrame to calculate quality\n\n Returns:\n dictionary containing PSI, KS-test, SMD data and repeat fractions\n\n \"\"\"\n if self.silent:\n logger.debug(\"Estimating quality of matching\")\n else:\n logger.info(\"Estimating quality of matching\")\n\n psi_columns = set(self.columns_match)\n psi_columns = list(psi_columns - set([self.treatment] + self.outcomes))\n psi_data, ks_data, smd_data = matching_quality(\n df_matched, self.treatment, sorted(self.features_quality), sorted(psi_columns), self.silent\n )\n\n rep_dict = {\n \"match_control_to_treat\": check_repeats(np.concatenate(self.treated_index), silent=self.silent),\n \"match_treat_to_control\": check_repeats(np.concatenate(self.untreated_index), silent=self.silent),\n }\n\n self.quality_dict = {\"psi\": psi_data, \"ks_test\": ks_data, \"smd\": smd_data, \"repeats\": rep_dict}\n\n rep_df = pd.DataFrame.from_dict(rep_dict, orient=\"index\").rename(columns={0: \"value\"})\n self.rep_dict = rep_df\n\n if self.silent:\n logger.debug(f\"PSI info: \\n {psi_data.head(10)} \\nshape:{psi_data.shape}\")\n logger.debug(f\"Kolmogorov-Smirnov test info: \\n {ks_data.head(10)} \\nshape:{ks_data.shape}\")\n logger.debug(f\"Standardised mean difference info: \\n {smd_data.head(10)} \\nshape:{smd_data.shape}\")\n logger.debug(f\"Repeats info: \\n {rep_df.head(10)}\")\n else:\n logger.info(f\"PSI info: \\n {psi_data.head(10)} \\nshape:{psi_data.shape}\")\n logger.info(f\"Kolmogorov-Smirnov test info: \\n {ks_data.head(10)} \\nshape:{ks_data.shape}\")\n logger.info(f\"Standardised mean difference info: \\n {smd_data.head(10)} \\nshape:{smd_data.shape}\")\n logger.info(f\"Repeats info: \\n {rep_df.head(10)}\")\n\n return self.quality_dict\n\n def group_match(self):\n \"\"\"Matches the dataframe if it divided by groups.\n\n Returns:\n A tuple containing the matched dataframe and metrics such as ATE, ATT and ATC\n\n \"\"\"\n df = self.df.drop(columns=self.info_col)\n groups = sorted(df[self.group_col].unique())\n matches_c = []\n matches_t = []\n group_arr_c = df[df[self.treatment] == 0][self.group_col].to_numpy()\n group_arr_t = df[df[self.treatment] == 1][self.group_col].to_numpy()\n treat_arr_c = df[df[self.treatment] == 0][self.treatment].to_numpy()\n treat_arr_t = df[df[self.treatment] == 1][self.treatment].to_numpy()\n\n if self.pbar:\n self.tqdm = tqdm(total=len(groups) * 2)\n\n for group in groups:\n df_group = df[df[self.group_col] == group]\n temp = df_group[self.columns_match + [self.group_col]]\n temp = temp.loc[:, (temp != 0).any(axis=0)].drop(columns=self.group_col)\n treated, untreated = self._get_split(temp)\n\n std_treated_np, std_untreated_np = _transform_to_np(treated, untreated, self.weights)\n\n if self.pbar:\n self.tqdm.set_description(desc=f\"Get untreated index by group {group}\")\n matches_u_i = _get_index(std_treated_np, std_untreated_np, self.n_neighbors)\n\n if self.pbar:\n self.tqdm.update(1)\n self.tqdm.set_description(desc=f\"Get treated index by group {group}\")\n matches_t_i = _get_index(std_untreated_np, std_treated_np, self.n_neighbors)\n if self.pbar:\n self.tqdm.update(1)\n self.tqdm.refresh()\n\n group_mask_c = group_arr_c == group\n group_mask_t = group_arr_t == group\n matches_c_mask = np.arange(treat_arr_t.shape[0])[group_mask_t]\n matches_u_i = [matches_c_mask[i] for i in matches_u_i]\n matches_t_mask = np.arange(treat_arr_c.shape[0])[group_mask_c]\n matches_t_i = [matches_t_mask[i] for i in matches_t_i]\n matches_c.extend(matches_u_i)\n 
matches_t.extend(matches_t_i)\n\n if self.pbar:\n self.tqdm.close()\n\n self.untreated_index = matches_c\n self.treated_index = matches_t\n\n df_group = df[self.columns_match].drop(columns=self.group_col)\n treated, untreated = self._get_split(df_group)\n self._predict_outcome(treated, untreated)\n df_matched = self._create_matched_df()\n self._calculate_ate_all_target(df_matched)\n\n if self.validation:\n return self.val_dict\n\n return self.report_view(), df_matched\n\n def match(self):\n \"\"\"Matches the dataframe.\n\n Returns:\n A tuple containing the matched dataframe and metrics such as ATE, ATT and ATC\n\n \"\"\"\n if self.group_col is not None:\n return self.group_match()\n\n df = self.df[self.columns_match]\n treated, untreated = self._get_split(df)\n\n std_treated_np, std_untreated_np = _transform_to_np(treated, untreated, self.weights)\n\n if self.pbar:\n self.tqdm = tqdm(total=len(std_treated_np) + len(std_untreated_np))\n self.tqdm.set_description(desc=\"Get untreated index\")\n\n untreated_index = _get_index(std_treated_np, std_untreated_np, self.n_neighbors)\n\n if self.pbar:\n self.tqdm.update(len(std_treated_np))\n self.tqdm.set_description(desc=\"Get treated index\")\n treated_index = _get_index(std_untreated_np, std_treated_np, self.n_neighbors)\n\n if self.pbar:\n self.tqdm.update(len(std_untreated_np))\n self.tqdm.refresh()\n self.tqdm.close()\n\n self.untreated_index = untreated_index\n self.treated_index = treated_index\n\n self._predict_outcome(treated, untreated)\n\n df_matched = self._create_matched_df()\n self._calculate_ate_all_target(df_matched)\n\n if self.validation:\n return self.val_dict\n\n return self.report_view(), df_matched\n\n def report_view(self) -> pd.DataFrame:\n \"\"\"Formats the ATE, ATC, and ATT results into a Pandas DataFrame for easy viewing.\n\n Returns:\n DataFrame containing ATE, ATC, and ATT results\n \"\"\"\n result = (self.ATE, self.ATC, self.ATT)\n\n for outcome in self.outcomes:\n res = pd.DataFrame(\n [x[outcome] + [outcome] for x in result],\n columns=[\"effect_size\", \"std_err\", \"p-val\", \"ci_lower\", \"ci_upper\", \"outcome\"],\n index=[\"ATE\", \"ATC\", \"ATT\"],\n )\n self.results = pd.concat([self.results, res])\n return self.results" }, { "identifier": "MatcherNoReplacement", "path": "hypex/algorithms/no_replacement_matching.py", "snippet": "class MatcherNoReplacement:\n \"\"\"Matching groups with no replacement.\n\n Realized by optimizing the linear sum of distances between pairs of treatment and\n control samples.\n \"\"\"\n\n def __init__(self, X: pd.DataFrame, a: pd.Series, weights: dict = None, approximate_match: bool = False):\n \"\"\"Initialize matching.\n\n Args:\n X: features dataframe\n a: series of treatment value\n weights: weights for numeric columns in order to increase matching quality.\n approximate_match: use or not approximate matching\n \"\"\"\n self.treatment = a\n self.X = X\n self.weights = weights\n self.approximate_match = approximate_match\n\n def match(self):\n \"\"\"Function run matching with no replacement.\n\n Returns:\n Dataframe of matched indexes.\n \"\"\"\n matches = {}\n cov = conditional_covariance(self.X[self.treatment == 1].values, self.X[self.treatment == 0].values)\n distance_matrix = self._get_distance_matrix(self.X[self.treatment == 1], self.X[self.treatment == 0], cov)\n source_array, neighbor_array_indices, distances = optimally_match_distance_matrix(distance_matrix)\n source_df = self.X[self.treatment == 1].iloc[np.array(source_array)]\n target_df = self.X[self.treatment == 
0].iloc[np.array(neighbor_array_indices)]\n\n matches[1] = self.create_match_df(self.treatment, source_df, target_df, distances)\n matches[0] = self.create_match_df(self.treatment, target_df, source_df, distances)\n\n match_df = pd.concat(matches, sort=True)\n return match_df\n\n def create_match_df(\n self, base_series: pd.Series, source_df: pd.DataFrame, target_df: pd.DataFrame, distances: list\n ) -> pd.DataFrame:\n \"\"\"Function creates matching dataframe.\n\n Args:\n base_series: series of treatment value.\n source_df: dataframe of sources indexes.\n target_df: dataframe of target indexes.\n distances: matrix of calculated distances.\n\n Returns:\n Matched dataframe of indexes.\n \"\"\"\n match_sub_df = pd.DataFrame(\n index=base_series.index,\n columns=[\n \"matches\",\n \"distances\",\n ],\n data=base_series.apply(lambda x: pd.Series([[], []])).values,\n dtype=\"object\",\n )\n\n # matching from source to target: read distances\n match_sub_df.loc[source_df.index] = pd.DataFrame(\n data=dict(\n matches=[[tidx] for tidx in target_df.index],\n distances=distances,\n ),\n index=source_df.index,\n )\n\n # matching from target to target: fill with zeros\n match_sub_df.loc[target_df.index] = pd.DataFrame(\n data=dict(\n matches=[[tidx] for tidx in target_df.index],\n distances=[[0]] * len(distances),\n ),\n index=target_df.index,\n )\n return match_sub_df\n\n def _get_metric_dict(self, cov: np.ndarray) -> dict:\n \"\"\"Function calculates correct feature space and generate metrics dist for cdist calculation.\n\n Args:\n cov: Matrix of covariations.\n\n Returns:\n Metric dictionary\n \"\"\"\n metric_dict = dict(metric=\"mahalanobis\")\n mahalanobis_transform = np.linalg.inv(cov)\n if self.weights is not None:\n features = self.X.columns\n w_list = np.array([self.weights[col] if col in self.weights.keys() else 1 for col in features])\n w_matrix = np.sqrt(np.diag(w_list / w_list.sum()))\n mahalanobis_transform = np.dot(w_matrix, mahalanobis_transform)\n\n metric_dict[\"VI\"] = mahalanobis_transform\n return metric_dict\n\n def _get_distance_matrix(self, source_df: pd.DataFrame, target_df: pd.DataFrame, cov: np.ndarray) -> np.ndarray:\n \"\"\"Create distance matrix for no replacement match.\n\n Combines metric and source/target data into a\n precalculated distance matrix which can be passed to\n scipy.optimize.linear_sum_assignment.\n\n Args:\n source_df: source feature dataframe.\n target_df: target feature dataframe.\n cov: matrix of covariations.\n\n Returns:\n Matrix of distances.\n \"\"\"\n cdist_args = dict(XA=_ensure_array_columnlike(source_df.values), XB=_ensure_array_columnlike(target_df.values))\n cdist_args.update(self._get_metric_dict(cov))\n\n if self.approximate_match:\n if len(cdist_args['XB']) < len(cdist_args['XA']):\n covariance_matrix = np.cov(cdist_args['XB'].T)\n else:\n covariance_matrix = np.cov(cdist_args['XA'].T)\n covariance_matrix_reg = covariance_matrix + np.eye(covariance_matrix.shape[0]) * 1e-8\n\n distance_matrix = np.zeros((cdist_args['XA'].shape[0], cdist_args['XB'].shape[0]))\n for i, x in enumerate(cdist_args['XA']):\n distance_matrix[i] = _m_distance(cdist_args['XB'], x, np.linalg.inv(covariance_matrix_reg))\n else:\n distance_matrix = distance.cdist(**cdist_args)\n return distance_matrix" }, { "identifier": "FeatureSelector", "path": "hypex/selectors/feature_selector.py", "snippet": "class FeatureSelector:\n \"\"\"Class of LAMA Feature selector. Select top features. 
By default, use LGM.\n # TODO: write some feature selector\"\"\"\n\n def __init__(\n self,\n outcome: str,\n outcome_type: str,\n treatment: str,\n timeout: int,\n n_threads: int,\n n_folds: int,\n verbose: bool, # не используется\n generate_report: bool,\n report_dir: str,\n use_algos: List[str],\n ):\n \"\"\"Initialize the LamaFeatureSelector.\n\n Args:\n outcome:\n The target column\n outcome_type:\n The type of target column\n treatment:\n The column that determines control and test groups\n timeout:\n Time limit for the execution of the code\n n_threads:\n Maximum number of threads to be used\n n_folds:\n Number of folds for cross-validation\n verbose:\n Flag to control the verbosity of the process stages\n generate_report:\n Flag to control whether to create a report or not\n report_dir:\n Directory for storing report files\n use_algos:\n List of names of LAMA algorithms for feature selection\n \"\"\"\n self.outcome = outcome\n self.outcome_type = outcome_type\n self.treatment = treatment\n self.use_algos = use_algos\n self.timeout = timeout\n self.n_threads = n_threads\n self.n_folds = n_folds\n self.verbose = verbose\n self.generate_report = generate_report\n self.report_dir = report_dir\n\n def perform_selection(self, df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Trains a model and returns feature scores.\n\n This method defines metrics, applies the model, creates a report, and returns feature scores\n\n Args:\n df:\n Input data\n\n Returns:\n A DataFrame containing the feature scores from the model\n\n \"\"\"\n roles = {\n \"target\": self.outcome,\n \"drop\": [self.treatment],\n }\n\n if self.outcome_type == \"numeric\":\n task_name = \"reg\"\n loss = \"mse\"\n metric = \"mse\"\n elif self.outcome_type == \"binary\":\n task_name = \"binary\"\n loss = \"logloss\"\n metric = \"logloss\"\n else:\n task_name = \"multiclass\"\n loss = \"crossentropy\"\n metric = \"crossentropy\"\n\n features_scores = []\n\n return features_scores" }, { "identifier": "SpearmanFilter", "path": "hypex/selectors/spearman_filter.py", "snippet": "class SpearmanFilter:\n \"\"\"Class to filter columns based on the Spearman correlation coefficient.\n\n The class is utilized to filter dataframe columns that do not exhibit a significant\n correlation (based on a provided threshold) with a specified outcome column.\n The significance of the correlation is determined using the Spearman correlation coefficient\n and a p-value threshold of 0.05\n \"\"\"\n\n def __init__(self, outcome: str, treatment: str, threshold: float):\n \"\"\"Initialize spearman filter.\n\n Args:\n outcome:\n The name of target column\n treatment:\n The name of the column that determines control and test groups\n threshold:\n The threshold for the Spearman correlation coefficient filter\n \"\"\"\n self.outcome: str = outcome\n self.treatment: str = treatment\n self.threshold: float = threshold\n\n def perform_filter(self, df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Filters columns based on their correlation with the outcome column.\n\n The method tests the correlation using the Spearman correlation coefficient.\n Columns that have an absolute correlation coefficient value less than the provided threshold,\n and a p-value less than 0.05, are considered insignificant and are removed from the dataframe\n\n Args:\n df:\n The input DataFrame\n\n Returns:\n The filtered DataFrame, containing only columns that\n are significantly correlated with the outcome column\n \"\"\"\n selected = []\n columns = df.drop([self.treatment, self.outcome], 1).columns\n 
for column in columns:\n result = spearmanr(df[self.outcome].values, df[column].values)\n if (abs(result[0] < self.threshold)) and (result[1] < PVALUE):\n selected.append(column)\n\n logger.info(f\"Drop columns {list(set(columns) - set(selected))}\")\n\n columns = selected + [self.treatment, self.outcome]\n df = df[columns]\n\n return df" }, { "identifier": "OutliersFilter", "path": "hypex/selectors/outliers_filter.py", "snippet": "class OutliersFilter:\n \"\"\"Class of Outliers Filter. It creates a row indices that should be deleted by percentile.\"\"\"\n\n def __init__(self, interquartile_coeff, mode_percentile, min_percentile, max_percentile):\n \"\"\"Initializes the OutliersFilter.\n\n Args:\n interquartile_coeff:\n Coefficient for the interquartile range to determine outliers\n mode_percentile:\n If True, outliers are determined by custom percentiles\n min_percentile:\n The lower percentile. Values below this percentile are considered outliers.\n max_percentile:\n The upper percentile. Values above this percentile are considered outliers\n \"\"\"\n self.interquartile_coeff = interquartile_coeff\n self.mode_percentile = mode_percentile\n self.min_percentile = min_percentile\n self.max_percentile = max_percentile\n\n def perform_filter(self, df: pd.DataFrame, interquartile: bool = True) -> pd.DataFrame:\n \"\"\"Identifies rows with outliers.\n\n This method creates a set of row indices to be removed, which contains values less than\n `min_percentile` and larger than `max_percentile` (if `mode_percentile` is True), or values\n smaller than the 0.2 and larget than 0.8 (if `mode_percentile` is False)\n\n Args:\n df:\n The input DataFrame\n interquartile:\n If True, uses the interquartile range to determine outliers. Defaults to True\n\n Returns:\n The set of row indices with outliers\n \"\"\"\n columns_names = df.select_dtypes(include=\"number\").columns\n rows_for_del = []\n for column in columns_names:\n if self.mode_percentile:\n min_value = df[column].quantile(self.min_percentile)\n max_value = df[column].quantile(self.max_percentile)\n elif interquartile:\n upper_quantile = df[column].quantile(0.8)\n lower_quantile = df[column].quantile(0.2)\n\n interquartile_range = upper_quantile - lower_quantile\n min_value = lower_quantile - self.interquartile_coeff * interquartile_range\n max_value = upper_quantile + self.interquartile_coeff * interquartile_range\n else:\n mean_value = df[column].mean()\n standard_deviation = df[column].std()\n nstd_lower, nstd_upper = 3, 3\n\n min_value = mean_value - nstd_lower * standard_deviation\n max_value = mean_value + nstd_upper * standard_deviation\n\n rows_for_del_column = (df[column] < min_value) | (df[column] > max_value)\n rows_for_del_column = df.index[rows_for_del_column].tolist()\n rows_for_del.extend(rows_for_del_column)\n rows_for_del = list(set(rows_for_del))\n logger.info(f\"Drop {len(rows_for_del)} rows\")\n return df.drop(rows_for_del)" }, { "identifier": "const_filtration", "path": "hypex/selectors/base_filtration.py", "snippet": "def const_filtration(X: pd.DataFrame, threshold: float = 0.95) -> list:\n \"\"\"Function removes features consist of constant value on 95%.\n\n Args:\n X: related dataset\n threshold: constant fill rate, default is 0.95\n\n Returns:\n List of filtered columns\n \"\"\"\n is_const = pd.Series(0, index=X.columns, dtype=np.dtype(bool))\n for col in X.columns:\n # NaNs are not counted using unique (since np.nan != np.nan). 
Fill them with a unique value:\n cur_col = X.loc[:, col]\n cur_col.loc[~np.isfinite(cur_col)] = cur_col.max() + 1\n # Get values' frequency:\n freqs = cur_col.value_counts(normalize=True)\n is_const[col] = np.any(freqs > threshold)\n\n selected_features = ~is_const\n if np.sum(selected_features) == 0:\n raise AssertionError(\"All features were removed by constant filtration.\")\n else:\n return X.loc[:, selected_features].columns.to_list()" }, { "identifier": "nan_filtration", "path": "hypex/selectors/base_filtration.py", "snippet": "def nan_filtration(X: pd.DataFrame, threshold: float = 0.8):\n \"\"\"Function removes features consist of NaN value on 80%.\n\n Args:\n X: related dataset\n threshold: constant fill rate, default is 0.95\n\n Returns:\n List of filtered columns\n \"\"\"\n nan_freqs = np.mean(pd.isnull(X), axis=0)\n is_sparse = nan_freqs > threshold\n selected_features = ~is_sparse\n if np.sum(selected_features) == 0:\n raise AssertionError(\"All features were removed by nan filtration.\")\n else:\n return X.loc[:, selected_features].columns.to_list()" }, { "identifier": "random_feature", "path": "hypex/utils/validators.py", "snippet": "def random_feature(df: pd.DataFrame):\n \"\"\"Adds a random feature to the initial dataset.\n\n Args:\n df:\n The initial dataframe\n\n Returns:\n The modified dataframe with an additional random feature\n A validation flag\n \"\"\"\n feature = np.random.normal(0, 1, size=len(df))\n validate = 1\n df[\"random_feature\"] = feature\n return df, validate" }, { "identifier": "random_treatment", "path": "hypex/utils/validators.py", "snippet": "def random_treatment(df: pd.DataFrame, treatment: str):\n \"\"\"Replaces real treatment with a random placebo treatment.\n\n Args:\n df:\n The initial dataframe\n treatment:\n The columns name representing the treatment\n\n Returns:\n The modified dataframe with the original treatment replaced\n The original treatment series\n A validation flag\n \"\"\"\n prop1 = df[treatment].sum() / df.shape[0]\n prop0 = 1 - prop1\n new_treatment = np.random.choice([0, 1], size=df.shape[0], p=[prop0, prop1])\n validate = 1\n orig_treatment = df[treatment]\n df = df.drop(columns=treatment)\n df[treatment] = new_treatment\n return df, orig_treatment, validate" }, { "identifier": "subset_refuter", "path": "hypex/utils/validators.py", "snippet": "def subset_refuter(df: pd.DataFrame, treatment: str, fraction: float = 0.8):\n \"\"\"Returns a subset of data with given fraction (default 0.8).\n\n Args:\n df:\n The initial dataframe\n treatment:\n The column name representing the treatment\n fraction:\n The fraction of the dataset to divide random matching\n\n Returns:\n The subset of the dataframe\n A validation flag\n \"\"\"\n df = df.groupby(treatment, group_keys=False).apply(lambda x: x.sample(frac=fraction))\n validate = 1\n return df, validate" }, { "identifier": "test_significance", "path": "hypex/utils/validators.py", "snippet": "def test_significance(estimate: float, simulations: List) -> float:\n \"\"\"Performs a significance test for a normal distribution.\n\n Args:\n estimate:\n The estimated effect\n simulations:\n A list of estimated effects from each simulation\n\n Returns:\n The p-value of the test\n \"\"\"\n mean_refute_value = np.mean(simulations)\n std_dev_refute_values = np.std(simulations)\n z_score = (estimate - mean_refute_value) / std_dev_refute_values\n\n if z_score > 0: # Right Tail\n p_value = 1 - st.norm.cdf(z_score)\n else: # Left Tail\n p_value = st.norm.cdf(z_score)\n\n return p_value" } ]
import logging import pickle import numpy as np import pandas as pd from typing import Union from tqdm.auto import tqdm from .algorithms.faiss_matcher import FaissMatcher from .algorithms.no_replacement_matching import MatcherNoReplacement from .selectors.feature_selector import FeatureSelector from .selectors.spearman_filter import SpearmanFilter from .selectors.outliers_filter import OutliersFilter from .selectors.base_filtration import const_filtration, nan_filtration from .utils.validators import random_feature from .utils.validators import random_treatment from .utils.validators import subset_refuter from .utils.validators import test_significance
12,387
info_col: Columns with id, date or metadata, not taking part in calculations. Defaults to None weights: weights for numeric columns in order to increase matching quality by weighted feature. By default, is None (all features have the same weight equal to 1). Example: {'feature_1': 10} base_filtration: To use or not base filtration of features in order to remove all constant or almost all constant, bool. Default is False. generate_report: Flag to create report. Defaults to True report_feat_select_dir: Folder for report files. Defaults to "report_feature_selector" timeout: Limit work time of code LAMA. Defaults to 600 n_threads: Maximum number of threads. Defaults to 1 n_folds: Number of folds for cross-validation. Defaults to 4 verbose: Flag to show process stages. Defaults to 2 use_algos: List of names of LAMA algorithms for feature selection. Defaults to ["lgb"] same_target_threshold: Threshold for correlation coefficient filter (Spearman). Default to 0.7 interquartile_coeff: Percent for drop outliers. Default to 1.5 drop_outliers_by_percentile: Flag to drop outliers by custom percentiles. Defaults to True min_percentile: Minimum percentile to drop outliers. Defaults to 0.02 max_percentile: Maximum percentile to drop outliers. Defaults to 0.98 n_neighbors: Number of neighbors to match (in fact you may see more then n matches as every match may have more then one neighbor with the same distance). Default value is 1. silent: Write logs in debug mode pbar: Display progress bar while get index """ if use_algos is None: use_algos = USE_ALGOS self.input_data = input_data if outcome is None: outcome = list() self.outcomes = outcome if type(outcome) == list else [outcome] self.treatment = treatment self.group_col = group_col self.info_col = info_col self.outcome_type = outcome_type self.weights = weights self.generate_report = generate_report self.report_feat_select_dir = report_feat_select_dir self.timeout = timeout self.n_threads = n_threads self.n_folds = n_folds self.verbose = verbose self.use_algos = use_algos self.same_target_threshold = same_target_threshold self.interquartile_coeff = interquartile_coeff self.mode_percentile = drop_outliers_by_percentile self.min_percentile = min_percentile self.max_percentile = max_percentile self.base_filtration = base_filtration self.features_importance = None self.matcher = None self.val_dict = None self.pval_dict = None self.new_treatment = None self.validate = None self.dropped_features = [] self.n_neighbors = n_neighbors self.silent = silent self.pbar = pbar self._preprocessing_data() def _convert_categorical_to_dummy(self): """Converts categorical variables to dummy variables. Returns: Data with categorical variables converted to dummy variables. 
""" info_col = self.info_col if self.info_col is not None else [] group_col = [self.group_col] if self.group_col is not None else [] columns_to_drop = info_col + group_col if columns_to_drop is not None: data = self.input_data.drop(columns=columns_to_drop) else: data = self.input_data dummy_data = pd.get_dummies(data, drop_first=True, dtype=np.uint8) return dummy_data def _preprocessing_data(self): """Converts categorical features into dummy variables.""" info_col = self.info_col if self.info_col is not None else [] group_col = [self.group_col] if self.group_col is not None else [] columns_to_drop = info_col + group_col + self.outcomes + [self.treatment] if self.base_filtration: filtered_features = nan_filtration(self.input_data.drop(columns=columns_to_drop)) self.dropped_features = [f for f in self.input_data.columns if f not in filtered_features + columns_to_drop] self.input_data = self.input_data[filtered_features + columns_to_drop] nan_counts = self.input_data.isna().sum().sum() if nan_counts != 0: self._log(f"Number of NaN values filled with zeros: {nan_counts}", silent=False) self.input_data = self.input_data.fillna(0) if self.group_col is not None: group_col = self.input_data[[self.group_col]] if self.info_col is not None: info_col = self.input_data[self.info_col] self.input_data = self._convert_categorical_to_dummy() if self.group_col is not None: self.input_data = pd.concat([self.input_data, group_col], axis=1) if self.info_col is not None: self.input_data = pd.concat([self.input_data, info_col], axis=1) if self.base_filtration:
"""Base Matcher class.""" REPORT_FEAT_SELECT_DIR = "report_feature_selector" REPORT_PROP_MATCHER_DIR = "report_matcher" NAME_REPORT = "lama_interactive_report.html" N_THREADS = 1 N_FOLDS = 4 RANDOM_STATE = 123 TEST_SIZE = 0.2 TIMEOUT = 600 VERBOSE = 2 USE_ALGOS = ["lgb"] PROP_SCORES_COLUMN = "prop_scores" GENERATE_REPORT = True SAME_TARGET_THRESHOLD = 0.7 OUT_INTER_COEFF = 1.5 OUT_MODE_PERCENT = True OUT_MIN_PERCENT = 0.02 OUT_MAX_PERCENT = 0.98 logger = logging.getLogger("hypex") console_out = logging.StreamHandler() logging.basicConfig( handlers=(console_out,), format="[%(asctime)s | %(name)s | %(levelname)s]: %(message)s", datefmt="%d.%m.%Y %H:%M:%S", level=logging.INFO, ) class Matcher: """Class for compile full pipeline of Matching in Causal Inference task. Matcher steps: - Read, analyze data - Feature selection via LightAutoML - Converting a dataset with features to another space via Cholesky decomposition In the new space, the distance L2 becomes equivalent to the Mahalanobis distance. This allows us to use faiss to search for nearest objects, which can search only by L2 metric, but without violating the methodology of matching, for which it is important to count by the Mahalanobis distance - Finding the nearest neighbors for each unit (with duplicates) using faiss. For each of the control group, neighbors from the target group are matched and vice versa. - Calculation bias - Creating matched df (Wide df with pairs) - Calculation metrics: ATE, ATT, ATC, p-value, and сonfidence intervals - Calculation quality: PS-test, KS test, SMD test - Returns metrics as dataframe, quality results as dict of df's and df_matched - After receiving the result, the result should be validated using :func:`~hypex.matcher.Matcher.validate_result` Example: Common usecase - base pipeline for matching >>> # Base info >>> treatment = "treatment" # Column name with info about 'treatment' 0 or 1 >>> target = "target" # Column name with target >>> >>> # Optional >>> info_col = ["user_id", 'address'] # Columns that will not participate in the match and are informative. >>> group_col = "CatCol" # Column name for strict comparison (for a categorical feature) >>> >>> # Matching >>> model = Matcher(data, outcome=target, treatment=treatment, info_col=info_col, group_col=group_col) >>> features = model.lama_feature_select() # Feature selection via lama >>> results, quality, df_matched = model.estimate(features=some_features) # Performs matching >>> >>> model.validate_result() """ def __init__( self, input_data: pd.DataFrame, treatment: str, outcome: Union[str, list] = None, outcome_type: str = "numeric", group_col: str = None, info_col: list = None, weights: dict = None, base_filtration: bool = False, generate_report: bool = GENERATE_REPORT, report_feat_select_dir: str = REPORT_FEAT_SELECT_DIR, timeout: int = TIMEOUT, n_threads: int = N_THREADS, n_folds: int = N_FOLDS, verbose: bool = VERBOSE, use_algos: list = None, same_target_threshold: float = SAME_TARGET_THRESHOLD, interquartile_coeff: float = OUT_INTER_COEFF, drop_outliers_by_percentile: bool = OUT_MODE_PERCENT, min_percentile: float = OUT_MIN_PERCENT, max_percentile: float = OUT_MAX_PERCENT, n_neighbors: int = 1, silent: bool = True, pbar: bool = True, ): """Initialize the Matcher object. Args: input_data: Input dataframe outcome: Target column treatment: Column determine control and test groups outcome_type: Values type of target column. Defaults to "numeric" group_col: Column for grouping. Defaults to None. 
info_col: Columns with id, date or metadata, not taking part in calculations. Defaults to None weights: weights for numeric columns in order to increase matching quality by weighted feature. By default, is None (all features have the same weight equal to 1). Example: {'feature_1': 10} base_filtration: To use or not base filtration of features in order to remove all constant or almost all constant, bool. Default is False. generate_report: Flag to create report. Defaults to True report_feat_select_dir: Folder for report files. Defaults to "report_feature_selector" timeout: Limit work time of code LAMA. Defaults to 600 n_threads: Maximum number of threads. Defaults to 1 n_folds: Number of folds for cross-validation. Defaults to 4 verbose: Flag to show process stages. Defaults to 2 use_algos: List of names of LAMA algorithms for feature selection. Defaults to ["lgb"] same_target_threshold: Threshold for correlation coefficient filter (Spearman). Default to 0.7 interquartile_coeff: Percent for drop outliers. Default to 1.5 drop_outliers_by_percentile: Flag to drop outliers by custom percentiles. Defaults to True min_percentile: Minimum percentile to drop outliers. Defaults to 0.02 max_percentile: Maximum percentile to drop outliers. Defaults to 0.98 n_neighbors: Number of neighbors to match (in fact you may see more then n matches as every match may have more then one neighbor with the same distance). Default value is 1. silent: Write logs in debug mode pbar: Display progress bar while get index """ if use_algos is None: use_algos = USE_ALGOS self.input_data = input_data if outcome is None: outcome = list() self.outcomes = outcome if type(outcome) == list else [outcome] self.treatment = treatment self.group_col = group_col self.info_col = info_col self.outcome_type = outcome_type self.weights = weights self.generate_report = generate_report self.report_feat_select_dir = report_feat_select_dir self.timeout = timeout self.n_threads = n_threads self.n_folds = n_folds self.verbose = verbose self.use_algos = use_algos self.same_target_threshold = same_target_threshold self.interquartile_coeff = interquartile_coeff self.mode_percentile = drop_outliers_by_percentile self.min_percentile = min_percentile self.max_percentile = max_percentile self.base_filtration = base_filtration self.features_importance = None self.matcher = None self.val_dict = None self.pval_dict = None self.new_treatment = None self.validate = None self.dropped_features = [] self.n_neighbors = n_neighbors self.silent = silent self.pbar = pbar self._preprocessing_data() def _convert_categorical_to_dummy(self): """Converts categorical variables to dummy variables. Returns: Data with categorical variables converted to dummy variables. 
""" info_col = self.info_col if self.info_col is not None else [] group_col = [self.group_col] if self.group_col is not None else [] columns_to_drop = info_col + group_col if columns_to_drop is not None: data = self.input_data.drop(columns=columns_to_drop) else: data = self.input_data dummy_data = pd.get_dummies(data, drop_first=True, dtype=np.uint8) return dummy_data def _preprocessing_data(self): """Converts categorical features into dummy variables.""" info_col = self.info_col if self.info_col is not None else [] group_col = [self.group_col] if self.group_col is not None else [] columns_to_drop = info_col + group_col + self.outcomes + [self.treatment] if self.base_filtration: filtered_features = nan_filtration(self.input_data.drop(columns=columns_to_drop)) self.dropped_features = [f for f in self.input_data.columns if f not in filtered_features + columns_to_drop] self.input_data = self.input_data[filtered_features + columns_to_drop] nan_counts = self.input_data.isna().sum().sum() if nan_counts != 0: self._log(f"Number of NaN values filled with zeros: {nan_counts}", silent=False) self.input_data = self.input_data.fillna(0) if self.group_col is not None: group_col = self.input_data[[self.group_col]] if self.info_col is not None: info_col = self.input_data[self.info_col] self.input_data = self._convert_categorical_to_dummy() if self.group_col is not None: self.input_data = pd.concat([self.input_data, group_col], axis=1) if self.info_col is not None: self.input_data = pd.concat([self.input_data, info_col], axis=1) if self.base_filtration:
filtered_features = const_filtration(self.input_data.drop(columns=columns_to_drop))
5
2023-11-01 08:58:57+00:00
16k
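The row above documents the `hypex` `Matcher` API (its docstring sketches the pipeline and a usage example, and its `match()` method returns a report DataFrame plus the wide matched DataFrame). As a reading aid, here is a minimal usage sketch consistent with that snippet. It assumes the `hypex` package is installed and that `Matcher` is importable from the top-level package; the synthetic DataFrame and its column names (`user_id`, `feat_1`, `feat_2`, `treatment`, `target`) are invented for illustration only.

```python
# Minimal sketch, assuming `hypex` is installed; import path and data are assumptions.
import numpy as np
import pandas as pd
from hypex import Matcher  # assumed import path for the Matcher class shown in this row

rng = np.random.default_rng(0)
n = 500
data = pd.DataFrame({
    "user_id": np.arange(n),                  # informative column, excluded from matching via info_col
    "feat_1": rng.normal(size=n),
    "feat_2": rng.normal(size=n),
    "treatment": rng.integers(0, 2, size=n),  # 0 = control, 1 = treated (as the docstring requires)
})
# Hypothetical outcome with a treatment effect baked in, purely for demonstration
data["target"] = data["feat_1"] + 2 * data["treatment"] + rng.normal(size=n)

model = Matcher(
    input_data=data,
    treatment="treatment",
    outcome="target",
    info_col=["user_id"],
)

# Per the snippet, match() returns the ATE/ATC/ATT report DataFrame
# (effect size, std error, p-value, confidence interval) and the matched DataFrame.
report, df_matched = model.match()
print(report)
```

The exact method names and return shapes may differ across `hypex` versions; the call above follows the `match()` and `report_view()` definitions quoted in this row rather than the `estimate()` call mentioned in the class docstring.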
tianhaowuhz/human-assisting-dex-grasp
Runners/TrainGFPPO.py
[ { "identifier": "GFPPO", "path": "Algorithms/ppo/gf_ppo_update.py", "snippet": "class GFPPO:\n def __init__(self,\n vec_env,\n cfg_train,\n device='cpu',\n sampler='sequential',\n log_dir='run',\n is_testing=False,\n print_log=True,\n apply_reset=False,\n asymmetric=False,\n args=None,\n ):\n self.args = args\n ''' PPO '''\n # PPO parameters\n if not isinstance(vec_env.observation_space, Space):\n raise TypeError(\"vec_env.observation_space must be a gym Space\")\n if not isinstance(vec_env.state_space, Space):\n raise TypeError(\"vec_env.state_space must be a gym Space\")\n if not isinstance(vec_env.action_space, Space):\n raise TypeError(\"vec_env.action_space must be a gym Space\")\n self.observation_space = vec_env.observation_space\n self.action_space = vec_env.action_space\n self.state_space = vec_env.state_space\n self.cfg_train = copy.deepcopy(cfg_train)\n learn_cfg = self.cfg_train[\"learn\"]\n self.device = device\n self.asymmetric = asymmetric\n self.desired_kl = learn_cfg.get(\"desired_kl\", None)\n self.schedule = learn_cfg.get(\"schedule\", \"fixed\")\n self.step_size = learn_cfg[\"optim_stepsize\"]\n self.init_noise_std = learn_cfg.get(\"init_noise_std\", 0.3)\n self.model_cfg = self.cfg_train[\"policy\"]\n self.num_transitions_per_env=learn_cfg[\"nsteps\"]\n self.learning_rate=learn_cfg[\"optim_stepsize\"]\n\n self.clip_param = learn_cfg[\"cliprange\"]\n self.num_learning_epochs = learn_cfg[\"noptepochs\"]\n self.num_mini_batches = learn_cfg[\"nminibatches\"]\n self.value_loss_coef = learn_cfg.get(\"value_loss_coef\", 2.0)\n self.entropy_coef = learn_cfg[\"ent_coef\"]\n self.gamma = learn_cfg[\"gamma\"]\n self.lam = learn_cfg[\"lam\"]\n self.max_grad_norm = learn_cfg.get(\"max_grad_norm\", 2.0)\n self.use_clipped_value_loss = learn_cfg.get(\"use_clipped_value_loss\", False)\n\n # policy type \n self.action_type = self.cfg_train[\"setting\"][\"action_type\"]\n self.sub_action_type = self.cfg_train[\"setting\"][\"sub_action_type\"]\n self.action_clip = self.cfg_train[\"setting\"][\"action_clip\"]\n self.grad_process = self.cfg_train[\"setting\"][\"grad_process\"]\n self.grad_scale = self.cfg_train[\"setting\"][\"grad_scale\"]\n\n if self.action_type=='joint' and self.sub_action_type=='add+jointscale':\n action_space_shape = (18+18,)\n else:\n action_space_shape = self.action_space.shape\n print(f'action_space_shape:{action_space_shape}!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n self.vec_env = vec_env\n self.vec_env.grad_scale = self.grad_scale\n \n pointnet_version = self.cfg_train[\"policy\"][\"pointnet_version\"]\n\n hand_pcl = self.cfg_train[\"policy\"][\"hand_pcl\"]\n hand_model = None\n\n # PPO components\n self.stack_frame_numer = self.vec_env.stack_frame_numbers\n self.actor_critic = ActorCritic(self.observation_space.shape, self.state_space.shape, action_space_shape,\n self.init_noise_std, self.model_cfg, asymmetric=asymmetric, stack_frame_number=self.stack_frame_numer, \n sub_obs_type=self.vec_env.sub_obs_type, num_fingertip=self.vec_env.num_fingertips, pointnet_type=pointnet_version, \n envs=self.vec_env, hand_pcl=hand_pcl, hand_model=hand_model, args=args)\n\n # pointnet backbone\n \n self.pointnet_finetune = self.model_cfg['finetune_pointnet']\n self.finetune_pointnet_bz = 128\n if self.model_cfg['pretrain_pointnet']:\n if pointnet_version == 'pt2':\n pointnet_model_dict = torch.load(os.path.join(args.score_model_path,'pt2.pt'), map_location=self.device)\n elif pointnet_version == 'pt':\n pointnet_model_dict = torch.load(os.path.join(args.score_model_path,'pt.pt'), 
map_location=self.device)\n if self.model_cfg['shared_pointnet']:\n self.actor_critic.pointnet_enc.load_state_dict(pointnet_model_dict)\n if not self.model_cfg['finetune_pointnet']:\n # freeze pointnet\n for name,param in self.actor_critic.pointnet_enc.named_parameters():\n param.requires_grad = False\n else:\n self.actor_critic.actor_pointnet_enc.load_state_dict(pointnet_model_dict)\n self.actor_critic.critic_pointnet_enc.load_state_dict(pointnet_model_dict)\n\n if not self.model_cfg['finetune_pointnet']:\n # freeze pointnet\n for name,param in self.actor_critic.actor_pointnet_enc.named_parameters():\n param.requires_grad = False\n for name,param in self.actor_critic.critic_pointnet_enc.named_parameters():\n param.requires_grad = False\n\n self.actor_critic.to(self.device)\n self.storage = RolloutStorage(self.vec_env.num_envs, self.num_transitions_per_env, self.observation_space.shape,\n self.state_space.shape, action_space_shape, self.device, sampler)\n \n self.optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.actor_critic.parameters()), lr=self.learning_rate)\n\n ''' SDE '''\n if 'gf' in self.vec_env.sub_obs_type:\n # init SDE config\n self.prior_fn, self.marginal_prob_fn, self.sde_fn = init_sde(\"vp\")\n self.score = CondScoreModel(\n self.marginal_prob_fn,\n hidden_dim=args.hidden_dim,\n embed_dim=args.embed_dim,\n mode=args.score_mode,\n relative=args.relative,\n space=args.space,\n pointnet_version='pt2',\n )\n model_dict = torch.load(os.path.join(args.score_model_path,'score.pt'))\n self.score.load_state_dict(model_dict)\n self.score.to(device)\n self.score.eval()\n self.points_per_object = args.points_per_object\n self.t0 = args.t0\n self.ori_grad = None\n\n ''' Log '''\n # self.log_dir = log_dir\n if self.args.model_dir != \"\" and self.vec_env.mode=='train':\n time_now = self.args.model_dir.split('/')[8].split('_')[0] \n else:\n time_now = time.strftime('%m-%d-%H-%M',time.localtime(time.time()))\n\n self.log_dir = os.path.join(f\"./logs/{args.exp_name}/{time_now}_handrot:{self.vec_env.hand_rotation}_t0:{self.t0}_sfn:{self.vec_env.stack_frame_numbers}_{self.vec_env.num_envs}ne_{len(self.vec_env.shapes_all)}obj_gpt:{self.grad_process}_gs:{self.grad_scale}_at:{self.action_type}_subat:{self.sub_action_type}_rt:{self.vec_env.reward_type}_rn:{self.vec_env.reward_normalize}_simfreq:{self.vec_env.similarity_reward_freq}_cd:{self.vec_env.close_dis}_pts:{pointnet_version}_seed{args.seed}\")\n self.print_log = print_log\n self.writer = SummaryWriter(log_dir=self.log_dir, flush_secs=10)\n self.tot_timesteps = 0\n self.tot_time = 0\n self.is_testing = is_testing\n self.current_learning_iteration = 0\n\n if save_video:\n self.video_log_dir = os.path.join(self.log_dir,'video')\n os.makedirs(self.video_log_dir,exist_ok=True)\n self.vis_env_num = self.args.vis_env_num\n\n self.apply_reset = apply_reset\n\n ''' Evaluation '''\n if 'gf_check' in self.action_type:\n self.eval_round = 20\n else:\n self.eval_round = 5\n\n if self.vec_env.mode == 'eval':\n self.eval_round = self.args.eval_times\n\n if save_state:\n self.eval_metrics = {\n 'obj_shapes':[],\n 'time_step':[],\n 'success_rate':[],\n 'gt_dist':[],\n 'stability':[],\n 'lift_nums':np.zeros(self.vec_env.num_envs),\n 'gf_state_init':[],\n 'gf_state_final':[],\n 'gf_state_gt':[],\n }\n else:\n self.eval_metrics = {\n 'obj_shapes':[],\n 'time_step':[],\n 'success_rate':[],\n 'gt_dist':[],\n 'stability':[],\n 'lift_nums':np.zeros(self.vec_env.num_envs),\n 'obj_translation':[],\n 'obj_cosine_similarity':[],\n }\n 
self.eval_metrics['obj_shapes'] = self.vec_env.object_types\n\n def test(self, path):\n self.actor_critic.load_state_dict(torch.load(path, map_location=self.device))\n self.actor_critic.eval()\n\n def load(self, path):\n self.actor_critic.load_state_dict(torch.load(path, map_location=self.device))\n self.current_learning_iteration = int(path.split(\"_\")[-1].split(\".\")[0])\n self.actor_critic.train()\n\n model_dir = path[:-len(path.split('/')[-1])] + f\"metric_{self.args.exp_name}_{self.args.seed}.pkl\"\n self.eval_metrics = CPickle.load(open(model_dir, 'rb'))\n\n def save(self, path):\n torch.save(self.actor_critic.state_dict(), path)\n \n def eval(self, it):\n # eval initilization\n self.vec_env.eval(vis=save_video)\n test_times = 0\n success_times = 0 # total_success_times / total_trials\n success_rates = [] # s_rate for each round\n reward_all = []\n if 'gf_check' in self.action_type:\n total_diff_direction_num = 0\n total_dof_error = 0\n diff_joint_num = torch.zeros(18,device=self.device)\n \n if self.vec_env.mode == 'train':\n save_time = 0 # means save all videos\n else:\n save_time = self.eval_round - 1\n\n # start evaluation\n with tqdm(total=self.eval_round) as pbar:\n pbar.set_description('Validating:')\n with torch.no_grad():\n for r in range(self.eval_round) :\n if save_video and r<=save_time:\n all_images = torch.tensor([],device=self.device)\n # reset env\n current_obs = self.vec_env.reset()['obs']\n current_states = self.vec_env.get_state()\n eval_done_envs = torch.zeros(self.vec_env.num_envs, dtype=torch.long, device=self.device)\n\n if save_state:\n self.eval_metrics['gf_state_init'].append(self.vec_env.get_states(gf_state=True))\n self.eval_metrics['gf_state_gt'].append(self.vec_env.target_hand_dof)\n\n # step\n while True :\n # Compute the action\n actions, grad = self.compute_action(current_obs=current_obs,mode='eval')\n # print(grad)\n step_actions = self.process_actions(actions=actions.clone(), grad=grad)\n # primitive_actions.append(torch.mean(grad).item())\n # all_actions.append(torch.mean(step_actions).item())\n if self.vec_env.progress_buf[0] == 49 and save_state:\n self.eval_metrics['gf_state_final'].append(self.vec_env.get_states(gf_state=True))\n\n # Step the vec_environment\n next_obs, rews, dones, infos = self.vec_env.step(step_actions, (actions,grad))\n\n if save_video and r<=save_time:\n image = self.vec_env.render(rgb=True, img_size=img_size, vis_env_num=self.vis_env_num).reshape(self.vis_env_num, 1, img_size, img_size, 3)\n all_images = torch.cat([all_images, image],1)\n current_obs.copy_(next_obs['obs'])\n\n # done\n new_done_env_ids = (dones&(1-eval_done_envs)).nonzero(as_tuple=False).squeeze(-1)\n if len(new_done_env_ids) > 0:\n if self.vec_env.disable_collision:\n print('-----------------------------------')\n print('no coll succ:', infos['success_num'])\n self.vec_env.grasp_filter(states=self.eval_metrics['gf_state_final'][r], test_time=1, reset_coll=True)\n \n self.eval_metrics['time_step'].append(it)\n self.eval_metrics['success_rate'].append(float(infos['success_rate'].cpu().numpy()))\n # self.eval_metrics['obj_translation'].append(float(infos['obj_translation'].cpu().numpy()))\n # self.eval_metrics['obj_cosine_similarity'].append(float(infos['obj_cosine_similarity'].cpu().numpy()))\n self.eval_metrics['gt_dist'].append(float(infos['gt_dist'].cpu().numpy()))\n self.eval_metrics['lift_nums']+=infos['lift_nums'].cpu().numpy()\n if self.vec_env.mode == 'eval':\n with 
open(f'logs/{self.args.exp_name}/metrics_{self.args.eval_name}_eval_{self.args.seed}.pkl', 'wb') as f: \n pickle.dump(self.eval_metrics, f)\n else:\n with open(os.path.join(self.log_dir, f'metric_{self.args.exp_name}_{self.args.seed}.pkl'), 'wb') as f: \n pickle.dump(self.eval_metrics, f)\n\n if 'gf_check' in self.action_type:\n final_hand_dof = self.vec_env.final_hand_dof\n target_hand_dof = self.vec_env.target_hand_dof\n diff_direction_ids = ((self.vec_env.final_hand_dof * self.vec_env.target_hand_dof)<0).nonzero() \n same_direction_ids = ((self.vec_env.final_hand_dof * self.vec_env.target_hand_dof)>0).nonzero() \n for mm in range(18):\n diff_joint_num[mm] += torch.sum(diff_direction_ids[:,1]==mm) \n print(len(diff_direction_ids)/self.vec_env.num_envs)\n print(diff_joint_num)\n dof_error = torch.mean(abs(target_hand_dof[same_direction_ids[:,0],same_direction_ids[:,1]] - final_hand_dof[same_direction_ids[:,0],same_direction_ids[:,1]]))\n print(dof_error)\n total_diff_direction_num+=(len(diff_direction_ids)/self.vec_env.num_envs)\n total_dof_error+=(dof_error)\n\n if r > save_time:\n self.vec_env.graphics_device_id = -1\n self.vec_env.enable_camera_sensors = False\n\n if save_video and r<=save_time:\n for (i,images) in enumerate(all_images):\n obj_type = self.vec_env.object_type_per_env[i]\n save_path = os.path.join(self.video_log_dir,f'{obj_type}_epoach:{it}_round:{r}')\n images_to_video(path=save_path, images=images.cpu().numpy(), size=(img_size,img_size))\n\n test_times += len(new_done_env_ids)\n success_times += infos['success_num']\n reward_all.extend(rews[new_done_env_ids].cpu().numpy())\n eval_done_envs[new_done_env_ids] = 1\n print(f'eval_success_rate: {success_times/test_times}')\n success_rates.append(infos['success_num'] / len(new_done_env_ids))\n\n if test_times==(r+1)*self.vec_env.num_envs:\n break\n pbar.update(1)\n if 'gf_check' in self.action_type:\n print(f'total_diff_direction_num:{total_diff_direction_num/self.eval_round}')\n print(f'total_dof_error:{total_dof_error/self.eval_round}')\n\n assert test_times==self.eval_round*self.vec_env.num_envs\n success_rates = torch.tensor(success_rates)\n sr_mu, sr_std = success_rates.mean().cpu().numpy().item(), success_rates.std().cpu().numpy().item()\n print(f'====== t0: {self.t0} || num_envs: {self.vec_env.num_envs} || eval_times: {self.eval_round}')\n print(f'eval_success_rate: {sr_mu:.2f} +- {sr_std:.2f}')\n eval_rews = np.mean(reward_all)\n print(f'eval_rewards: {eval_rews}')\n self.writer.add_scalar('Eval/success_rate', sr_mu, it)\n self.writer.add_scalar('Eval/eval_rews', eval_rews, it)\n\n def run(self, num_learning_iterations, log_interval=1):\n if self.is_testing:\n self.eval(0)\n else:\n # train initilization\n self.actor_critic.train()\n self.vec_env.train()\n rewbuffer = deque(maxlen=100)\n lenbuffer = deque(maxlen=100)\n cur_reward_sum = torch.zeros(self.vec_env.num_envs, dtype=torch.float, device=self.device)\n cur_episode_length = torch.zeros(self.vec_env.num_envs, dtype=torch.float, device=self.device)\n reward_sum = []\n episode_length = []\n\n # reset env\n current_obs = self.vec_env.reset()['obs']\n current_states = self.vec_env.get_state()\n for it in range(self.current_learning_iteration, num_learning_iterations):\n start = time.time()\n ep_infos = []\n if 'ori_similarity' in self.vec_env.reward_type:\n ori_sim_all = []\n # Rollout\n for _ in range(self.num_transitions_per_env):\n if self.apply_reset:\n current_obs = self.vec_env.reset()['obs']\n current_states = self.vec_env.get_state()\n\n # Compute the 
action\n actions, actions_log_prob, values, mu, sigma, grad = self.compute_action(current_obs=current_obs, current_states=current_states)\n step_actions = self.process_actions(actions=actions.clone(), grad=grad)\n\n # Step the vec_environment\n next_obs, rews, dones, infos = self.vec_env.step(step_actions, (actions,grad))\n\n next_states = self.vec_env.get_state()\n\n # Record the transition\n self.storage.add_transitions(current_obs, current_states, actions, rews, dones, values, actions_log_prob, mu, sigma)\n current_obs.copy_(next_obs['obs'])\n current_states.copy_(next_states)\n\n # Book keeping\n ep_infos.append(infos.copy())\n # set_trace()\n if 'ori_similarity' in self.vec_env.reward_type:\n ori_sim_all.append(torch.mean(infos['ori_similarity']))\n # self.writer.add_scalar('Episode/ori_sim_all', torch.mean(infos['ori_similarity']), _)\n\n if self.print_log:\n cur_reward_sum[:] += rews\n cur_episode_length[:] += 1\n\n new_ids = (dones > 0).nonzero(as_tuple=False)\n reward_sum.extend(cur_reward_sum[new_ids][:, 0].cpu().numpy().tolist())\n episode_length.extend(cur_episode_length[new_ids][:, 0].cpu().numpy().tolist())\n cur_reward_sum[new_ids] = 0\n cur_episode_length[new_ids] = 0\n \n # done\n if torch.sum(dones) > 0:\n current_obs = self.vec_env.reset(dones)['obs']\n current_states = self.vec_env.get_state()\n print(infos['success_rate'])\n if 'ori_similarity' in self.vec_env.reward_type:\n fig = plt.figure()\n plt.plot(torch.tensor(ori_sim_all).cpu().numpy())\n ori_sim_all_img = get_img_from_fig(fig, dpi=100)\n # ori_sim_all_img = cv2.resize(ori_sim_all_img,(256,256))\n self.writer.add_image(\"ori_sim\", ori_sim_all_img, it, dataformats='HWC')\n\n if self.print_log:\n # reward_sum = [x[0] for x in reward_sum]\n # episode_length = [x[0] for x in episode_length]\n rewbuffer.extend(reward_sum)\n lenbuffer.extend(episode_length)\n\n _, _, last_values, _, _, _ = self.compute_action(current_obs=current_obs, current_states=current_states, mode='train')\n stop = time.time()\n collection_time = stop - start\n mean_trajectory_length, mean_reward = self.storage.get_statistics()\n\n # Learning step\n start = stop\n self.storage.compute_returns(last_values, self.gamma, self.lam)\n mean_value_loss, mean_surrogate_loss = self.update()\n self.storage.clear()\n stop = time.time()\n learn_time = stop - start\n if self.print_log:\n self.log(locals())\n if it % log_interval == 0:\n self.actor_critic.eval()\n self.eval(it)\n self.actor_critic.train()\n self.vec_env.train()\n self.save(os.path.join(self.log_dir, 'model_{}.pt'.format(it)))\n\n current_obs = self.vec_env.reset()['obs']\n current_states = self.vec_env.get_state()\n cur_episode_length[:] = 0\n # TODO clean extras\n ep_infos.clear()\n self.save(os.path.join(self.log_dir, 'model_{}.pt'.format(num_learning_iterations)))\n\n def log(self, locs, width=70, pad=35):\n self.tot_timesteps += self.num_transitions_per_env * self.vec_env.num_envs\n self.tot_time += locs['collection_time'] + locs['learn_time']\n iteration_time = locs['collection_time'] + locs['learn_time']\n\n ep_string = f''\n if locs['ep_infos']:\n for key in locs['ep_infos'][0]:\n infotensor = torch.tensor([], device=self.device)\n for ep_info in locs['ep_infos']:\n infotensor = torch.cat((infotensor, ep_info[key].to(self.device)))\n if key=='success_num':\n value = torch.sum(infotensor)\n self.writer.add_scalar('Episode/' + 'total_success_num', value, locs['it'])\n ep_string += f\"\"\"{f'Total episode {key}:':>{pad}} {value:.4f}\\n\"\"\"\n value = torch.mean(infotensor)\n 
self.writer.add_scalar('Episode/' + key, value, locs['it'])\n ep_string += f\"\"\"{f'Mean episode {key}:':>{pad}} {value:.4f}\\n\"\"\"\n mean_std = self.actor_critic.log_std.exp().mean()\n\n self.writer.add_scalar('Loss/value_function', locs['mean_value_loss'], locs['it'])\n self.writer.add_scalar('Loss/surrogate', locs['mean_surrogate_loss'], locs['it'])\n self.writer.add_scalar('Policy/mean_noise_std', mean_std.item(), locs['it'])\n if len(locs['rewbuffer']) > 0:\n self.writer.add_scalar('Train/mean_reward', statistics.mean(locs['rewbuffer']), locs['it'])\n self.writer.add_scalar('Train/mean_episode_length', statistics.mean(locs['lenbuffer']), locs['it'])\n self.writer.add_scalar('Train/mean_reward/time', statistics.mean(locs['rewbuffer']), self.tot_time)\n self.writer.add_scalar('Train/mean_episode_length/time', statistics.mean(locs['lenbuffer']), self.tot_time)\n\n self.writer.add_scalar('Train2/mean_reward/step', locs['mean_reward'], locs['it'])\n self.writer.add_scalar('Train2/mean_episode_length/episode', locs['mean_trajectory_length'], locs['it'])\n\n fps = int(self.num_transitions_per_env * self.vec_env.num_envs / (locs['collection_time'] + locs['learn_time']))\n\n str = f\" \\033[1m Learning iteration {locs['it']}/{locs['num_learning_iterations']} \\033[0m \"\n\n if len(locs['rewbuffer']) > 0:\n log_string = (f\"\"\"{'#' * width}\\n\"\"\"\n f\"\"\"{str.center(width, ' ')}\\n\\n\"\"\"\n f\"\"\"{'Computation:':>{pad}} {fps:.0f} steps/s (collection: {locs[\n 'collection_time']:.3f}s, learning {locs['learn_time']:.3f}s)\\n\"\"\"\n f\"\"\"{'Value function loss:':>{pad}} {locs['mean_value_loss']:.4f}\\n\"\"\"\n f\"\"\"{'Surrogate loss:':>{pad}} {locs['mean_surrogate_loss']:.4f}\\n\"\"\"\n f\"\"\"{'Mean action noise std:':>{pad}} {mean_std.item():.2f}\\n\"\"\"\n f\"\"\"{'Mean reward:':>{pad}} {statistics.mean(locs['rewbuffer']):.2f}\\n\"\"\"\n f\"\"\"{'Mean episode length:':>{pad}} {statistics.mean(locs['lenbuffer']):.2f}\\n\"\"\"\n f\"\"\"{'Mean reward/step:':>{pad}} {locs['mean_reward']:.2f}\\n\"\"\"\n f\"\"\"{'Mean episode length/episode:':>{pad}} {locs['mean_trajectory_length']:.2f}\\n\"\"\")\n else:\n log_string = (f\"\"\"{'#' * width}\\n\"\"\"\n f\"\"\"{str.center(width, ' ')}\\n\\n\"\"\"\n f\"\"\"{'Computation:':>{pad}} {fps:.0f} steps/s (collection: {locs[\n 'collection_time']:.3f}s, learning {locs['learn_time']:.3f}s)\\n\"\"\"\n f\"\"\"{'Value function loss:':>{pad}} {locs['mean_value_loss']:.4f}\\n\"\"\"\n f\"\"\"{'Surrogate loss:':>{pad}} {locs['mean_surrogate_loss']:.4f}\\n\"\"\"\n f\"\"\"{'Mean action noise std:':>{pad}} {mean_std.item():.2f}\\n\"\"\"\n f\"\"\"{'Mean reward/step:':>{pad}} {locs['mean_reward']:.2f}\\n\"\"\"\n f\"\"\"{'Mean episode length/episode:':>{pad}} {locs['mean_trajectory_length']:.2f}\\n\"\"\")\n\n log_string += ep_string\n log_string += (f\"\"\"{'-' * width}\\n\"\"\"\n f\"\"\"{'Total timesteps:':>{pad}} {self.tot_timesteps}\\n\"\"\"\n f\"\"\"{'Iteration time:':>{pad}} {iteration_time:.2f}s\\n\"\"\"\n f\"\"\"{'Total time:':>{pad}} {self.tot_time:.2f}s\\n\"\"\"\n f\"\"\"{'ETA:':>{pad}} {self.tot_time / (locs['it'] + 1) * (\n locs['num_learning_iterations'] - locs['it']):.1f}s\\n\"\"\")\n print(log_string)\n\n def update(self):\n mean_value_loss = 0\n mean_surrogate_loss = 0\n\n batch = self.storage.mini_batch_generator(self.num_mini_batches)\n\n for epoch in range(self.num_learning_epochs):\n # for obs_batch, actions_batch, target_values_batch, advantages_batch, returns_batch, old_actions_log_prob_batch \\\n # in 
self.storage.mini_batch_generator(self.num_mini_batches):\n\n for indices in batch:\n # print(len(indices))\n\n obs_batch = self.storage.observations.view(-1, *self.storage.observations.size()[2:])[indices]\n if self.asymmetric:\n states_batch = self.storage.states.view(-1, *self.storage.states.size()[2:])[indices]\n else:\n states_batch = None\n actions_batch = self.storage.actions.view(-1, self.storage.actions.size(-1))[indices]\n target_values_batch = self.storage.values.view(-1, 1)[indices]\n returns_batch = self.storage.returns.view(-1, 1)[indices]\n old_actions_log_prob_batch = self.storage.actions_log_prob.view(-1, 1)[indices]\n advantages_batch = self.storage.advantages.view(-1, 1)[indices]\n old_mu_batch = self.storage.mu.view(-1, self.storage.actions.size(-1))[indices]\n old_sigma_batch = self.storage.sigma.view(-1, self.storage.actions.size(-1))[indices]\n\n actions_log_prob_batch, entropy_batch, value_batch, mu_batch, sigma_batch = self.actor_critic.evaluate(obs_batch,\n states_batch,\n actions_batch)\n\n # KL\n if self.desired_kl != None and self.schedule == 'adaptive':\n\n kl = torch.sum(\n sigma_batch - old_sigma_batch + (torch.square(old_sigma_batch.exp()) + torch.square(old_mu_batch - mu_batch)) / (2.0 * torch.square(sigma_batch.exp())) - 0.5, axis=-1)\n kl_mean = torch.mean(kl)\n\n if kl_mean > self.desired_kl * 2.0:\n self.step_size = max(1e-5, self.step_size / 1.5)\n elif kl_mean < self.desired_kl / 2.0 and kl_mean > 0.0:\n self.step_size = min(1e-2, self.step_size * 1.5)\n\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = self.step_size\n\n # Surrogate loss\n ratio = torch.exp(actions_log_prob_batch - torch.squeeze(old_actions_log_prob_batch))\n surrogate = -torch.squeeze(advantages_batch) * ratio\n surrogate_clipped = -torch.squeeze(advantages_batch) * torch.clamp(ratio, 1.0 - self.clip_param,\n 1.0 + self.clip_param)\n surrogate_loss = torch.max(surrogate, surrogate_clipped).mean()\n\n # Value function loss\n if self.use_clipped_value_loss:\n value_clipped = target_values_batch + (value_batch - target_values_batch).clamp(-self.clip_param,\n self.clip_param)\n value_losses = (value_batch - returns_batch).pow(2)\n value_losses_clipped = (value_clipped - returns_batch).pow(2)\n value_loss = torch.max(value_losses, value_losses_clipped).mean()\n else:\n value_loss = (returns_batch - value_batch).pow(2).mean()\n\n loss = surrogate_loss + self.value_loss_coef * value_loss - self.entropy_coef * entropy_batch.mean()\n\n # Gradient step\n self.optimizer.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)\n self.optimizer.step()\n\n mean_value_loss += value_loss.item()\n mean_surrogate_loss += surrogate_loss.item()\n\n num_updates = self.num_learning_epochs * self.num_mini_batches\n mean_value_loss /= num_updates\n mean_surrogate_loss /= num_updates\n\n return mean_value_loss, mean_surrogate_loss\n\n '''\n utils\n '''\n def grad_norm(self,grad):\n scale_grad = (torch.max((abs(grad)),dim=1)[0]).reshape(-1,1).expand_as(grad)\n grad = grad/scale_grad\n return grad\n \n \n def action2grad(self, x, inv=False, relative=True, cur_x=None):\n if not inv:\n batch_size = x.size(0)\n state_dim = x.size(1)\n x = torch.cat([torch.sin(x).reshape(batch_size,state_dim,1), torch.cos(x).reshape(batch_size,state_dim,1)],2).reshape(batch_size,-1)\n return x\n else:\n batch_size = x.size(0)\n state_dim = x.size(1)\n x = x.reshape(batch_size,int(state_dim/2),2)\n cur_x = cur_x.reshape(batch_size,int(state_dim/2),2)\n\n 
cur_x = torch.cat([-cur_x[:,:,0:1], cur_x[:,:,1:2]],dim=-1)\n ori_grad = torch.sum(torch.cat([x[:,:,1:2], x[:,:,0:1]], dim=-1) * cur_x, dim=-1, keepdim=True).reshape(batch_size,int(state_dim/2))\n return ori_grad\n \n def get_obs_with_grad(self, current_obs, reset=False, t=None):\n # compute score\n B = current_obs.size(0)\n cur_hand_dof = current_obs[:,:18].clone() #【-1,1】\n pcl_index = self.stack_frame_numer*7 + 18\n cur_obj_pcl = current_obs[:,pcl_index:self.points_per_object*3+pcl_index].clone().reshape(-1, 3, self.points_per_object)\n\n if reset:\n with torch.no_grad(): \n in_process_sample, res = cond_ode_sampler(\n self.score,\n self.prior_fn,\n self.sde_fn,\n (cur_hand_dof, cur_obj_pcl),\n t0=0.5,\n device=self.device,\n num_steps=51,\n batch_size=B,\n space=self.args.space,\n )\n goal_pose = in_process_sample[-1,:,:]\n return goal_pose\n else:\n if self.args.space == 'riemann':\n if 'direct' in self.args.score_model_path:\n cur_hand_dof = self.vec_env.dof_norm(cur_hand_dof,inv=True)\n cur_hand_dof = self.action2grad(cur_hand_dof)\n\n if t is None:\n batch_time_step = torch.ones(B, device=self.device).unsqueeze(1) * self.t0\n else:\n t_max = 0.5\n t_min = 1e-5\n t = torch.tanh(t) * (t_max - t_min) / 2 + (t_max + t_min)/2\n batch_time_step = torch.clamp(t.reshape(B,-1), 1e-5, 0.5)\n self.vec_env.extras['t_value'] = torch.mean(abs(batch_time_step),-1)\n\n if self.args.space == 'riemann':\n grad = torch.zeros(B,36,device=self.device)\n elif self.args.space == 'euler':\n grad = torch.zeros(B,18,device=self.device)\n\n bz = 256\n iter_num = int(np.ceil(B/bz))\n\n for order in range(iter_num):\n with torch.no_grad(): \n if self.args.space == 'riemann':\n grad[order*bz:(order+1)*bz,:36] = self.score((cur_hand_dof[order*bz:(order+1)*bz,:], cur_obj_pcl[order*bz:(order+1)*bz,:]), batch_time_step[order*bz:(order+1)*bz,:]).detach()\n elif self.args.space == 'euler': \n grad[order*bz:(order+1)*bz,:18] = self.score((cur_hand_dof[order*bz:(order+1)*bz,:], cur_obj_pcl[order*bz:(order+1)*bz,:]), batch_time_step[order*bz:(order+1)*bz,:]).detach()\n\n if self.args.space == 'riemann':\n grad = self.action2grad(grad, inv=True, cur_x=cur_hand_dof)\n\n if 'pure_ori_similarity' in self.vec_env.reward_type:\n self.ori_grad = grad.clone()\n\n if 'direct' not in self.args.score_model_path:\n #denormalize to dof original range\n grad = grad * self.vec_env.shadow_hand_dof_range[self.vec_env.actuated_dof_indices] / 2\n\n if self.grad_process is not None:\n if 'norm' in self.grad_process:\n grad = self.grad_norm(grad)\n if 'clip' in self.grad_process:\n grad = torch.clamp(grad,-self.grad_scale,self.grad_scale)\n if 'scale' in self.grad_process:\n grad = grad * self.grad_scale\n\n if 'pure_ori_similarity' not in self.vec_env.reward_type:\n self.ori_grad = grad.clone()\n\n if self.action_type != 'controlt':\n current_obs[:,-18:] = grad\n\n # print(grad[0])\n return current_obs, grad\n \n def process_actions(self, actions, grad):\n if self.action_type=='joint':\n if self.sub_action_type=='add+jointscale':\n self.vec_env.extras['grad_ss_mean'] = torch.mean(abs(actions[:,:18]),-1)\n self.vec_env.extras['grad_ss_std'] = torch.std(abs(actions[:,:18]),-1)\n self.vec_env.extras['residual_mean'] = torch.mean(abs(actions[:,18:]),-1)\n self.vec_env.extras['residual_std'] = torch.std(abs(actions[:,18:]),-1)\n step_actions = grad*actions[:,:18] + actions[:,18:]\n else:\n step_actions = actions*grad\n elif self.action_type=='direct':\n step_actions = actions\n elif 'gf' in self.action_type:\n step_actions = grad\n return 
step_actions\n\n def compute_action(self, current_obs, current_states=None, mode='train'):\n # compute gf\n if 'gf' in self.vec_env.sub_obs_type:\n current_obs, grad = self.get_obs_with_grad(current_obs)\n else:\n grad = torch.zeros((current_obs.size(0),18), device=self.device)\n\n if self.pointnet_finetune:\n batch_num = current_obs.size(0)//self.finetune_pointnet_bz + 1\n for _ in range(batch_num):\n current_obs_batch = current_obs[self.finetune_pointnet_bz*_:self.finetune_pointnet_bz*(_+1),:]\n # current_states_batch = current_states[:,self.finetune_pointnet_bz*batch_num+self.finetune_pointnet_bz*(batch_num+1)]\n if mode=='train':\n actions_batch, actions_log_prob_batch, values_batch, mu_batch, sigma_batch = self.actor_critic.act(current_obs_batch, current_states)\n else:\n actions_batch = self.actor_critic.act_inference(current_obs_batch)\n if _ == 0:\n if mode=='train':\n actions, actions_log_prob, values, mu, sigma = actions_batch, actions_log_prob_batch, values_batch, mu_batch, sigma_batch\n else:\n actions = actions_batch\n else:\n if mode=='train':\n actions = torch.cat([actions, actions_batch])\n actions_log_prob = torch.cat([actions_log_prob,actions_log_prob_batch])\n values = torch.cat([values,values_batch])\n mu = torch.cat([mu, mu_batch])\n sigma = torch.cat([sigma, sigma_batch])\n else:\n actions = torch.cat([actions, actions_batch])\n else:\n if mode=='train':\n actions, actions_log_prob, values, mu, sigma = self.actor_critic.act(current_obs, current_states)\n else:\n actions = self.actor_critic.act_inference(current_obs)\n\n if mode=='train':\n return actions, actions_log_prob, values, mu, sigma, grad\n else:\n return actions, grad" }, { "identifier": "load_cfg", "path": "utils/config.py", "snippet": "def load_cfg(args):\n with open(os.path.join(os.path.dirname(__file__), '../ConDexEnv/condexenvs/cfg/train/', args.cfg_train+'.yaml'), 'r') as f:\n cfg_train = yaml.load(f, Loader=yaml.SafeLoader)\n\n logdir = args.logdir\n\n # Set deterministic mode\n if args.torch_deterministic:\n cfg_train[\"torch_deterministic\"] = True\n\n # Override seed if passed on the command line\n if args.seed is not None:\n cfg_train[\"seed\"] = args.seed\n\n log_id = args.logdir + \"_{}\".format(args.experiment)\n\n logdir = os.path.realpath(log_id)\n # os.makedirs(logdir, exist_ok=True)\n\n return cfg_train, logdir" }, { "identifier": "get_args", "path": "utils/config.py", "snippet": "def get_args(benchmark=False, use_rlg_config=False):\n custom_parameters = [\n \n # env \n {\"name\": \"--headless\", \"action\": \"store_true\", \"default\": False, \"help\": \"Force display off at all times\"},\n {\"name\": \"--rl_device\", \"type\": str, \"default\": \"cuda:1\", \"help\": \"Choose CPU or GPU device for inferencing policy network\"},\n {\"name\": \"--randomize\", \"action\": \"store_true\", \"default\": False, \"help\": \"Apply physics domain randomization\"},\n {\"name\": \"--num_envs\", \"type\": int, \"default\": 2, \"help\": \"Number of environments to create - override config file\"},\n {\"name\": \"--episode_length\", \"type\": int, \"default\": 0, \"help\": \"Episode length, by default is read from yaml config\"},\n {\"name\": \"--seed\", \"type\": int, \"help\": \"Random seed\"},\n {\"name\": \"--points_per_object\", \"type\": int, \"default\": 1024, \"help\": \"points for each object pcl\"},\n {\"name\": \"--method\", \"type\": str, \"default\": \"gf+rl\", \"help\": \"method\"},\n {\"name\": \"--run_device_id\", \"type\": int, \"help\": \"device id\"},\n {\"name\": \"--dataset_type\", 
\"type\": str, \"default\": \"train\", \"help\": \"method\"},\n # mode\n {\"name\": \"--mode\", \"type\": str, \"default\": \"train\", \"help\": \"env_mode\"},\n {\"name\": \"--test\", \"action\": \"store_true\", \"default\": False, \"help\": \"Run trained policy, no training\"},\n {\"name\": \"--eval_times\", \"type\": int, \"default\": 5, \"help\": \"eval times for each object\"},\n {\"name\": \"--constrained\", \"action\": \"store_true\", \"help\": \"whether constrain base\"},\n \n # score matching parameter\n {\"name\": \"--sigma\", \"type\": float, \"default\": 25, \"help\": \"eval times for each object\"},\n {\"name\": \"--t0\", \"type\": float, \"default\": 0.1, \"help\": \"t0 for sample\"},\n {\"name\": \"--hidden_dim\", \"type\": int, \"default\": 1024, \"help\": \"num of hidden dim\"},\n {\"name\": \"--embed_dim\", \"type\": int, \"default\": 512, \"help\": \"num of embed_dim\"},\n {\"name\": \"--score_mode\", \"type\": str, \"default\": \"target\", \"help\": \"score mode\"},\n {\"name\": \"--space\", \"type\": str, \"default\": \"riemann\", \"help\": \"angle space\"},\n {\"name\": \"--relative\", \"action\": \"store_false\", \"help\": \"relative pcl representation\"},\n {\"name\": \"--score_model_path\", \"type\": str, \"default\": \"./logs/train_all_rel_p2cuda_v_2e-4_2\", \"help\": \"pretrain score model path\"},\n # rl train \n {\"name\": \"--torch_deterministic\", \"action\": \"store_true\", \"default\": False, \"help\": \"Apply additional PyTorch settings for more deterministic behaviour\"},\n {\"name\": \"--metadata\", \"action\": \"store_true\", \"default\": False, \"help\": \"Requires --experiment flag, adds physics engine, sim device, pipeline info and if domain randomization is used to the experiment name provided by user\"},\n {\"name\": \"--resume\", \"type\": int, \"default\": 0, \"help\": \"Resume training or start testing from a checkpoint\"},\n {\"name\": \"--cfg_train\", \"type\": str, \"default\": \"ShadowHandConPPO\"},\n {\"name\": \"--max_iterations\", \"type\": int, \"default\": 0, \"help\": \"Set a maximum number of training iterations\"},\n {\"name\": \"--minibatch_size\", \"type\": int, \"default\": -1, \"help\": \"Set batch size for PPO optimization step. Supported only by rl_games. If not -1 overrides the config settings.\"},\n # log\n {\"name\": \"--logdir\", \"type\": str, \"default\": \"logs/gfppo/\"}, \n {\"name\": \"--experiment\", \"type\": str, \"default\": \"Base\", \"help\": \"Experiment name. 
If used with --metadata flag an additional information about physics engine, sim device, pipeline and domain randomization will be added to the name\"},\n {\"name\": \"--model_dir\", \"type\": str, \"default\": \"\", \"help\": \"Choose a model dir\"},\n {\"name\": \"--exp_name\", \"type\": str, \"default\": \"ours\", \"help\": \"exp_name\"},\n {\"name\": \"--eval_name\", \"type\": str, \"default\": \"ours\", \"help\": \"exp_name\"},\n {\"name\": \"--vis_env_num\", \"type\": int, \"default\": \"0\", \"help\": \"vis env num\"},\n ]\n \n\n # parse arguments\n args = gymutil.parse_arguments(\n description=\"RL Policy\",\n custom_parameters=custom_parameters)\n\n # allignment with examples\n args.device_id = args.compute_device_id\n args.device = args.sim_device_type if args.use_gpu_pipeline else 'cpu'\n\n if args.test:\n args.train = False\n else:\n args.train = True\n\n return args" }, { "identifier": "set_np_formatting", "path": "utils/config.py", "snippet": "def set_np_formatting():\n np.set_printoptions(edgeitems=30, infstr='inf',\n linewidth=4000, nanstr='nan', precision=2,\n suppress=False, threshold=10000, formatter=None)" } ]
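The update() method in the GFPPO context above is a standard PPO step: a clipped surrogate objective, an optionally clipped value loss, and an 'adaptive' schedule that raises or lowers the learning rate according to the KL divergence between the old and new Gaussian policies. The sketch below restates just those pieces in isolation, without the storage/minibatch plumbing; the clip_param value and the demo tensors are illustrative assumptions, while desired_kl=0.016 matches the value set in the training script further down.

# Standalone restatement of the PPO loss terms and adaptive step-size rule (sketch).
import torch

def ppo_losses(log_prob, old_log_prob, advantages, values, old_values, returns,
               clip_param=0.2, use_clipped_value_loss=True):
    # Clipped surrogate objective on the probability ratio.
    ratio = torch.exp(log_prob - old_log_prob)
    surrogate = -advantages * ratio
    surrogate_clipped = -advantages * torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param)
    surrogate_loss = torch.max(surrogate, surrogate_clipped).mean()

    # Value-function loss, optionally clipped around the old value prediction.
    if use_clipped_value_loss:
        values_clipped = old_values + (values - old_values).clamp(-clip_param, clip_param)
        value_loss = torch.max((values - returns).pow(2),
                               (values_clipped - returns).pow(2)).mean()
    else:
        value_loss = (returns - values).pow(2).mean()
    return surrogate_loss, value_loss

def adapt_step_size(step_size, kl_mean, desired_kl=0.016):
    # Shrink the learning rate when the policy moved too far, grow it when it
    # barely moved; the same rule as the 'adaptive' schedule in the snippet above.
    if kl_mean > desired_kl * 2.0:
        return max(1e-5, step_size / 1.5)
    if 0.0 < kl_mean < desired_kl / 2.0:
        return min(1e-2, step_size * 1.5)
    return step_size

if __name__ == '__main__':
    B = 8
    lp, olp = torch.randn(B), torch.randn(B)
    adv, val, oval, ret = torch.randn(B), torch.randn(B), torch.randn(B), torch.randn(B)
    print(ppo_losses(lp, olp, adv, val, oval, ret))
    print(adapt_step_size(3e-4, kl_mean=0.05))   # KL too large -> step size shrinks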
import isaacgym import condexenvs import torch import os import sys from Algorithms.ppo import GFPPO from utils.config import load_cfg, get_args, set_np_formatting
11,134
sys.path.append(os.path.dirname(os.path.dirname(__file__))) if __name__ == '__main__': set_np_formatting() args = get_args() cfg_train, logdir = load_cfg(args) ''' change for different method ''' cfg_train['setting']['grad_scale'] = 1.0 reward_normalize = False cfg_train['policy']['pointnet_version'] = 'pt' if args.exp_name == 'ours': reward_type = "ori_similarity+height+sr" reward_normalize = False sub_obs_type = "joint+fingertipjoint+wrist+objpcl+gf" cfg_train['setting']['action_type'] = "joint" cfg_train['setting']['sub_action_type'] = "add+jointscale" cfg_train['policy']['pretrain_pointnet'] = True ''' policy ''' cfg_train['policy']['hand_pcl'] = False ''' training setting ''' cfg_train['learn']['nsteps'] = 50 cfg_train['learn']['noptepochs'] = 2 cfg_train['learn']['nminibatches'] = int(args.num_envs*cfg_train['learn']['nsteps']/64) if cfg_train['policy']['pointnet_version'] == 'pt': cfg_train['learn']['optim_stepsize'] = 0.0003 else: cfg_train['learn']['optim_stepsize'] = 0.0003 cfg_train['learn']['desired_kl'] = 0.016 cfg_train['learn']['gamma'] = 0.99 envs = condexenvs.make( seed=args.seed, task="ShadowHandCon", num_envs=args.num_envs, sim_device=f"cuda:{args.run_device_id}", rl_device=f"cuda:{args.run_device_id}", graphics_device_id = -1, headless=args.headless, mode = args.mode, eval_times=args.eval_times, method = args.method, constrained = args.constrained, reward_type = reward_type, reward_normalize = reward_normalize, sub_obs_type = sub_obs_type, dataset_type = args.dataset_type, ) envs.reset(env_init=True) learn_cfg = cfg_train["learn"] is_testing = learn_cfg["test"] # Override resume and testing flags if they are passed as parameters. if args.model_dir != "": chkpt_path = args.model_dir logdir = logdir + "_seed{}".format(args.seed)
sys.path.append(os.path.dirname(os.path.dirname(__file__))) if __name__ == '__main__': set_np_formatting() args = get_args() cfg_train, logdir = load_cfg(args) ''' change for different method ''' cfg_train['setting']['grad_scale'] = 1.0 reward_normalize = False cfg_train['policy']['pointnet_version'] = 'pt' if args.exp_name == 'ours': reward_type = "ori_similarity+height+sr" reward_normalize = False sub_obs_type = "joint+fingertipjoint+wrist+objpcl+gf" cfg_train['setting']['action_type'] = "joint" cfg_train['setting']['sub_action_type'] = "add+jointscale" cfg_train['policy']['pretrain_pointnet'] = True ''' policy ''' cfg_train['policy']['hand_pcl'] = False ''' training setting ''' cfg_train['learn']['nsteps'] = 50 cfg_train['learn']['noptepochs'] = 2 cfg_train['learn']['nminibatches'] = int(args.num_envs*cfg_train['learn']['nsteps']/64) if cfg_train['policy']['pointnet_version'] == 'pt': cfg_train['learn']['optim_stepsize'] = 0.0003 else: cfg_train['learn']['optim_stepsize'] = 0.0003 cfg_train['learn']['desired_kl'] = 0.016 cfg_train['learn']['gamma'] = 0.99 envs = condexenvs.make( seed=args.seed, task="ShadowHandCon", num_envs=args.num_envs, sim_device=f"cuda:{args.run_device_id}", rl_device=f"cuda:{args.run_device_id}", graphics_device_id = -1, headless=args.headless, mode = args.mode, eval_times=args.eval_times, method = args.method, constrained = args.constrained, reward_type = reward_type, reward_normalize = reward_normalize, sub_obs_type = sub_obs_type, dataset_type = args.dataset_type, ) envs.reset(env_init=True) learn_cfg = cfg_train["learn"] is_testing = learn_cfg["test"] # Override resume and testing flags if they are passed as parameters. if args.model_dir != "": chkpt_path = args.model_dir logdir = logdir + "_seed{}".format(args.seed)
runner = GFPPO(vec_env=envs,
0
2023-11-09 06:08:40+00:00
16k
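The training script above selects action_type='joint', sub_action_type='add+jointscale' and grad_scale=1.0, which the GFPPO context earlier turns into joint commands by post-processing the score-network gradient (norm/clip/scale) and then combining it with the policy output as a per-joint rescaling plus an additive residual. A minimal standalone sketch of that path is given below; the 18-DoF hand dimension mirrors the snippet, while the random inputs and the 'norm+clip' choice for grad_process are illustrative assumptions.

# Minimal sketch of gradient post-processing and 'add+jointscale' action composition.
import torch

def grad_norm(grad: torch.Tensor) -> torch.Tensor:
    # Rescale every row so its largest absolute entry becomes 1,
    # mirroring the grad_norm helper in the GFPPO snippet above.
    scale = torch.max(grad.abs(), dim=1, keepdim=True)[0].expand_as(grad)
    return grad / scale

def process_grad(grad: torch.Tensor, grad_process: str, grad_scale: float) -> torch.Tensor:
    # Optional norm / clip / scale chain controlled by a config string.
    if 'norm' in grad_process:
        grad = grad_norm(grad)
    if 'clip' in grad_process:
        grad = torch.clamp(grad, -grad_scale, grad_scale)
    if 'scale' in grad_process:
        grad = grad * grad_scale
    return grad

def compose_step_actions(actions: torch.Tensor, grad: torch.Tensor) -> torch.Tensor:
    # 'add+jointscale': the first 18 policy outputs rescale the gradient per joint,
    # the last 18 act as an additive residual.
    return grad * actions[:, :18] + actions[:, 18:]

if __name__ == '__main__':
    B = 4
    grad = process_grad(torch.randn(B, 18), grad_process='norm+clip', grad_scale=1.0)
    actions = torch.randn(B, 36)              # policy output: 18 scales + 18 residuals
    print(compose_step_actions(actions, grad).shape)   # torch.Size([4, 18])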
ApolloAuto/apollo-model-centerpoint
tools/create_bevformer_nus_infos.py
[ { "identifier": "NuscenesMVDataset", "path": "paddle3d/datasets/nuscenes/nuscenes_multiview_det.py", "snippet": "class NuscenesMVDataset(NuscenesDetDataset):\n \"\"\"\n Nuscecens dataset for multi-view camera detection task.\n \"\"\"\n DATASET_NAME = \"Nuscenes\"\n\n def __init__(self,\n dataset_root: str,\n ann_file: str = None,\n mode: str = \"train\",\n transforms: Union[TransformABC, List[TransformABC]] = None,\n max_sweeps: int = 10,\n class_balanced_sampling: bool = False,\n class_names: Union[list, tuple] = None,\n queue_length=None,\n use_valid_flag=False,\n with_velocity=True):\n\n self.mode = mode\n self.dataset_root = dataset_root\n self.filter_empty_gt = True\n self.box_type_3d = 'LiDAR'\n self.box_mode_3d = None\n self.ann_file = ann_file\n self.version = self.VERSION_MAP[self.mode]\n\n self.max_sweeps = max_sweeps\n self._build_data()\n self.metadata = self.data_infos['metadata']\n\n self.data_infos = list(\n sorted(self.data_infos['infos'], key=lambda e: e['timestamp']))\n\n if isinstance(transforms, list):\n transforms = T.Compose(transforms)\n\n self.transforms = transforms\n\n if 'train' in self.mode:\n self.flag = np.zeros(len(self), dtype=np.uint8)\n\n self.modality = dict(\n use_camera=True,\n use_lidar=False,\n use_radar=False,\n use_map=False,\n use_external=True,\n )\n self.with_velocity = with_velocity\n self.use_valid_flag = use_valid_flag\n self.channel = \"LIDAR_TOP\"\n if class_names is not None:\n self.class_names = class_names\n else:\n self.class_names = list(self.CLASS_MAP.keys())\n self.queue_length = queue_length\n\n def __len__(self):\n return len(self.data_infos)\n\n def _rand_another(self, idx):\n \"\"\"Randomly get another item with the same flag.\n Returns:\n int: Another index of item with the same flag.\n \"\"\"\n pool = np.where(self.flag == self.flag[idx])[0]\n return np.random.choice(pool)\n\n def get_ann_info(self, index):\n \"\"\"Get annotation info according to the given index.\n Args:\n index (int): Index of the annotation data to get.\n Returns:\n dict: Annotation information consists of the following keys:\n - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): \\\n 3D ground truth bboxes\n - gt_labels_3d (np.ndarray): Labels of ground truths.\n - gt_names (list[str]): Class names of ground truths.\n \"\"\"\n info = self.data_infos[index]\n # filter out bbox containing no points\n if self.use_valid_flag:\n mask = info['valid_flag']\n else:\n mask = info['num_lidar_pts'] > 0\n gt_bboxes_3d = info['gt_boxes'][mask]\n gt_names_3d = info['gt_names'][mask]\n gt_labels_3d = []\n for cat in gt_names_3d:\n if cat in self.CLASS_MAP:\n # gt_labels_3d.append(self.CLASS_MAP[cat])\n gt_labels_3d.append(self.class_names.index(cat))\n else:\n gt_labels_3d.append(-1)\n gt_labels_3d = np.array(gt_labels_3d)\n\n if self.with_velocity:\n gt_velocity = info['gt_velocity'][mask]\n nan_mask = np.isnan(gt_velocity[:, 0])\n gt_velocity[nan_mask] = [0.0, 0.0]\n gt_bboxes_3d = np.concatenate([gt_bboxes_3d, gt_velocity], axis=-1)\n\n # the nuscenes box center is [0.5, 0.5, 0.5], we change it to be\n # the same as KITTI (0.5, 0.5, 0)\n origin = [0.5, 0.5, 0.5]\n dst = np.array([0.5, 0.5, 0], dtype=gt_bboxes_3d.dtype)\n src = np.array(origin, dtype=gt_bboxes_3d.dtype)\n gt_bboxes_3d[:, :3] += gt_bboxes_3d[:, 3:6] * (dst - src)\n gt_bboxes_3d = BBoxes3D(\n gt_bboxes_3d, coordmode=2, origin=[0.5, 0.5, 0.5])\n\n anns_results = dict(\n gt_bboxes_3d=gt_bboxes_3d,\n gt_labels_3d=gt_labels_3d,\n gt_names=gt_names_3d)\n return anns_results\n\n def get_data_info(self, index):\n 
\"\"\"Get data info according to the given index.\n Args:\n index (int): Index of the sample data to get.\n Returns:\n dict: Data information that will be passed to the data \\\n preprocessing pipelines. It includes the following keys:\n - sample_idx (str): Sample index.\n - pts_filename (str): Filename of point clouds.\n - sweeps (list[dict]): Infos of sweeps.\n - timestamp (float): Sample timestamp.\n - img_filename (str, optional): Image filename.\n - lidar2img (list[np.ndarray], optional): Transformations \\\n from lidar to different cameras.\n - ann_info (dict): Annotation info.\n \"\"\"\n info = self.data_infos[index]\n\n sample = Sample(path=None, modality=\"multiview\")\n sample.sample_idx = info['token']\n sample.meta.id = info['token']\n sample.pts_filename = osp.join(self.dataset_root, info['lidar_path'])\n sample.sweeps = copy.deepcopy(info['sweeps'])\n if self.queue_length is None:\n for i in range(len(sample.sweeps)):\n for cam_type in sample.sweeps[i].keys():\n data_path = info['sweeps'][i][cam_type]['data_path']\n sample.sweeps[i][cam_type]['data_path'] = osp.join(\n self.dataset_root, data_path)\n\n sample.timestamp = info['timestamp'] / 1e6\n if self.queue_length is not None:\n sample.ego2global_translation = info['ego2global_translation']\n sample.ego2global_rotation = info['ego2global_rotation']\n sample.prev_idx = info['prev']\n sample.next_idx = info['next']\n sample.scene_token = info['scene_token']\n sample.can_bus = info['can_bus']\n sample.frame_idx = info['frame_idx']\n\n if self.modality['use_camera']:\n image_paths = []\n lidar2img_rts = []\n intrinsics = []\n extrinsics = []\n img_timestamp = []\n for cam_type, cam_info in info['cams'].items():\n img_timestamp.append(cam_info['timestamp'] / 1e6)\n image_paths.append(\n osp.join(self.dataset_root, cam_info['data_path']))\n # obtain lidar to image transformation matrix\n lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation'])\n lidar2cam_t = cam_info[\n 'sensor2lidar_translation'] @ lidar2cam_r.T\n lidar2cam_rt = np.eye(4)\n lidar2cam_rt[:3, :3] = lidar2cam_r.T\n lidar2cam_rt[3, :3] = -lidar2cam_t\n intrinsic = cam_info['cam_intrinsic']\n viewpad = np.eye(4)\n viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic\n lidar2img_rt = (viewpad @ lidar2cam_rt.T)\n intrinsics.append(viewpad)\n # The extrinsics mean the tranformation from lidar to camera.\n # If anyone want to use the extrinsics as sensor to lidar, please\n # use np.linalg.inv(lidar2cam_rt.T) and modify the ResizeCropFlipImage\n # and LoadMultiViewImageFromMultiSweepsFiles.\n extrinsics.append(lidar2cam_rt)\n lidar2img_rts.append(lidar2img_rt)\n\n sample.update(\n dict(\n img_timestamp=img_timestamp,\n img_filename=image_paths,\n lidar2img=lidar2img_rts,\n intrinsics=intrinsics,\n extrinsics=extrinsics))\n\n if 'train' in self.mode:\n annos = self.get_ann_info(index)\n sample.ann_info = annos\n\n if self.queue_length is not None:\n rotation = Quaternion(sample['ego2global_rotation'])\n translation = sample['ego2global_translation']\n can_bus = sample['can_bus']\n can_bus[:3] = translation\n can_bus[3:7] = rotation\n patch_angle = quaternion_yaw(rotation) / np.pi * 180\n if patch_angle < 0:\n patch_angle += 360\n can_bus[-2] = patch_angle / 180 * np.pi\n can_bus[-1] = patch_angle\n\n return sample\n\n def __getitem__(self, index):\n if 'train' not in self.mode:\n sample = self.get_data_info(index)\n sample['img_fields'] = []\n sample['bbox3d_fields'] = []\n sample['pts_mask_fields'] = []\n sample['pts_seg_fields'] = []\n 
sample['bbox_fields'] = []\n sample['mask_fields'] = []\n sample['seg_fields'] = []\n sample['box_type_3d'] = self.box_type_3d\n sample['box_mode_3d'] = self.box_mode_3d\n sample = self.transforms(sample)\n return sample\n\n while True:\n if self.queue_length is None:\n sample = self.get_data_info(index)\n\n if sample is None:\n index = self._rand_another(index)\n continue\n\n sample['img_fields'] = []\n sample['bbox3d_fields'] = []\n sample['pts_mask_fields'] = []\n sample['pts_seg_fields'] = []\n sample['bbox_fields'] = []\n sample['mask_fields'] = []\n sample['seg_fields'] = []\n sample['box_type_3d'] = self.box_type_3d\n sample['box_mode_3d'] = self.box_mode_3d\n\n sample = self.transforms(sample)\n\n if self.is_train_mode and self.filter_empty_gt and \\\n (sample is None or len(sample['gt_labels_3d']) == 0 ):\n index = self._rand_another(index)\n continue\n\n return sample\n else:\n queue = []\n index_list = list(range(index - self.queue_length, index))\n random.shuffle(index_list)\n index_list = sorted(index_list[1:])\n index_list.append(index)\n for i in index_list:\n i = max(0, i)\n sample = self.get_data_info(i)\n if sample is None:\n break\n\n sample['img_fields'] = []\n sample['bbox3d_fields'] = []\n sample['pts_mask_fields'] = []\n sample['pts_seg_fields'] = []\n sample['bbox_fields'] = []\n sample['mask_fields'] = []\n sample['seg_fields'] = []\n sample['box_type_3d'] = self.box_type_3d\n sample['box_mode_3d'] = self.box_mode_3d\n\n sample = self.transforms(sample)\n if self.filter_empty_gt and \\\n (sample is None or len(sample['gt_labels_3d']) == 0):\n sample = None\n break\n queue.append(sample)\n if sample is None:\n index = self._rand_another(index)\n continue\n return self.union2one(queue)\n\n def union2one(self, queue):\n imgs_list = [each['img'] for each in queue]\n metas_map = SampleMeta()\n prev_scene_token = None\n prev_pos = None\n prev_angle = None\n for i, each in enumerate(queue):\n metas_map[i] = each['meta']\n if metas_map[i]['scene_token'] != prev_scene_token:\n metas_map[i]['prev_bev_exists'] = False\n prev_scene_token = metas_map[i]['scene_token']\n prev_pos = copy.deepcopy(metas_map[i]['can_bus'][:3])\n prev_angle = copy.deepcopy(metas_map[i]['can_bus'][-1])\n metas_map[i]['can_bus'][:3] = 0\n metas_map[i]['can_bus'][-1] = 0\n else:\n metas_map[i]['prev_bev_exists'] = True\n tmp_pos = copy.deepcopy(metas_map[i]['can_bus'][:3])\n tmp_angle = copy.deepcopy(metas_map[i]['can_bus'][-1])\n metas_map[i]['can_bus'][:3] -= prev_pos\n metas_map[i]['can_bus'][-1] -= prev_angle\n prev_pos = copy.deepcopy(tmp_pos)\n prev_angle = copy.deepcopy(tmp_angle)\n queue[-1]['img'] = np.stack(imgs_list)\n queue[-1]['meta'] = metas_map\n queue = queue[-1]\n return queue\n\n def _build_data(self):\n test = 'test' in self.version\n\n if self.ann_file is not None:\n self.data_infos = pickle.load(open(self.ann_file, 'rb'))\n return\n\n if test:\n test_ann_cache_file = os.path.join(\n self.dataset_root,\n '{}_annotation_test.pkl'.format(self.DATASET_NAME))\n if os.path.exists(test_ann_cache_file):\n self.data_infos = pickle.load(open(test_ann_cache_file, 'rb'))\n return\n else:\n train_ann_cache_file = os.path.join(\n self.dataset_root,\n '{}_annotation_train.pkl'.format(self.DATASET_NAME))\n val_ann_cache_file = os.path.join(\n self.dataset_root,\n '{}_annotation_val.pkl'.format(self.DATASET_NAME))\n if os.path.exists(train_ann_cache_file):\n self.data_infos = pickle.load(open(train_ann_cache_file, 'rb'))\n return\n\n self.nusc = NuScenesManager.get(\n version=self.version, 
dataroot=self.dataset_root)\n\n if self.version == 'v1.0-trainval':\n train_scenes = nuscenes_split.train\n val_scenes = nuscenes_split.val\n elif self.version == 'v1.0-test':\n train_scenes = nuscenes_split.test\n val_scenes = []\n elif self.version == 'v1.0-mini':\n train_scenes = nuscenes_split.mini_train\n val_scenes = nuscenes_split.mini_val\n else:\n raise ValueError('unknown nuscenes dataset version')\n\n available_scenes = get_available_scenes(self.nusc)\n available_scene_names = [s['name'] for s in available_scenes]\n\n train_scenes = list(\n filter(lambda x: x in available_scene_names, train_scenes))\n val_scenes = list(\n filter(lambda x: x in available_scene_names, val_scenes))\n train_scenes = set([\n available_scenes[available_scene_names.index(s)]['token']\n for s in train_scenes\n ])\n val_scenes = set([\n available_scenes[available_scene_names.index(s)]['token']\n for s in val_scenes\n ])\n\n if test:\n print('test scene: {}'.format(len(train_scenes)))\n else:\n print('train scene: {}, val scene: {}'.format(\n len(train_scenes), len(val_scenes)))\n train_nusc_infos, val_nusc_infos = _fill_trainval_infos(\n self.nusc,\n train_scenes,\n val_scenes,\n test,\n max_sweeps=self.max_sweeps)\n\n metadata = dict(version=self.version)\n\n if test:\n print('test sample: {}'.format(len(train_nusc_infos)))\n data = dict(infos=train_nusc_infos, metadata=metadata)\n pickle.dump(data, open(test_ann_cache_file, 'wb'))\n self.data_infos = data\n else:\n print('train sample: {}, val sample: {}'.format(\n len(train_nusc_infos), len(val_nusc_infos)))\n data = dict(infos=train_nusc_infos, metadata=metadata)\n\n pickle.dump(data, open(train_ann_cache_file, 'wb'))\n\n if self.mode == 'train':\n self.data_infos = data\n\n data['infos'] = val_nusc_infos\n\n pickle.dump(data, open(val_ann_cache_file, 'wb'))\n\n if self.mode == 'val':\n self.data_infos = data\n\n def _filter(self, anno: dict, box: NuScenesBox = None) -> bool:\n # filter out objects that are not being scanned\n mask = (anno['num_lidar_pts'] + anno['num_radar_pts']) > 0 and \\\n anno['category_name'] in self.LABEL_MAP and \\\n self.LABEL_MAP[anno['category_name']] in self.class_names\n return mask\n\n def get_sweeps(self, index: int) -> List[str]:\n \"\"\"\n \"\"\"\n sweeps = []\n sample = self.data[index]\n token = sample['data'][self.channel]\n sample_data = self.nusc.get('sample_data', token)\n\n if self.max_sweeps <= 0:\n return sweeps\n\n # Homogeneous transform of current sample from ego car coordinate to sensor coordinate\n curr_sample_cs = self.nusc.get(\"calibrated_sensor\",\n sample_data[\"calibrated_sensor_token\"])\n curr_sensor_from_car = transform_matrix(\n curr_sample_cs[\"translation\"],\n Quaternion(curr_sample_cs[\"rotation\"]),\n inverse=True)\n # Homogeneous transformation matrix of current sample from global coordinate to ego car coordinate\n curr_sample_pose = self.nusc.get(\"ego_pose\",\n sample_data[\"ego_pose_token\"])\n curr_car_from_global = transform_matrix(\n curr_sample_pose[\"translation\"],\n Quaternion(curr_sample_pose[\"rotation\"]),\n inverse=True,\n )\n curr_timestamp = 1e-6 * sample_data[\"timestamp\"]\n\n prev_token = sample_data['prev']\n while len(sweeps) < self.max_sweeps - 1:\n if prev_token == \"\":\n if len(sweeps) == 0:\n sweeps.append({\n \"lidar_path\":\n osp.join(self.dataset_root, sample_data['filename']),\n \"time_lag\":\n 0,\n \"ref_from_curr\":\n None,\n })\n else:\n sweeps.append(sweeps[-1])\n else:\n prev_sample_data = self.nusc.get('sample_data', prev_token)\n # Homogeneous 
transformation matrix of previous sample from ego car coordinate to global coordinate\n prev_sample_pose = self.nusc.get(\n \"ego_pose\", prev_sample_data[\"ego_pose_token\"])\n prev_global_from_car = transform_matrix(\n prev_sample_pose[\"translation\"],\n Quaternion(prev_sample_pose[\"rotation\"]),\n inverse=False,\n )\n # Homogeneous transform of previous sample from sensor coordinate to ego car coordinate\n prev_sample_cs = self.nusc.get(\n \"calibrated_sensor\",\n prev_sample_data[\"calibrated_sensor_token\"])\n prev_car_from_sensor = transform_matrix(\n prev_sample_cs[\"translation\"],\n Quaternion(prev_sample_cs[\"rotation\"]),\n inverse=False,\n )\n\n curr_from_pre = reduce(\n np.dot,\n [\n curr_sensor_from_car, curr_car_from_global,\n prev_global_from_car, prev_car_from_sensor\n ],\n )\n prev_timestamp = 1e-6 * prev_sample_data[\"timestamp\"]\n time_lag = curr_timestamp - prev_timestamp\n\n sweeps.append({\n \"lidar_path\":\n osp.join(self.dataset_root, prev_sample_data['filename']),\n \"time_lag\":\n time_lag,\n \"ref_from_curr\":\n curr_from_pre,\n })\n prev_token = prev_sample_data['prev']\n return sweeps\n\n @property\n def metric(self):\n if not hasattr(self, 'nusc'):\n self.nusc = NuScenesManager.get(\n version=self.version, dataroot=self.dataset_root)\n return super().metric\n\n def collate_fn(self, batch: List):\n \"\"\"\n \"\"\"\n sample = batch[0]\n if isinstance(sample, np.ndarray):\n try:\n batch = np.stack(batch, axis=0)\n return batch\n except Exception as e:\n return batch\n elif isinstance(sample, SampleMeta):\n return batch\n return super().collate_fn(batch)" }, { "identifier": "NuscenesDetDataset", "path": "paddle3d/datasets/nuscenes/nuscenes_det.py", "snippet": "class NuscenesDetDataset(BaseDataset):\n \"\"\"\n \"\"\"\n\n VERSION_MAP = {\n 'train': 'v1.0-trainval',\n 'val': 'v1.0-trainval',\n 'trainval': 'v1.0-trainval',\n 'test': 'v1.0-test',\n 'mini_train': 'v1.0-mini',\n 'mini_val': 'v1.0-mini'\n }\n\n LABEL_MAP = {\n 'human.pedestrian.adult': 'pedestrian',\n 'human.pedestrian.child': 'pedestrian',\n 'human.pedestrian.police_officer': 'pedestrian',\n 'human.pedestrian.construction_worker': 'pedestrian',\n 'vehicle.car': 'car',\n 'vehicle.motorcycle': 'motorcycle',\n 'vehicle.bicycle': 'bicycle',\n 'vehicle.bus.bendy': 'bus',\n 'vehicle.bus.rigid': 'bus',\n 'vehicle.truck': 'truck',\n 'vehicle.construction': 'construction_vehicle',\n 'vehicle.trailer': 'trailer',\n 'movable_object.barrier': 'barrier',\n 'movable_object.trafficcone': 'traffic_cone'\n }\n\n CLASS_MAP = {\n 'pedestrian': 0,\n 'car': 1,\n 'motorcycle': 2,\n 'bicycle': 3,\n 'bus': 4,\n 'truck': 5,\n 'construction_vehicle': 6,\n 'trailer': 7,\n 'barrier': 8,\n 'traffic_cone': 9\n }\n CLASS_MAP_REVERSE = {value: key for key, value in CLASS_MAP.items()}\n\n ATTRIBUTE_MAP = {\n 'vehicle.moving': 0,\n 'vehicle.stopped': 1,\n 'vehicle.parked': 2,\n 'cycle.with_rider': 3,\n 'cycle.without_rider': 4,\n 'pedestrian.sitting_lying_down': 5,\n 'pedestrian.standing': 6,\n 'pedestrian.moving': 7,\n '': 8\n }\n ATTRIBUTE_MAP_REVERSE = {value: key for key, value in ATTRIBUTE_MAP.items()}\n\n SUPPORT_CHANNELS = [\n \"RADAR_FRONT\", \"RADAR_FRONT_LEFT\", \"RADAR_FRONT_RIGHT\",\n \"RADAR_BACK_LEFT\", \"RADAR_BACK_RIGHT\", \"LIDAR_TOP\", \"CAM_BACK\",\n \"CAM_BACK_LEFT\", \"CAM_BACK_RIGHT\", \"CAM_FRONT\", \"CAM_FRONT_LEFT\",\n \"CAM_FRONT_RIGHT\"\n ]\n\n DEFAULT_ATTRIBUTE_MAP = {\n 'car': 'vehicle.parked',\n 'pedestrian': 'pedestrian.moving',\n 'trailer': 'vehicle.parked',\n 'truck': 'vehicle.parked',\n 'bus': 
'vehicle.moving',\n 'motorcycle': 'cycle.without_rider',\n 'construction_vehicle': 'vehicle.parked',\n 'bicycle': 'cycle.without_rider',\n 'barrier': '',\n 'traffic_cone': ''\n }\n\n def __init__(self,\n dataset_root: str,\n channel: str,\n mode: str = \"train\",\n transforms: Union[TransformABC, List[TransformABC]] = None,\n class_balanced_sampling: bool = False,\n class_names: Union[list, tuple] = None):\n super().__init__()\n self.dataset_root = dataset_root\n self.mode = mode.lower()\n self.channel = channel\n self.class_balanced_sampling = class_balanced_sampling\n self.class_names = class_names\n if self.class_names is None:\n self.class_names = list(self.CLASS_MAP.keys())\n\n if isinstance(transforms, list):\n transforms = T.Compose(transforms)\n\n self.transforms = transforms\n\n if self.mode not in [\n 'train', 'val', 'trainval', 'test', 'mini_train', 'mini_val'\n ]:\n raise ValueError(\n \"mode should be 'train', 'val', 'trainval', 'mini_train', 'mini_val' or 'test', but got {}.\"\n .format(self.mode))\n\n if self.channel not in self.SUPPORT_CHANNELS:\n raise ValueError('Only channel {} is supported, but got {}'.format(\n self.SUPPORT_CHANNELS, self.channel))\n\n self.version = self.VERSION_MAP[self.mode]\n self.nusc = NuScenesManager.get(\n version=self.version, dataroot=self.dataset_root)\n self._build_data(class_balanced_sampling)\n\n def _build_data(self, class_balanced_sampling):\n scenes = getattr(nuscenes_split, self.mode)\n self.data = []\n\n for scene in self.nusc.scene:\n if scene['name'] not in scenes:\n continue\n\n first_sample_token = scene['first_sample_token']\n last_sample_token = scene['last_sample_token']\n cur_token = first_sample_token\n first_sample = self.nusc.get('sample', first_sample_token)\n\n while True:\n sample = self.nusc.get('sample', cur_token)\n self.data.append(sample)\n\n if cur_token == last_sample_token:\n break\n\n cur_token = sample['next']\n\n if self.class_balanced_sampling and self.mode.lower(\n ) == 'train' and len(self.class_names) > 1:\n cls_dist = {class_name: [] for class_name in self.class_names}\n for index in range(len(self.data)):\n sample = self.data[index]\n gt_names = []\n for anno in sample['anns']:\n anno = self.nusc.get('sample_annotation', anno)\n if not self._filter(anno):\n continue\n class_name = self.LABEL_MAP[anno['category_name']]\n if class_name in self.class_names:\n gt_names.append(class_name)\n for class_name in set(gt_names):\n cls_dist[class_name].append(sample)\n\n num_balanced_samples = sum([len(v) for k, v in cls_dist.items()])\n num_balanced_samples = max(num_balanced_samples, 1)\n balanced_frac = 1.0 / len(self.class_names)\n fracs = [len(v) / num_balanced_samples for k, v in cls_dist.items()]\n sampling_ratios = [balanced_frac / frac for frac in fracs]\n\n resampling_data = []\n for samples, sampling_ratio in zip(\n list(cls_dist.values()), sampling_ratios):\n resampling_data.extend(\n np.random.choice(samples, int(\n len(samples) * sampling_ratio)).tolist())\n self.data = resampling_data\n\n def __len__(self):\n return len(self.data)\n\n def load_annotation(self, index: int, filter: Callable = None\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n \"\"\"\n bboxes = []\n labels = []\n velocities = []\n attrs = []\n\n sample = self.data[index]\n sample_data = self.nusc.get('sample_data', sample['data'][self.channel])\n ego_pose = self.nusc.get('ego_pose', sample_data['ego_pose_token'])\n channel_pose = self.nusc.get('calibrated_sensor',\n sample_data['calibrated_sensor_token'])\n\n for anno in 
sample['anns']:\n box = self.nusc.get_box(anno)\n box.velocity = self.nusc.box_velocity(box.token)\n\n # from global-coord to ego-coord\n box.translate(-np.array(ego_pose['translation']))\n box.rotate(Quaternion(ego_pose['rotation']).inverse)\n\n # from ego-coord to sensor-coord\n box.translate(-np.array(channel_pose['translation']))\n box.rotate(Quaternion(channel_pose['rotation']).inverse)\n\n anno = self.nusc.get('sample_annotation', anno)\n if not anno[\n 'category_name'] in self.LABEL_MAP: # also filter [\"DontCare\", \"ignore\", \"UNKNOWN\"]\n continue\n\n # filter out objects that do not meet the conditions\n if filter and not filter(anno, box):\n continue\n\n # add velocity\n # loaded velocity may be nan when using nuscenes_devkit<=1.1.9\n # so we reset nan velocity to zero\n velocity = np.array(box.velocity)\n velocity[np.isnan(velocity)] = 0\n velocities.append(velocity[:2])\n\n # get attribute\n clsname = self.LABEL_MAP[anno['category_name']]\n label = self.class_names.index(clsname)\n\n if len(anno['attribute_tokens']) == 0:\n attr_name = self.DEFAULT_ATTRIBUTE_MAP[clsname]\n else:\n attr_token = anno['attribute_tokens'][0]\n attr_name = self.nusc.get('attribute', attr_token)['name']\n attrs.append(self.ATTRIBUTE_MAP[attr_name])\n\n # TODO: Fix me\n x, y, z = box.center\n w, l, h = box.wlh\n #yaw = box.orientation.yaw_pitch_roll[0] #TODO(luoqianhui): check this yaw\n v = np.dot(box.orientation.rotation_matrix, np.array([1, 0, 0]))\n yaw = np.arctan2(v[1], v[0])\n\n bbox3d = np.array(\n [x, y, z, w, l, h, -(yaw + np.pi / 2)\n ], #TODO(luoqianhui): check this positive sign of yaw\n dtype=np.float32)\n # loaded bounding box may be nan when using nuscenes_devkit<=1.1.9\n # so we reset nan box to zero\n bbox3d[np.isnan(bbox3d)] = 0\n bboxes.append(bbox3d)\n labels.append(label)\n\n bboxes = BBoxes3D(\n bboxes, origin=(0.5, 0.5, 0.5), velocities=np.array(velocities))\n labels = np.array(labels, dtype=np.int32)\n attrs = np.array(attrs, dtype=np.int32)\n\n return bboxes, labels, attrs\n\n def padding_sample(self, samples: List[Sample]):\n # do nothing for sweeps\n if samples[0].labels is None:\n return\n\n maxlen = max([len(sample.labels) for sample in samples])\n padding_lens = [maxlen - len(sample.labels) for sample in samples]\n\n for padlen, sample in zip(padding_lens, samples):\n if padlen == 0:\n continue\n\n _pad_item = np.ones([padlen], np.int32) * -1\n sample.labels = np.append(sample.labels, _pad_item)\n\n if sample.bboxes_2d is not None:\n _pad_item = np.zeros([padlen, sample.bboxes_2d.shape[1]],\n np.float32)\n sample.bboxes_2d = BBoxes2D(\n np.append(sample.bboxes_2d, _pad_item, axis=0))\n\n if sample.bboxes_3d is not None:\n _pad_item = np.zeros([padlen, sample.bboxes_3d.shape[1]],\n np.float32)\n sample.bboxes_3d = BBoxes3D(\n np.append(sample.bboxes_3d, _pad_item, axis=0))\n\n if sample.velocities is not None:\n _pad_item = np.zeros([padlen, 2], np.float32)\n sample.velocities = np.append(\n sample.velocities, _pad_item, axis=0)\n\n if sample.attrs is not None:\n _pad_item = np.ones([padlen], np.int32) * -1\n sample.attrs = np.append(sample.attrs, _pad_item)\n\n @property\n def metric(self):\n return NuScenesMetric(\n nuscense=self.nusc,\n mode=self.mode,\n channel=self.channel,\n class_names=self.class_names,\n attrmap=self.ATTRIBUTE_MAP_REVERSE)\n\n @property\n def name(self) -> str:\n return \"nuScenes\"\n\n @property\n def labels(self) -> List[str]:\n return self.class_names" }, { "identifier": "logger", "path": "paddle3d/utils/logger.py", "snippet": "class 
Logger(object):\nclass ProgressBar(object):\n def __init__(self, name: str = None):\n def format(self):\n def disable(self):\n def enable(self):\n def enabled(self) -> bool:\n def __call__(self, log_level: str, msg: str):\n def use_terminator(self, terminator: str):\n def processing(self, msg: str, flush_interval: float = 0.1):\n def _printer():\n def progressbar(self, msg: str, flush_interval: float = 0.1):\n def range(self, stop: int, msg: str):\n def enumerate(self, iterable: Iterable, msg: str):\n def __init__(self, logger: Logger, flush_interval: float = 0.1):\n def update(self, progress: float):" } ]
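get_data_info in the NuscenesMVDataset context above assembles, for every camera, a 4x4 lidar-to-image matrix by inverting the stored sensor2lidar rotation/translation and left-multiplying the zero-padded intrinsics. The sketch below reproduces that composition with plain NumPy; the rotation, translation and intrinsic values in the demo are placeholders for what each cam_info dict actually holds.

# Sketch of the lidar -> image projection matrix built in get_data_info.
import numpy as np

def build_lidar2img(sensor2lidar_rotation, sensor2lidar_translation, cam_intrinsic):
    # lidar -> camera extrinsics (row-vector convention, as in the dataset code).
    lidar2cam_r = np.linalg.inv(sensor2lidar_rotation)
    lidar2cam_t = sensor2lidar_translation @ lidar2cam_r.T
    lidar2cam_rt = np.eye(4)
    lidar2cam_rt[:3, :3] = lidar2cam_r.T
    lidar2cam_rt[3, :3] = -lidar2cam_t

    # Pad the 3x3 intrinsics to 4x4 and compose with the extrinsics.
    viewpad = np.eye(4)
    viewpad[:cam_intrinsic.shape[0], :cam_intrinsic.shape[1]] = cam_intrinsic
    return viewpad @ lidar2cam_rt.T

if __name__ == '__main__':
    rot = np.eye(3)                       # placeholder sensor2lidar_rotation
    trans = np.array([0.5, 0.0, 1.6])     # placeholder sensor2lidar_translation
    K = np.array([[1266.0, 0.0, 816.0],
                  [0.0, 1266.0, 491.0],
                  [0.0, 0.0, 1.0]])       # placeholder cam_intrinsic
    print(build_lidar2img(rot, trans, K).shape)   # (4, 4)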
import argparse import os import pickle import numpy as np import tqdm from pathlib import Path from nuscenes import NuScenes from nuscenes.can_bus.can_bus_api import NuScenesCanBus from nuscenes.utils import splits as nuscenes_split from nuscenes.utils.data_classes import Box as NuScenesBox from nuscenes.utils.geometry_utils import transform_matrix from pyquaternion import Quaternion from paddle3d.datasets.nuscenes import NuscenesMVDataset from paddle3d.datasets.nuscenes.nuscenes_det import NuscenesDetDataset from paddle3d.utils.logger import logger
10,934
rotation = last_pose.pop('orientation') can_bus.extend(pos) can_bus.extend(rotation) for key in last_pose.keys(): can_bus.extend(pose[key]) # 16 elements can_bus.extend([0., 0.]) return np.array(can_bus) def fill_trainval_infos(nusc, nusc_can_bus, train_scenes, val_scenes, test=False, max_sweeps=10): """Generate the train/val infos from the raw data. """ train_nusc_infos = [] val_nusc_infos = [] frame_idx = 0 msg = "Begin to generate a info of nuScenes dataset." for sample_idx in logger.range(len(nusc.sample), msg=msg): sample = nusc.sample[sample_idx] lidar_token = sample['data']['LIDAR_TOP'] sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) cs_record = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) lidar_path, boxes, _ = nusc.get_sample_data(lidar_token) assert os.path.exists(lidar_path) # absolute path can_bus = _get_can_bus_info(nusc, nusc_can_bus, sample) info = { 'lidar_token': lidar_token, 'lidar_path': nusc.get('sample_data', lidar_token)['filename'], # relative path 'token': sample['token'], 'prev': sample['prev'], 'next': sample['next'], 'can_bus': can_bus, 'frame_idx': frame_idx, # temporal related info 'sweeps': [], 'cams': dict(), 'scene_token': sample['scene_token'], 'lidar2ego_translation': cs_record['translation'], 'lidar2ego_rotation': cs_record['rotation'], 'ego2global_translation': pose_record['translation'], 'ego2global_rotation': pose_record['rotation'], 'timestamp': sample['timestamp'], } if sample['next'] == '': frame_idx = 0 else: frame_idx += 1 l2e_r = info['lidar2ego_rotation'] l2e_t = info['lidar2ego_translation'] e2g_r = info['ego2global_rotation'] e2g_t = info['ego2global_translation'] l2e_r_mat = Quaternion(l2e_r).rotation_matrix e2g_r_mat = Quaternion(e2g_r).rotation_matrix # obtain 6 image's information per frame camera_types = [ 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_FRONT_LEFT', 'CAM_BACK', 'CAM_BACK_LEFT', 'CAM_BACK_RIGHT', ] for cam in camera_types: cam_token = sample['data'][cam] cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_token) cam_info = obtain_sensor2top(nusc, cam_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, cam) cam_info.update(cam_intrinsic=cam_intrinsic) info['cams'].update({cam: cam_info}) # obtain sweeps for a single key-frame sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) sweeps = [] while len(sweeps) < max_sweeps: if not sd_rec['prev'] == '': sweep = obtain_sensor2top(nusc, sd_rec['prev'], l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, 'lidar') sweeps.append(sweep) sd_rec = nusc.get('sample_data', sd_rec['prev']) else: break info['sweeps'] = sweeps # obtain annotation if not test: annotations = [ nusc.get('sample_annotation', token) for token in sample['anns'] ] locs = np.array([b.center for b in boxes]).reshape(-1, 3) dims = np.array([b.wlh for b in boxes]).reshape(-1, 3) rots = np.array([b.orientation.yaw_pitch_roll[0] for b in boxes]).reshape(-1, 1) velocity = np.array( [nusc.box_velocity(token)[:2] for token in sample['anns']]) valid_flag = np.array( [(anno['num_lidar_pts'] + anno['num_radar_pts']) > 0 for anno in annotations], dtype=bool).reshape(-1) # convert velo from global to lidar for i in range(len(boxes)): velo = np.array([*velocity[i], 0.0]) velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv( l2e_r_mat).T velocity[i] = velo[:2] names = [b.name for b in boxes] for i in range(len(names)): # NuscenesDetDataset.LABEL_MAP
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------ # Modify from https://github.com/fundamentalvision/BEVFormer/blob/master/tools/create_data.py # Copyright (c) OpenMMLab. All rights reserved. # ------------------------------------------------------------------------ # ------------------------------------------------------------------------ # Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d) # Copyright (c) OpenMMLab. All rights reserved. # ------------------------------------------------------------------------ SENSORS = [ 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_RIGHT', 'CAM_BACK', 'CAM_BACK_LEFT', 'CAM_FRONT_LEFT' ] def parse_args(): parser = argparse.ArgumentParser( description='Create infos for kitti dataset.') parser.add_argument( '--dataset_root', default='data/nuscenes', help='Path of the dataset.', type=str) parser.add_argument( '--can_bus_root', type=str, default='data/nuscenes', help='specify the root path of nuScenes canbus') parser.add_argument( '--save_dir', default='data/nuscenes', help='Path to save the generated database.', type=str) parser.add_argument( '--mode', default='train', help='mode to generate dataset.', type=str) parser.add_argument( '--num_sweep', default=10, help='nummber of sweep frames between two key frame.', type=int) return parser.parse_args() def is_filepath(x): return isinstance(x, str) or isinstance(x, Path) def get_available_scenes(nusc): """Get available scenes from the input nuscenes class. """ available_scenes = [] logger.info('total scene num: {}'.format(len(nusc.scene))) for scene in nusc.scene: scene_token = scene['token'] scene_rec = nusc.get('scene', scene_token) sample_rec = nusc.get('sample', scene_rec['first_sample_token']) sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP']) has_more_frames = True scene_not_exist = False while has_more_frames: lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token']) lidar_path = str(lidar_path) if os.getcwd() in lidar_path: # path from lyftdataset is absolute path lidar_path = lidar_path.split(f'{os.getcwd()}/')[-1] # relative path if not is_filepath(lidar_path): scene_not_exist = True break else: break if scene_not_exist: continue available_scenes.append(scene) logger.info('exist scene num: {}'.format(len(available_scenes))) return available_scenes def obtain_sensor2top(nusc, sensor_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, sensor_type='lidar'): """Obtain the info with RT matric from general sensor to Top LiDAR. 
""" sd_rec = nusc.get('sample_data', sensor_token) cs_record = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) data_path = str(nusc.get_sample_data_path(sd_rec['token'])) # absolute path if os.getcwd() in data_path: # path from lyftdataset is absolute path data_path = data_path.split(f'{os.getcwd()}/')[-1] # relative path sweep = { 'data_path': nusc.get('sample_data', sd_rec['token'])['filename'], # relative path 'type': sensor_type, 'sample_data_token': sd_rec['token'], 'sensor2ego_translation': cs_record['translation'], 'sensor2ego_rotation': cs_record['rotation'], 'ego2global_translation': pose_record['translation'], 'ego2global_rotation': pose_record['rotation'], 'timestamp': sd_rec['timestamp'] } l2e_r_s = sweep['sensor2ego_rotation'] l2e_t_s = sweep['sensor2ego_translation'] e2g_r_s = sweep['ego2global_rotation'] e2g_t_s = sweep['ego2global_translation'] # obtain the RT from sensor to Top LiDAR # sweep->ego->global->ego'->lidar l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ ( np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ ( np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T ) + l2e_t @ np.linalg.inv(l2e_r_mat).T sweep['sensor2lidar_rotation'] = R.T # points @ R.T + T sweep['sensor2lidar_translation'] = T return sweep def _get_can_bus_info(nusc, nusc_can_bus, sample): scene_name = nusc.get('scene', sample['scene_token'])['name'] sample_timestamp = sample['timestamp'] try: pose_list = nusc_can_bus.get_messages(scene_name, 'pose') except: return np.zeros(18) # server scenes do not have can bus information. can_bus = [] # during each scene, the first timestamp of can_bus may be large than the first sample's timestamp last_pose = pose_list[0] for i, pose in enumerate(pose_list): if pose['utime'] > sample_timestamp: break last_pose = pose _ = last_pose.pop('utime') # useless pos = last_pose.pop('pos') rotation = last_pose.pop('orientation') can_bus.extend(pos) can_bus.extend(rotation) for key in last_pose.keys(): can_bus.extend(pose[key]) # 16 elements can_bus.extend([0., 0.]) return np.array(can_bus) def fill_trainval_infos(nusc, nusc_can_bus, train_scenes, val_scenes, test=False, max_sweeps=10): """Generate the train/val infos from the raw data. """ train_nusc_infos = [] val_nusc_infos = [] frame_idx = 0 msg = "Begin to generate a info of nuScenes dataset." 
for sample_idx in logger.range(len(nusc.sample), msg=msg): sample = nusc.sample[sample_idx] lidar_token = sample['data']['LIDAR_TOP'] sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) cs_record = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) lidar_path, boxes, _ = nusc.get_sample_data(lidar_token) assert os.path.exists(lidar_path) # absolute path can_bus = _get_can_bus_info(nusc, nusc_can_bus, sample) info = { 'lidar_token': lidar_token, 'lidar_path': nusc.get('sample_data', lidar_token)['filename'], # relative path 'token': sample['token'], 'prev': sample['prev'], 'next': sample['next'], 'can_bus': can_bus, 'frame_idx': frame_idx, # temporal related info 'sweeps': [], 'cams': dict(), 'scene_token': sample['scene_token'], 'lidar2ego_translation': cs_record['translation'], 'lidar2ego_rotation': cs_record['rotation'], 'ego2global_translation': pose_record['translation'], 'ego2global_rotation': pose_record['rotation'], 'timestamp': sample['timestamp'], } if sample['next'] == '': frame_idx = 0 else: frame_idx += 1 l2e_r = info['lidar2ego_rotation'] l2e_t = info['lidar2ego_translation'] e2g_r = info['ego2global_rotation'] e2g_t = info['ego2global_translation'] l2e_r_mat = Quaternion(l2e_r).rotation_matrix e2g_r_mat = Quaternion(e2g_r).rotation_matrix # obtain 6 image's information per frame camera_types = [ 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_FRONT_LEFT', 'CAM_BACK', 'CAM_BACK_LEFT', 'CAM_BACK_RIGHT', ] for cam in camera_types: cam_token = sample['data'][cam] cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_token) cam_info = obtain_sensor2top(nusc, cam_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, cam) cam_info.update(cam_intrinsic=cam_intrinsic) info['cams'].update({cam: cam_info}) # obtain sweeps for a single key-frame sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) sweeps = [] while len(sweeps) < max_sweeps: if not sd_rec['prev'] == '': sweep = obtain_sensor2top(nusc, sd_rec['prev'], l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, 'lidar') sweeps.append(sweep) sd_rec = nusc.get('sample_data', sd_rec['prev']) else: break info['sweeps'] = sweeps # obtain annotation if not test: annotations = [ nusc.get('sample_annotation', token) for token in sample['anns'] ] locs = np.array([b.center for b in boxes]).reshape(-1, 3) dims = np.array([b.wlh for b in boxes]).reshape(-1, 3) rots = np.array([b.orientation.yaw_pitch_roll[0] for b in boxes]).reshape(-1, 1) velocity = np.array( [nusc.box_velocity(token)[:2] for token in sample['anns']]) valid_flag = np.array( [(anno['num_lidar_pts'] + anno['num_radar_pts']) > 0 for anno in annotations], dtype=bool).reshape(-1) # convert velo from global to lidar for i in range(len(boxes)): velo = np.array([*velocity[i], 0.0]) velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv( l2e_r_mat).T velocity[i] = velo[:2] names = [b.name for b in boxes] for i in range(len(names)): # NuscenesDetDataset.LABEL_MAP
if names[i] in NuscenesDetDataset.LABEL_MAP:
1
2023-11-08 07:08:03+00:00
16k
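obtain_sensor2top in the script above chains four homogeneous transforms (sweep sensor to sweep ego, sweep ego to global, global to key-frame ego, key-frame ego to key-frame LiDAR) so that every sweep and camera is expressed in the key frame's LiDAR coordinates. The column-vector sketch below composes the same chain directly from 4x4 matrices; the original code uses the equivalent row-vector algebra, and the identity poses in the demo are placeholders.

# Column-vector sketch of the sweep -> key-frame-lidar chain composed by obtain_sensor2top.
import numpy as np
from pyquaternion import Quaternion

def make_tf(rotation_quat, translation):
    # 4x4 homogeneous transform from a (w, x, y, z) quaternion and a translation.
    tf = np.eye(4)
    tf[:3, :3] = Quaternion(rotation_quat).rotation_matrix
    tf[:3, 3] = np.asarray(translation)
    return tf

def sweep_to_keyframe_lidar(sweep_sensor2ego, sweep_ego2global,
                            key_lidar2ego, key_ego2global):
    # sweep sensor -> sweep ego -> global -> key-frame ego -> key-frame lidar
    return (np.linalg.inv(key_lidar2ego)
            @ np.linalg.inv(key_ego2global)
            @ sweep_ego2global
            @ sweep_sensor2ego)

if __name__ == '__main__':
    identity_pose = ([1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0])   # placeholder pose
    tf = sweep_to_keyframe_lidar(make_tf(*identity_pose), make_tf(*identity_pose),
                                 make_tf(*identity_pose), make_tf(*identity_pose))
    print(np.allclose(tf, np.eye(4)))   # True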
ml4bio/RhoFold
rhofold/model/structure_module.py
[ { "identifier": "Linear", "path": "rhofold/model/primitives.py", "snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found\n in the code.\n \"\"\"\n\n def __init__(\n self,\n in_dim: int,\n out_dim: int,\n bias: bool = True,\n ):\n \"\"\"\n Args:\n in_dim:\n The final dimension of inputs to the layer\n out_dim:\n The final dimension of layer outputs\n bias:\n Whether to learn an additive bias. True by default\n \"\"\"\n super(Linear, self).__init__(in_dim, out_dim, bias=bias)\n\n if bias:\n with torch.no_grad():\n self.bias.fill_(0)" }, { "identifier": "LayerNorm", "path": "rhofold/model/primitives.py", "snippet": "class LayerNorm(nn.Module):\n def __init__(self, c_in, eps=1e-5):\n super(LayerNorm, self).__init__()\n \n self.c_in = (c_in,)\n self.eps = eps\n\n self.weight = nn.Parameter(torch.ones(c_in))\n self.bias = nn.Parameter(torch.zeros(c_in))\n\n def forward(self, x): \n\n out = nn.functional.layer_norm(\n x,\n self.c_in,\n self.weight,\n self.bias,\n self.eps,\n )\n\n return out" }, { "identifier": "Rigid", "path": "rhofold/utils/rigid_utils.py", "snippet": "class Rigid:\n \"\"\"\n A class representing a rigid transformation. Little more than a wrapper\n around two objects: a Rotation object and a [*, 3] translation\n Designed to behave approximately like a single torch tensor with the \n shape of the shared batch dimensions of its component parts.\n \"\"\"\n def __init__(self, \n rots: Optional[Rotation],\n trans: Optional[torch.Tensor],\n ):\n \"\"\"\n Args:\n rots: A [*, 3, 3] rotation tensor\n trans: A corresponding [*, 3] translation tensor\n \"\"\"\n # (we need device, dtype, etc. from at least one input)\n\n batch_dims, dtype, device, requires_grad = None, None, None, None\n if(trans is not None):\n batch_dims = trans.shape[:-1]\n dtype = trans.dtype\n device = trans.device\n requires_grad = trans.requires_grad\n elif(rots is not None):\n batch_dims = rots.shape\n dtype = rots.dtype\n device = rots.device\n requires_grad = rots.requires_grad\n else:\n raise ValueError(\"At least one input argument must be specified\")\n\n if(rots is None):\n rots = Rotation.identity(\n batch_dims, dtype, device, requires_grad,\n )\n elif(trans is None):\n trans = identity_trans(\n batch_dims, dtype, device, requires_grad,\n )\n\n if((rots.shape != trans.shape[:-1]) or\n (rots.device != trans.device)):\n raise ValueError(\"Rots and trans incompatible\")\n\n # Force full precision. 
Happens to the rotations automatically.\n trans = trans.to(dtype=torch.float32)\n\n self._rots = rots\n self._trans = trans\n\n @staticmethod\n def identity(\n shape: Tuple[int], \n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None, \n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rigid:\n \"\"\"\n Constructs an identity transformation.\n\n Args:\n shape: \n The desired shape\n dtype: \n The dtype of both internal tensors\n device: \n The device of both internal tensors\n requires_grad: \n Whether grad should be enabled for the internal tensors\n Returns:\n The identity transformation\n \"\"\"\n return Rigid(\n Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),\n identity_trans(shape, dtype, device, requires_grad),\n )\n\n def __getitem__(self, \n index: Any,\n ) -> Rigid:\n \"\"\" \n Indexes the affine transformation with PyTorch-style indices.\n The index is applied to the shared dimensions of both the rotation\n and the translation.\n\n E.g.::\n\n r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)\n t = Rigid(r, torch.rand(10, 10, 3))\n indexed = t[3, 4:6]\n assert(indexed.shape == (2,))\n assert(indexed.get_rots().shape == (2,))\n assert(indexed.get_trans().shape == (2, 3))\n\n Args:\n index: A standard torch tensor index. E.g. 8, (10, None, 3),\n or (3, slice(0, 1, None))\n Returns:\n The indexed tensor \n \"\"\"\n if type(index) != tuple:\n index = (index,)\n \n return Rigid(\n self._rots[index],\n self._trans[index + (slice(None),)],\n )\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Pointwise left multiplication of the transformation with a tensor.\n Can be used to e.g. mask the Rigid.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n new_rots = self._rots * right\n new_trans = self._trans * right[..., None]\n\n return Rigid(new_rots, new_trans)\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Reverse pointwise multiplication of the transformation with a \n tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the shape of the shared dimensions of the rotation and\n the translation.\n \n Returns:\n The shape of the transformation\n \"\"\"\n s = self._trans.shape[:-1]\n return s\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n Returns the device on which the Rigid's tensors are located.\n\n Returns:\n The device on which the Rigid's tensors are located\n \"\"\"\n return self._trans.device\n\n def get_rots(self) -> Rotation:\n \"\"\"\n Getter for the rotation.\n\n Returns:\n The rotation object\n \"\"\"\n return self._rots\n\n def get_trans(self) -> torch.Tensor:\n \"\"\"\n Getter for the translation.\n\n Returns:\n The stored translation\n \"\"\"\n return self._trans\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Composes the transformation with a quaternion update vector of\n shape [*, 6], where the final 6 columns represent the x, y, and\n z values of a quaternion of form (1, x, y, z) followed by a 3D\n translation.\n\n Args:\n q_vec: The quaternion update vector.\n Returns:\n The composed transformation.\n \"\"\"\n q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]\n new_rots = self._rots.compose_q_update_vec(q_vec)\n\n trans_update = 
self._rots.apply(t_vec)\n new_translation = self._trans + trans_update\n\n return Rigid(new_rots, new_translation)\n\n def compose(self,\n r: Rigid,\n ) -> Rigid:\n \"\"\"\n Composes the current rigid object with another.\n\n Args:\n r:\n Another Rigid object\n Returns:\n The composition of the two transformations\n \"\"\"\n new_rot = self._rots.compose_r(r._rots)\n new_trans = self._rots.apply(r._trans) + self._trans\n return Rigid(new_rot, new_trans)\n\n def apply(self, \n pts: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n Applies the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor.\n Returns:\n The transformed points.\n \"\"\"\n rotated = self._rots.apply(pts) \n return rotated + self._trans\n\n def invert_apply(self, \n pts: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"\n Applies the inverse of the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor\n Returns:\n The transformed points.\n \"\"\"\n pts = pts - self._trans\n return self._rots.invert_apply(pts) \n\n def invert(self) -> Rigid:\n \"\"\"\n Inverts the transformation.\n\n Returns:\n The inverse transformation.\n \"\"\"\n rot_inv = self._rots.invert() \n trn_inv = rot_inv.apply(self._trans)\n\n return Rigid(rot_inv, -1 * trn_inv)\n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rigid:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying translation and\n rotation tensors, mapping over the translation/rotation dimensions\n respectively.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rigid\n Returns:\n The transformed Rigid object\n \"\"\" \n new_rots = self._rots.map_tensor_fn(fn) \n new_trans = torch.stack(\n list(map(fn, torch.unbind(self._trans, dim=-1))), \n dim=-1\n )\n\n return Rigid(new_rots, new_trans)\n\n def to_tensor_4x4(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a homogenous transformation tensor.\n\n Returns:\n A [*, 4, 4] homogenous transformation tensor\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 4, 4))\n tensor[..., :3, :3] = self._rots.get_rot_mats()\n tensor[..., :3, 3] = self._trans\n tensor[..., 3, 3] = 1\n return tensor\n\n @staticmethod\n def from_tensor_4x4(\n t: torch.Tensor\n ) -> Rigid:\n \"\"\"\n Constructs a transformation from a homogenous transformation\n tensor.\n\n Args:\n t: [*, 4, 4] homogenous transformation tensor\n Returns:\n T object with shape [*]\n \"\"\"\n if(t.shape[-2:] != (4, 4)):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n rots = Rotation(rot_mats=t[..., :3, :3], quats=None)\n trans = t[..., :3, 3]\n \n return Rigid(rots, trans)\n\n def to_tensor_7(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a tensor with 7 final columns, four \n for the quaternion followed by three for the translation.\n\n Returns:\n A [*, 7] tensor representation of the transformation\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 7))\n tensor[..., :4] = self._rots.get_quats()\n tensor[..., 4:] = self._trans\n\n return tensor\n\n @staticmethod\n def from_tensor_7(\n t: torch.Tensor,\n normalize_quats: bool = False,\n ) -> Rigid:\n if(t.shape[-1] != 7):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n quats, trans = t[..., :4], t[..., 4:]\n\n rots = Rotation(\n rot_mats=None, \n quats=quats, \n normalize_quats=normalize_quats\n )\n\n return Rigid(rots, trans)\n\n @staticmethod\n def from_3_points(\n p_neg_x_axis: torch.Tensor, \n origin: torch.Tensor, \n p_xy_plane: torch.Tensor, \n eps: float = 1e-8\n ) -> 
Rigid:\n \"\"\"\n Implements algorithm 21. Constructs transformations from sets of 3 \n points using the Gram-Schmidt algorithm.\n\n Args:\n p_neg_x_axis: [*, 3] coordinates\n origin: [*, 3] coordinates used as frame origins\n p_xy_plane: [*, 3] coordinates\n eps: Small epsilon value\n Returns:\n A transformation object of shape [*]\n \"\"\"\n p_neg_x_axis = torch.unbind(p_neg_x_axis, dim=-1)\n origin = torch.unbind(origin, dim=-1)\n p_xy_plane = torch.unbind(p_xy_plane, dim=-1)\n\n e0 = [c1 - c2 for c1, c2 in zip(origin, p_neg_x_axis)]\n e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane, origin)]\n\n denom = torch.sqrt(sum((c * c for c in e0)) + eps)\n e0 = [c / denom for c in e0]\n dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))\n e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]\n denom = torch.sqrt(sum((c * c for c in e1)) + eps)\n e1 = [c / denom for c in e1]\n e2 = [\n e0[1] * e1[2] - e0[2] * e1[1],\n e0[2] * e1[0] - e0[0] * e1[2],\n e0[0] * e1[1] - e0[1] * e1[0],\n ]\n\n rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)\n rots = rots.reshape(rots.shape[:-1] + (3, 3))\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, torch.stack(origin, dim=-1))\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. The dimension is relative to the\n shared dimensions of the rotation/translation.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed transformation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n rots = self._rots.unsqueeze(dim)\n trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)\n\n return Rigid(rots, trans)\n\n @staticmethod\n def cat(\n ts: Sequence[Rigid], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates transformations along a new dimension.\n\n Args:\n ts: \n A list of T objects\n dim: \n The dimension along which the transformations should be \n concatenated\n Returns:\n A concatenated transformation object\n \"\"\"\n rots = Rotation.cat([t._rots for t in ts], dim) \n trans = torch.cat(\n [t._trans for t in ts], dim=dim if dim >= 0 else dim - 1\n )\n\n return Rigid(rots, trans)\n\n def apply_rot_fn(self, fn: Callable[Rotation, Rotation]) -> Rigid:\n \"\"\"\n Applies a Rotation -> Rotation function to the stored rotation\n object.\n\n Args:\n fn: A function of type Rotation -> Rotation\n Returns:\n A transformation object with a transformed rotation.\n \"\"\"\n return Rigid(fn(self._rots), self._trans)\n\n def apply_trans_fn(self, fn: Callable[torch.Tensor, torch.Tensor]) -> Rigid:\n \"\"\"\n Applies a Tensor -> Tensor function to the stored translation.\n\n Args:\n fn: \n A function of type Tensor -> Tensor to be applied to the\n translation\n Returns:\n A transformation object with a transformed translation.\n \"\"\"\n return Rigid(self._rots, fn(self._trans))\n\n def scale_translation(self, trans_scale_factor: float) -> Rigid:\n \"\"\"\n Scales the translation by a constant factor.\n\n Args:\n trans_scale_factor:\n The constant factor\n Returns:\n A transformation object with a scaled translation.\n \"\"\"\n fn = lambda t: t * trans_scale_factor\n return self.apply_trans_fn(fn)\n\n def stop_rot_gradient(self) -> Rigid:\n \"\"\"\n Detaches the underlying rotation object\n\n Returns:\n A transformation object with detached rotations\n \"\"\"\n fn = lambda r: r.detach()\n return self.apply_rot_fn(fn)\n\n @staticmethod\n def make_transform_from_reference(n_xyz, ca_xyz, c_xyz, eps=1e-20):\n \"\"\"\n Returns a transformation object 
from reference coordinates.\n \n Note that this method does not take care of symmetries. If you \n provide the atom positions in the non-standard way, the N atom will \n end up not at [-0.527250, 1.359329, 0.0] but instead at \n [-0.527250, -1.359329, 0.0]. You need to take care of such cases in \n your code.\n \n Args:\n n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.\n ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.\n c_xyz: A [*, 3] tensor of carbon xyz coordinates.\n Returns:\n A transformation object. After applying the translation and \n rotation to the reference backbone, the coordinates will \n approximately equal to the input coordinates.\n \"\"\" \n translation = -1 * ca_xyz\n n_xyz = n_xyz + translation\n c_xyz = c_xyz + translation\n\n c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2)\n sin_c1 = -c_y / norm\n cos_c1 = c_x / norm\n zeros = sin_c1.new_zeros(sin_c1.shape)\n ones = sin_c1.new_ones(sin_c1.shape)\n\n c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))\n c1_rots[..., 0, 0] = cos_c1\n c1_rots[..., 0, 1] = -1 * sin_c1\n c1_rots[..., 1, 0] = sin_c1\n c1_rots[..., 1, 1] = cos_c1\n c1_rots[..., 2, 2] = 1\n\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2)\n sin_c2 = c_z / norm\n cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm\n\n c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n c2_rots[..., 0, 0] = cos_c2\n c2_rots[..., 0, 2] = sin_c2\n c2_rots[..., 1, 1] = 1\n c2_rots[..., 2, 0] = -1 * sin_c2\n c2_rots[..., 2, 2] = cos_c2\n\n c_rots = rot_matmul(c2_rots, c1_rots)\n n_xyz = rot_vec_mul(c_rots, n_xyz)\n\n _, n_y, n_z = [n_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2)\n sin_n = -n_z / norm\n cos_n = n_y / norm\n\n n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n n_rots[..., 0, 0] = 1\n n_rots[..., 1, 1] = cos_n\n n_rots[..., 1, 2] = -1 * sin_n\n n_rots[..., 2, 1] = sin_n\n n_rots[..., 2, 2] = cos_n\n\n rots = rot_matmul(n_rots, c_rots)\n\n rots = rots.transpose(-1, -2)\n translation = -1 * translation\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, translation)\n\n def cuda(self) -> Rigid:\n \"\"\"\n Moves the transformation object to GPU memory\n \n Returns:\n A version of the transformation on GPU\n \"\"\"\n return Rigid(self._rots.cuda(), self._trans.cuda())" }, { "identifier": "dict_multimap", "path": "rhofold/utils/tensor_utils.py", "snippet": "def dict_multimap(fn, dicts):\n first = dicts[0]\n new_dict = {}\n for k, v in first.items():\n all_v = [d[k] for d in dicts]\n if type(v) is dict:\n new_dict[k] = dict_multimap(fn, all_v)\n else:\n new_dict[k] = fn(all_v)\n\n return new_dict" }, { "identifier": "permute_final_dims", "path": "rhofold/utils/tensor_utils.py", "snippet": "def permute_final_dims(tensor: torch.Tensor, inds: List[int]):\n zero_index = -1 * len(inds)\n first_inds = list(range(len(tensor.shape[:zero_index])))\n return tensor.permute(first_inds + [zero_index + i for i in inds])" }, { "identifier": "flatten_final_dims", "path": "rhofold/utils/tensor_utils.py", "snippet": "def flatten_final_dims(t: torch.Tensor, no_dims: int):\n return t.reshape(t.shape[:-no_dims] + (-1,))" }, { "identifier": "RNAAlphabet", "path": "rhofold/utils/alphabet.py", "snippet": "class RNAAlphabet(Alphabet):\n\n def get_batch_converter(self):\n if self.use_msa:\n return RNAMSABatchConverter(self)\n else:\n return BatchConverter(self)\n\n @classmethod\n def from_architecture(cls, name: str, ) -> \"RNAAlphabet\":\n if name in (\"RNA MSA Transformer\", 
\"rna_msa_transformer\", \"RNA\"):\n standard_toks = rna_msaseq_toks[\"toks\"]\n prepend_toks = (\"<cls>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks = (\"<mask>\",)\n prepend_bos = True\n append_eos = False\n use_msa = True\n else:\n raise ValueError(\"Unknown architecture selected\")\n return cls(\n standard_toks, prepend_toks, append_toks, prepend_bos, append_eos, use_msa\n )" }, { "identifier": "RNAConverter", "path": "rhofold/utils/converter.py", "snippet": "class RNAConverter():\n \"\"\"RNA Structure Converter.\"\"\"\n\n def __init__(self):\n \"\"\"\"\"\"\n\n self.eps = 1e-4\n self.__init()\n\n def __init(self):\n \"\"\"\"\"\"\n\n self.cord_dict = defaultdict(dict)\n for resd_name in RNA_CONSTANTS.RESD_NAMES:\n for atom_name, _, cord_vals in RNA_CONSTANTS.ATOM_INFOS_PER_RESD[resd_name]:\n self.cord_dict[resd_name][atom_name] = torch.tensor(cord_vals, dtype=torch.float32)\n\n trans_dict_all = {}\n for resd_name in RNA_CONSTANTS.RESD_NAMES:\n trans_dict = {}\n cord_dict = {}\n\n atom_infos = RNA_CONSTANTS.ATOM_INFOS_PER_RESD[resd_name]\n angl_infos = RNA_CONSTANTS.ANGL_INFOS_PER_RESD[resd_name]\n n_angls = len(angl_infos)\n \n for atom_name, idx_rgrp, _ in atom_infos:\n if idx_rgrp == 0:\n cord_dict[atom_name] = self.cord_dict[resd_name][atom_name]\n\n trans_dict['omega-main'] = (torch.eye(3, dtype=torch.float32), torch.zeros((3), dtype=torch.float32))\n trans_dict['phi-main'] = (torch.eye(3, dtype=torch.float32), torch.zeros((3), dtype=torch.float32))\n\n for idx_angl, (angl_name, _, atom_names_sel) in enumerate(angl_infos):\n x1 = cord_dict[atom_names_sel[0]]\n x2 = cord_dict[atom_names_sel[1]]\n x3 = cord_dict[atom_names_sel[2]]\n rot, tsl_vec = calc_rot_tsl(x1, x3, x3 + (x3 - x2))\n trans_dict['%s-main' % angl_name] = (rot, tsl_vec)\n\n for atom_name, idx_rgrp, _ in atom_infos:\n if idx_rgrp == idx_angl + 3:\n cord_dict[atom_name] = tsl_vec + torch.sum(\n rot * self.cord_dict[resd_name][atom_name].view(1, 3), dim=1)\n\n for idx_angl_src in range(1, n_angls - 1):\n idx_angl_dst = idx_angl_src + 1\n angl_name_src = angl_infos[idx_angl_src][0]\n angl_name_dst = angl_infos[idx_angl_dst][0]\n rot_src, tsl_vec_src = trans_dict['%s-main' % angl_name_src]\n rot_dst, tsl_vec_dst = trans_dict['%s-main' % angl_name_dst]\n rot = torch.matmul(rot_src.transpose(1, 0), rot_dst)\n tsl_vec = torch.matmul(rot_src.transpose(1, 0), tsl_vec_dst - tsl_vec_src)\n trans_dict['%s-%s' % (angl_name_dst, angl_name_src)] = (rot, tsl_vec)\n\n trans_dict_all[resd_name] = trans_dict\n\n self.trans_dict_init = trans_dict_all\n\n def build_cords(self, seq, fram, angl, rtn_cmsk=False):\n\n # initialization\n n_resds = len(seq)\n device = angl.device\n\n angl = angl.squeeze(dim=0) / (torch.norm(angl.squeeze(dim=0), dim=2, keepdim=True) + self.eps)\n rigid = Rigid.from_tensor_7(fram, normalize_quats=True)\n fram = rigid.to_tensor_4x4()\n rot = fram[:,:,:3,:3]\n tsl = fram[:,:,:3,3:].permute(0,1,3,2)\n\n fram = torch.cat([rot, tsl], dim=2)[:,:,:4,:3].permute(1,0,2,3)\n fmsk = torch.ones((n_resds, 1), dtype=torch.int8, device=device)\n amsk = torch.ones((n_resds, RNA_CONSTANTS.N_ANGLS_PER_RESD_MAX), dtype=torch.int8, device=device)\n cord = torch.zeros((n_resds, RNA_CONSTANTS.ATOM_NUM_MAX, 3), dtype=torch.float32, device=device)\n cmsk = torch.zeros((n_resds, RNA_CONSTANTS.ATOM_NUM_MAX), dtype=torch.int8, device=device)\n\n for resd_name in RNA_CONSTANTS.RESD_NAMES:\n idxs = [x for x in range(n_resds) if seq[x] == resd_name]\n if len(idxs) == 0:\n continue\n cord[idxs], cmsk[idxs] =\\\n self.__build_cord(resd_name, 
fram[idxs], fmsk[idxs], angl[idxs], amsk[idxs])\n\n return (cord, cmsk) if rtn_cmsk else (cord)\n\n def __build_cord(self, resd_name, fram, fmsk, angl, amsk):\n \"\"\"\"\"\"\n\n # initialization\n device = fram.device\n n_resds = fram.shape[0]\n atom_names_all = RNA_CONSTANTS.ATOM_NAMES_PER_RESD[resd_name]\n atom_names_pad = atom_names_all + ['X'] * (RNA_CONSTANTS.ATOM_NUM_MAX - len(atom_names_all))\n atom_infos_all = RNA_CONSTANTS.ATOM_INFOS_PER_RESD[resd_name]\n\n cord_dict = defaultdict(\n lambda: torch.zeros((n_resds, 3), dtype=torch.float32, device=device))\n cmsk_vec_dict = defaultdict(lambda: torch.zeros((n_resds), dtype=torch.int8, device=device))\n\n fram_null = torch.tensor(\n [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]], dtype=torch.float32, device=device)\n fram_dict = defaultdict(lambda: fram_null.unsqueeze(dim=0).repeat(n_resds, 1, 1))\n fmsk_vec_dict = defaultdict(lambda: torch.zeros((n_resds), dtype=torch.int8, device=device))\n\n trans_dict = {'main': (fram[:, 0, :3], fram[:, 0, 3])}\n\n rot_curr, tsl_curr = trans_dict['main']\n atom_names_sel = [x[0] for x in atom_infos_all if x[1] == 0]\n for atom_name_sel in atom_names_sel:\n cord_vec = self.cord_dict[resd_name][atom_name_sel].to(device)\n cord_dict[atom_name_sel] = \\\n tsl_curr + torch.sum(rot_curr * cord_vec.view(1, 1, 3), dim=2)\n cmsk_vec_dict[atom_name_sel] = fmsk[:, 0]\n\n # determine 3D coordinates of atoms belonging to side-chain rigid-groups\n angl_infos_all = RNA_CONSTANTS.ANGL_INFOS_PER_RESD[resd_name]\n rgrp_names_all = ['omega', 'phi'] + [x[0] for x in angl_infos_all]\n\n for idx_rgrp, rgrp_name_curr in enumerate(rgrp_names_all):\n if rgrp_name_curr in ['omega', 'phi', 'angl_0', 'angl_1']:\n rgrp_name_prev = 'main'\n else:\n rgrp_name_prev = 'angl_%d' % (int(rgrp_name_curr[-1]) - 1)\n\n rot_prev, tsl_prev = trans_dict[rgrp_name_prev]\n rot_base, tsl_vec_base = \\\n self.trans_dict_init[resd_name]['%s-%s' % (rgrp_name_curr, rgrp_name_prev)]\n rot_base = rot_base.unsqueeze(dim=0).to(device)\n tsl_base = tsl_vec_base.unsqueeze(dim=0).to(device)\n \n rot_addi, tsl_addi = calc_angl_rot_tsl(angl[:, idx_rgrp])\n rot_curr, tsl_curr = merge_rot_tsl(\n rot_prev, tsl_prev, rot_base, tsl_base, rot_addi, tsl_addi)\n trans_dict[rgrp_name_curr] = (rot_curr, tsl_curr)\n\n fram_dict[rgrp_name_curr] = \\\n torch.cat([rot_curr, tsl_curr.unsqueeze(dim=1)], dim=1)\n fmsk_vec_dict[rgrp_name_curr] = fmsk[:, 0] * amsk[:, idx_rgrp]\n\n atom_names_sel = [x[0] for x in atom_infos_all if x[1] == idx_rgrp + 1]\n for atom_name_sel in atom_names_sel:\n cord_vec = self.cord_dict[resd_name][atom_name_sel].to(device)\n\n cord_dict[atom_name_sel] = \\\n tsl_curr + torch.sum(rot_curr * cord_vec.view(1, 1, 3), dim=2)\n cmsk_vec_dict[atom_name_sel] = fmsk_vec_dict[rgrp_name_curr]\n\n cmsk = torch.stack([cmsk_vec_dict[x] for x in atom_names_pad][:RNA_CONSTANTS.ATOM_NUM_MAX], dim=1)\n cord = torch.stack([cord_dict[x] for x in atom_names_pad][:RNA_CONSTANTS.ATOM_NUM_MAX], dim=1)\n\n return cord, cmsk\n\n def export_pdb_file(self, seq, atom_cords, path, atom_masks=None, confidence=None, chain_id=None, logger = None):\n \"\"\"Export a PDB file.\"\"\"\n\n # configurations\n i_code = ' '\n chain_id = '0' if chain_id is None else chain_id\n occupancy = 1.0\n cord_min = -999.0\n cord_max = 999.0\n seq_len = len(seq)\n\n n_key_atoms = RNA_CONSTANTS.ATOM_NUM_MAX\n\n # take all the atom coordinates as valid, if not specified\n if atom_masks is None:\n atom_masks = np.ones(atom_cords.shape[:-1], dtype=np.int8)\n\n # determine the set of atom names (per 
residue)\n if atom_cords.ndim == 2:\n if atom_cords.shape[0] == seq_len * n_key_atoms:\n atom_cords = np.reshape(atom_cords, [seq_len, n_key_atoms, 3])\n atom_masks = np.reshape(atom_masks, [seq_len, n_key_atoms])\n else:\n raise ValueError('atom coordinates\\' shape does not match the sequence length')\n\n elif atom_cords.ndim == 3:\n assert atom_cords.shape[0] == seq_len\n atom_cords = atom_cords\n atom_masks = atom_masks\n\n else:\n raise ValueError('atom coordinates must be a 2D or 3D np.ndarray')\n\n # reset invalid values in atom coordinates\n atom_cords = np.clip(atom_cords, cord_min, cord_max)\n atom_cords[np.isnan(atom_cords)] = 0.0\n atom_cords[np.isinf(atom_cords)] = 0.0\n\n # export the 3D structure to a PDB file\n os.makedirs(os.path.dirname(os.path.realpath(path)), exist_ok=True)\n with open(path, 'w') as o_file:\n n_atoms = 0\n for idx_resd, resd_name in enumerate(seq):\n for idx_atom, atom_name in enumerate(RNA_CONSTANTS.ATOM_NAMES_PER_RESD[resd_name]):\n\n temp_factor = 0.0 if confidence is None else \\\n float(100 * confidence.reshape([seq_len])[idx_resd - 1])\n\n if atom_masks[idx_resd, idx_atom] == 0:\n continue\n n_atoms += 1\n charge = atom_name[0]\n line_str = ''.join([\n 'ATOM ',\n '%5d' % n_atoms,\n ' ' + atom_name + ' ' * (3 - len(atom_name)),\n ' %s' % resd_name,\n ' %s' % chain_id,\n ' ' * (4 - len(str(idx_resd + 1))),\n '%s' % str(idx_resd + 1),\n '%s ' % i_code,\n '%8.3f' % atom_cords[idx_resd, idx_atom, 0],\n '%8.3f' % atom_cords[idx_resd, idx_atom, 1],\n '%8.3f' % atom_cords[idx_resd, idx_atom, 2],\n '%6.2f' % occupancy,\n '%6.2f' % temp_factor,\n ' ' * 10,\n '%2s' % charge,\n '%2s' % ' ',\n ])\n assert len(line_str) == 80, 'line length must be exactly 80 characters: ' + line_str\n o_file.write(line_str + '\\n')\n\n if logger is not None:\n logger.info(f' Export PDB file to {path}')" } ]
import math import torch import torch.nn as nn import torch.nn.functional as F from typing import Optional, Tuple, Sequence from rhofold.model.primitives import Linear, LayerNorm from rhofold.utils.rigid_utils import Rigid from rhofold.utils.tensor_utils import ( dict_multimap, permute_final_dims, flatten_final_dims, ) from einops import rearrange from rhofold.utils.alphabet import RNAAlphabet from rhofold.utils.converter import RNAConverter
10,951
self.layers.append(layer) self.linear_out = Linear(self.c_hidden, self.no_angles * 2) self.relu = nn.ReLU() def forward( self, s: torch.Tensor, s_initial: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: s: [*, C_hidden] single embedding s_initial: [*, C_hidden] single embedding as of the start of the StructureModule Returns: [*, no_angles, 2] predicted angles """ # [*, C_hidden] s_initial = self.relu(s_initial) s_initial = self.linear_initial(s_initial) s = self.relu(s) s = self.linear_in(s) s = s + s_initial for l in self.layers: s = l(s) s = self.relu(s) # [*, no_angles * 2] s = self.linear_out(s) # [*, no_angles, 2] s = s.view(s.shape[:-1] + (-1, 2)) unnormalized_s = s norm_denom = torch.sqrt( torch.clamp( torch.sum(s ** 2, dim=-1, keepdim=True), min=self.eps, ) ) s = s / norm_denom return unnormalized_s, s class InvariantPointAttention(nn.Module): """ Implements Algorithm 22. """ def __init__( self, c_s: int, c_z: int, c_hidden: int, no_heads: int, no_qk_points: int, no_v_points: int, inf: float = 1e5, eps: float = 1e-8, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_hidden: Hidden channel dimension no_heads: Number of attention heads no_qk_points: Number of query/key points to generate no_v_points: Number of value points to generate """ super(InvariantPointAttention, self).__init__() self.c_s = c_s self.c_z = c_z self.c_hidden = c_hidden self.no_heads = no_heads self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) # hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) # ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s) self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor],
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class RefineNet(nn.Module): """""" def __init__(self, dim = 64, is_pos_emb = True, n_layer = 4, enable = True, **kwargs): """Constructor function.""" super().__init__() self.is_pos_emb = is_pos_emb self.alphabet = RNAAlphabet.from_architecture('RNA') self.embed_tokens = nn.Embedding(len(self.alphabet), dim) self.enable = enable if self.is_pos_emb: self.embed_positions = PosEmbedding(4096, dim, self.alphabet.padding_idx) self.refine_layer0 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer1 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer2 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer3 = ResEGNN(corrections=n_layer, dims_in=dim) def forward(self, tokens, cords): """Perform the forward pass. Args: Returns: """ if not self.enable: return cords tokens = tokens[:, 0, :] tokens = tokens.unsqueeze(-1).repeat(1, 1, 23) b, l, n = tokens.shape cords = cords.reshape([b, l, n, 3]) fea = self.embed_tokens(tokens) b, l, n, _ = fea.shape if self.is_pos_emb: fea += self.embed_positions(tokens.reshape(b * l, n)).view(fea.size()) out = self.refine_layer0(fea.reshape([ b * l, n, -1]), cords.reshape([ b * l, n, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, l, n, -1]).transpose(1,2) cords = cords.reshape([b, l, n, -1]).transpose(1,2) out = self.refine_layer1(fea.reshape([ b * n, l, -1]), cords.reshape([ b * n, l, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, n, l, -1]).transpose(1,2) cords = cords.reshape([b, n, l, -1]).transpose(1,2) out = self.refine_layer2(fea.reshape([ b * l, n, -1]), cords.reshape([ b * l, n, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, l, n, -1]).transpose(1,2) cords = cords.reshape([b, l, n, -1]).transpose(1,2) out = self.refine_layer3(fea.reshape([ b * n, l, -1]), cords.reshape([ b * n, l, -1]), is_fea = True) fea, cords = out[-1] cords = cords.reshape([b, n, l, -1]).transpose(1,2) cords = cords.reshape([b, l * n, 3]) return cords class Swish_(torch.nn.Module): def forward(self, x): return x * x.sigmoid() SiLU = torch.nn.SiLU if hasattr(torch.nn, 'SiLU') else Swish_ class CoorsNorm(torch.nn.Module): def __init__(self, eps=1e-8): super().__init__() self.eps = eps self.fn = torch.nn.LayerNorm(1) def forward(self, coors): norm = coors.norm(dim=-1, keepdim=True) normed_coors = coors / norm.clamp(min=self.eps) phase = self.fn(norm) return phase * normed_coors # classes class EGNN(torch.nn.Module): def __init__( self, dim, m_dim=32, ): super().__init__() ''' # Most of the code in this file is based on egnn-pytorch by lucidrains. 
''' edge_input_dim = (dim * 2) + 1 self.edge_mlp = torch.nn.Sequential( torch.nn.Linear(edge_input_dim, edge_input_dim * 2), SiLU(), torch.nn.Linear(edge_input_dim * 2, m_dim), SiLU() ) self.coors_norm = CoorsNorm() self.node_mlp = torch.nn.Sequential( torch.nn.Linear(dim + m_dim, dim * 2), SiLU(), torch.nn.Linear(dim * 2, dim), ) self.coors_mlp = torch.nn.Sequential( torch.nn.Linear(m_dim, m_dim * 4), SiLU(), torch.nn.Linear(m_dim * 4, 1) ) def forward(self, feats, coors): rel_coors = rearrange(coors, 'b i d -> b i () d') - rearrange(coors, 'b j d -> b () j d') rel_dist = (rel_coors ** 2).sum(dim=-1, keepdim=True) feats_j = rearrange(feats, 'b j d -> b () j d') feats_i = rearrange(feats, 'b i d -> b i () d') feats_i, feats_j = torch.broadcast_tensors(feats_i, feats_j) edge_input = torch.cat((feats_i, feats_j, rel_dist), dim=-1) m_ij = self.edge_mlp(edge_input) coor_weights = self.coors_mlp(m_ij) coor_weights = rearrange(coor_weights, 'b i j () -> b i j') rel_coors = self.coors_norm(rel_coors) scale_factor = 1 / 50.0 coors_out = torch.einsum('b i j, b i j c -> b i c', coor_weights * scale_factor, rel_coors) + coors m_i = m_ij.sum(dim=-2) node_mlp_input = torch.cat((feats, m_i), dim=-1) node_out = self.node_mlp(node_mlp_input) + feats return node_out, coors_out class ResEGNN(torch.nn.Module): def __init__(self, corrections=4, dims_in=41, **kwargs): super().__init__() self.layers = torch.nn.ModuleList([EGNN(dim=dims_in, **kwargs) for _ in range(corrections)]) def forward(self, amino, geom, is_fea = False, keep_last_cords = None): output = [] for layer in self.layers: geom_init = geom amino, geom = layer(amino, geom) if keep_last_cords is not None: geom[:, -keep_last_cords:] = geom_init[:, -keep_last_cords:] output.append([amino, geom]) return output if is_fea else geom class PosEmbedding(nn.Embedding): """ """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int): if padding_idx is not None: num_embeddings_ = num_embeddings + padding_idx + 1 else: num_embeddings_ = num_embeddings super().__init__(num_embeddings_, embedding_dim, padding_idx) self.max_positions = num_embeddings def forward(self, input: torch.Tensor): """Input is expected to be of size [bsz x seqlen].""" mask = input.ne(self.padding_idx).int() positions = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + self.padding_idx return F.embedding( positions, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, ) class AngleResnetBlock(nn.Module): def __init__(self, c_hidden): """ Args: c_hidden: Hidden channel dimension """ super(AngleResnetBlock, self).__init__() self.c_hidden = c_hidden self.linear_1 = Linear(self.c_hidden, self.c_hidden) self.linear_2 = Linear(self.c_hidden, self.c_hidden) self.relu = nn.ReLU() def forward(self, a: torch.Tensor) -> torch.Tensor: s_initial = a a = self.relu(a) a = self.linear_1(a) a = self.relu(a) a = self.linear_2(a) return a + s_initial class AngleResnet(nn.Module): """ """ def __init__(self, c_in, c_hidden, no_blocks, no_angles, epsilon): """ Args: c_in: Input channel dimension c_hidden: Hidden channel dimension no_blocks: Number of resnet blocks no_angles: Number of torsion angles to generate epsilon: Small constant for normalization """ super(AngleResnet, self).__init__() self.c_in = c_in self.c_hidden = c_hidden self.no_blocks = no_blocks self.no_angles = no_angles self.eps = epsilon self.linear_in = Linear(self.c_in, self.c_hidden) self.linear_initial = Linear(self.c_in, self.c_hidden) self.layers = nn.ModuleList() 
for _ in range(self.no_blocks): layer = AngleResnetBlock(c_hidden=self.c_hidden) self.layers.append(layer) self.linear_out = Linear(self.c_hidden, self.no_angles * 2) self.relu = nn.ReLU() def forward( self, s: torch.Tensor, s_initial: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: s: [*, C_hidden] single embedding s_initial: [*, C_hidden] single embedding as of the start of the StructureModule Returns: [*, no_angles, 2] predicted angles """ # [*, C_hidden] s_initial = self.relu(s_initial) s_initial = self.linear_initial(s_initial) s = self.relu(s) s = self.linear_in(s) s = s + s_initial for l in self.layers: s = l(s) s = self.relu(s) # [*, no_angles * 2] s = self.linear_out(s) # [*, no_angles, 2] s = s.view(s.shape[:-1] + (-1, 2)) unnormalized_s = s norm_denom = torch.sqrt( torch.clamp( torch.sum(s ** 2, dim=-1, keepdim=True), min=self.eps, ) ) s = s / norm_denom return unnormalized_s, s class InvariantPointAttention(nn.Module): """ Implements Algorithm 22. """ def __init__( self, c_s: int, c_z: int, c_hidden: int, no_heads: int, no_qk_points: int, no_v_points: int, inf: float = 1e5, eps: float = 1e-8, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_hidden: Hidden channel dimension no_heads: Number of attention heads no_qk_points: Number of query/key points to generate no_v_points: Number of value points to generate """ super(InvariantPointAttention, self).__init__() self.c_s = c_s self.c_z = c_z self.c_hidden = c_hidden self.no_heads = no_heads self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) # hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) # ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s) self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor],
r: Rigid,
2
2023-11-01 10:29:08+00:00
16k
tylerlight071/Project-Cipher
main.py
[ { "identifier": "clear_terminal", "path": "components/common_functions.py", "snippet": "def clear_terminal():\n os.system('cls' if os.name == 'nt' else 'clear')" }, { "identifier": "print_slow", "path": "components/common_functions.py", "snippet": "def print_slow(text, delay=0.00): # change to 0.01\n for char in text:\n print(char, end='', flush=True)\n time.sleep(delay)\n print()" }, { "identifier": "shop_help", "path": "components/common_functions.py", "snippet": "def shop_help():\n print_slow(Fore.YELLOW + \"Shop Help:\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(\"[buy] - Use the 'buy [upgrade]' command to purchase the upgrade in the shop. \")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[exit] - Use the 'exit' command to return to the main terminal.\")\n print_slow(\"\")" }, { "identifier": "help_user", "path": "components/common_functions.py", "snippet": "def help_user():\n print_slow(Fore.MAGENTA + \"Help:\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(\"[connect] - Use the 'connect' command to hack into Enigma Corps network.\")\n print_slow(\"\")\n print_slow(\"[mail] - Use the 'mail' command to view and respond to emails from your client and other characters.\")\n print_slow(\"\")\n print_slow(\"[balance] - Use the 'balance' command to view your current earnings which you can spend on upgrades. \")\n print_slow(\"\")\n print_slow(\"[shop] - Use the 'shop' command to view upgrades available in the shop. \")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[help] - Use the 'help' command if you need assistance at any time.\")\n print_slow(\"\")\n print_slow(\"[exit] - Use the 'exit' command to return to the Main Menu.\")\n print_slow(\"\")" }, { "identifier": "connect_help", "path": "components/common_functions.py", "snippet": "def connect_help():\n print_slow(Fore.MAGENTA + \"Connect Help:\" + Style.RESET_ALL)\n print_slow(\n \"[scan] - Use the 'scan' command to scan the network and search for available systems and vulnerabilities.\")\n print_slow(\"\")\n print_slow(\"[hack] - Use the 'hack [system/vulnerability]' to hack into different systems.\")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[disconnect] - Use the 'disconnect' command to disconnect from the current system or vulnerability.\")\n print_slow(\"\")" }, { "identifier": "mail_help", "path": "components/common_functions.py", "snippet": "def mail_help():\n print_slow(Fore.LIGHTBLUE_EX + \"Mail Help:\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(\"[l] - Use the 'l' command to list all emails.\")\n print_slow(\"\")\n print_slow(\"[r] - Use the 'r [subject]' command to read an email with the specified subject.\")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[exit] - Use the 'exit' command to return to the main terminal.\")\n print_slow(\"\")" }, { "identifier": "system_help", "path": "components/common_functions.py", "snippet": "def system_help():\n print_slow(\"\")\n print_slow(\"[mail] - Use the 'mail' command to log into the users emails.\")\n print_slow(\"\")\n print_slow(\"[l] - Use the 'l' command to list files in a users system.\")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[r] - Use the 'r 
[file]' command to read files in a users system\")\n print_slow(\"\")" }, { "identifier": "intro_call", "path": "conversations/calls.py", "snippet": "def intro_call():\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Welcome, Cipher. Operation Enigma is our covert mission against Enigma Corp, a powerful and secretive entity.\")\n print_slow(\n \"Your skills and secrecy have brought you to our attention. Your mission is to dig through their systems and servers looking for valuable data.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"Got it, Anonymous. Exposing secrets and bringing justice. I'm in.\")\n print_slow(\"What's my first move? Talk to me about this 'EnigmaLink'.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Excellent, Cipher. EnigmaLink is a specialized tool available on the Hacker's Market. It contains a hidden backdoor, allowing access to Enigma Corps servers.\")\n print_slow(\n \"Your task is to acquire EnigmaLink and initiate your infiltration. Use the 'connect' command to navigate the network and gather crucial intelligence.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"EnigmaLink, got it. I'll secure it and initiate the infiltration. What about this employee, Amy?\")\n print_slow(\"You mentioned her password is 'sexinthecity.' What's my objective with her?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Good question, Cipher. Amy is a key target. Use her password to access her computer and gather any pertinent information.\")\n print_slow(\n \"This data is vital to our cause. Be thorough and meticulous in your investigation. The success of our operation depends on it.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"Understood, Anonymous. I'll focus on Amy, gather intel, and proceed with precision.\")\n print_slow(\"Consider it done. Anything else I should know before I dive in?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"One last thing, Cipher. All collected data is highly confidential. This contract is binding, and your success is paramount.\")\n print_slow(\"Execute with diligence, and may the odds be in your favor. 
Good luck, Cipher.\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "first_call", "path": "conversations/calls.py", "snippet": "def first_call():\n clear_terminal()\n print_slow(sender_art)\n print_slow(\"\")\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW + \"That's a good start, but we already have that information.\")\n print_slow(\"Regardless, I've transferred £20 into the account for your troubles.\")\n print_slow(\"Keep digging Cipher!\" + Style.RESET_ALL)\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "second_call", "path": "conversations/calls.py", "snippet": "def second_call():\n clear_terminal()\n print_slow(sender_art)\n print_slow(\"\")\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Hey Cipher, you nailed it! 'Billy' just spilled the beans about wanting to climb the corporate ladder into management.\")\n print_slow(\n \"This is gold for us. We can guide 'Billy' toward training and workshops that align with our interests, nudging things in our favor.\")\n print_slow(\n \"Picture it – we're pulling the strings, helping 'Billy' grow, and steering the ship where we want it to go.\")\n print_slow(\"Keep the ball rolling, Cipher!\" + Style.RESET_ALL)\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "third_call", "path": "conversations/calls.py", "snippet": "def third_call():\n clear_terminal()\n\n # Anonymous Sender\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\"\n \"\")\n print_slow(\n Fore.YELLOW + \"Cipher, we've stumbled upon a perplexing development regarding Enigma's interest in a mysterious 'compound.'\")\n print_slow(\n \"I'm cross-referencing our existing intel to unveil more details. Stay vigilant and be prepared for the unknown.\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"A compound, huh? Any hints on whether we're talking metal, chemicals, or something else entirely?\")\n print_slow(\"This feels like navigating in the dark. What exactly am I dealing with?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Response\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW +\n \"Cipher, we're in the dark too. Initial reports are unclear—could be metal, chemical, or something beyond our comprehension.\")\n print_slow(\n \"Your mission is to identify the nature of this compound. Exercise extreme caution; this goes deeper than we anticipated.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Inquiry\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"So, we're playing 'guess the compound.' 
Any leads, any connections I should explore?\")\n print_slow(\"This is starting to sound like one of those high-stakes puzzles.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Clarification\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW +\n \"I wish I had more details, Cipher. This is uncharted territory for us. Investigate discreetly, and trust no one.\")\n print_slow(\n \"I'll attempt to gather more intel. Stay on the line, and keep me updated on any findings.\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "fourth_call", "path": "conversations/calls.py", "snippet": "def fourth_call():\n clear_terminal()\n\n # Anonymous Sender\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Cipher, we've got our hands on an intriguing document – an Employee Performance Review for 'Billy Constantine'.\")\n print_slow(\n \"This could be a goldmine of information. Let's dig in and see if there's anything we can leverage to our advantage.\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"An Employee Performance Review? Interesting choice. What's the scoop on 'Billy Constantine'?\")\n print_slow(\"Give me the details, and we'll figure out our next move.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Briefing\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Cipher, 'Billy Constantine' is making waves. The review highlights exceptional performance as a sales representative.\")\n print_slow(\n \"He's exceeding sales targets, mentoring new team members, and earning a solid 4.5/5 rating. A rising star, it seems.\")\n print_slow(\"We might use this to our advantage. Let's explore how we can align his ambitions with our agenda.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Strategy\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"A high-performing sales rep, huh? We could steer 'Billy' towards projects that align with our goals.\")\n print_slow(\"Let's use this performance review to our advantage. Maybe mentorship programs, leadership initiatives?\")\n print_slow(\"I'm ready to play this card strategically. What's the next move?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Next Steps\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Great thinking, Cipher. Let's work on a plan to subtly guide 'Billy' toward initiatives that benefit us.\")\n print_slow(\"We'll need to dig deeper into 'Billy's' aspirations and weave our influence seamlessly.\")\n print_slow(\"Stay vigilant, Cipher. 
This could be a game-changer.\")\n print_slow(\"\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "fifth_call", "path": "conversations/calls.py", "snippet": "def fifth_call():\n clear_terminal()\n\n # Anonymous Sender\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Cipher, we've intercepted some Meeting Minutes dated 24/06/2025. It's related to 'Project X' and involves key players.\")\n print_slow(\n \"This could be our chance to uncover more about Enigma's activities. Let's dive into the details and see what we can extract.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"Meeting Minutes, huh? 'Project X' sounds intriguing. Who were the players involved, and what's the agenda?\")\n print_slow(\"I'm ready to dissect this information and uncover any hidden gems.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Briefing\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Cipher, the meeting involved key personnel—Amy, Billy, Kyle, and others. 'Project X' is on the agenda, and there's mention of sensitive materials.\")\n print_slow(\n \"This could be a crucial insight into Enigma's plans. Let's analyze the action items and plan our next move.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Analysis\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"'Project X,' sensitive materials, and action items. This is a goldmine of information.\")\n print_slow(\n \"Let's focus on dissecting the action items and see if we can connect the dots. What's our strategy, Anonymous?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Next Steps\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Agreed, Cipher. Let's delve into the action items, especially the data compilation and safety protocol training.\")\n print_slow(\"We might uncover more about 'Project X' and gain insights into Enigma's plans.\")\n print_slow(\"Stay sharp, Cipher. 
This could be a pivotal moment in our mission.\")\n print_slow(\"\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "sixth_call", "path": "conversations/calls.py", "snippet": "def sixth_call():\n print_slow(\"ADD CALL STUFF HERE\")" }, { "identifier": "markus_seen_call", "path": "conversations/calls.py", "snippet": "def markus_seen_call():\n print_slow(\"Something goes here\")" }, { "identifier": "code_shatter_call", "path": "conversations/minigame_calls.py", "snippet": "def code_shatter_call():\n clear_terminal()\n print_slow(sender_art)\n print_slow(\"\")\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW + \"I see you have bought CodeShatter!\")\n print_slow(\"This item is a one time use upgrade so once you get the password, it is gone so use wisely!\")\n print_slow(\"But don't threat, if you fail, you get a chance to retry. The item is only used when you get the password, so be sure to write it down!\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "code_shatter_minigame", "path": "minigames/code_shatter_minigame.py", "snippet": "def code_shatter_minigame():\n # Generate a random 5-digit number\n target = [str(random.randint(1, 9)) for _ in range(5)]\n\n print_slow(\"Welcome to CodeShatter!\")\n print_slow(\"\")\n print_slow(\"Guess the 5-digit number.\")\n print_slow(\"\")\n print_slow(\"The sequence can contain multiple same numbers\")\n print_slow(\"\")\n print_slow(Fore.GREEN + \"Green: Correct digit in correct position.\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(Fore.YELLOW + \"Orange: Correct digit in incorrect position.\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(Fore.RED + \"Red: Incorrect digit.\" + Style.RESET_ALL)\n print_slow(\"\")\n\n attempts = 0\n while attempts < 7:\n # Get the user's guess\n guess = input(\"Enter your guess: \")\n\n if len(guess) != 5 or not guess.isdigit():\n print_slow(\"Invalid input. Please enter a 5-digit number.\")\n continue\n\n attempts += 1\n\n # Check the guess against the target\n feedback = []\n for i in range(5):\n if guess[i] == target[i]:\n feedback.append(Fore.GREEN + guess[i] + Style.RESET_ALL)\n elif guess[i] in target:\n feedback.append(Fore.YELLOW + guess[i] + Style.RESET_ALL)\n else:\n feedback.append(Fore.RED + guess[i] + Style.RESET_ALL)\n\n print_slow(\"Feedback: \" + \" \".join(feedback))\n\n # Check if the guess is correct\n if guess == \"\".join(target):\n print_slow(Fore.GREEN + \"Access granted.\" + Style.RESET_ALL)\n break\n else:\n print_slow(Fore.RED + \"Access denied. 
Too many attempts.\" + Style.RESET_ALL)\n time.sleep(1)\n print_slow(\"\")\n print_slow(Fore.RED + \"Rebooting CodeShatter with new proxy...\" + Style.RESET_ALL)\n time.sleep(1)\n clear_terminal()\n code_shatter_minigame()" }, { "identifier": "port_scanning", "path": "minigames/eye_spy_minigame.py", "snippet": "def port_scanning():\n num_ports = 10\n open_ports, closed_ports = generate_ports(num_ports)\n attempts = 5\n correct_guesses = 0\n scan_attempts = 2\n\n print_slow(\"Welcome to the Port Scanning minigame!\")\n print_slow(\"\")\n print_slow(f\"Find the open ports in the range 1-{num_ports}.\")\n print_slow(\"\")\n print_slow(f\"You have {attempts} attempts.\")\n print_slow(\"\")\n\n while scan_attempts > 0:\n print_slow(\"\")\n print_slow(f\"\\nYou have {scan_attempts} scan attempts left.\")\n print_slow(\"\")\n start = int(input(\"Enter the start of the range to scan: \"))\n print_slow(\"\")\n end = int(input(\"Enter the end of the range to scan: \"))\n print_slow(\"\")\n\n num_open_ports_in_range = len(open_ports.intersection(range(start, end + 1)))\n print_slow(\"\")\n print_slow(f\"There are {num_open_ports_in_range} open ports in the range {start}-{end}.\")\n\n scan_attempts -= 1\n\n while attempts > 0 and len(open_ports) > 0:\n port = int(input(\"\\nEnter a port number to guess: \"))\n\n if port in open_ports:\n print_slow(Fore.GREEN + \"Port is open!\" + Style.RESET_ALL)\n open_ports.remove(port)\n correct_guesses += 1\n elif port in closed_ports:\n print_slow(Fore.RED + \"Port is closed.\" + Style.RESET_ALL)\n closed_ports.remove(port)\n else:\n print_slow(\"Invalid port number. Please enter a number between 1 and\", num_ports)\n\n attempts -= 1\n\n if len(open_ports) == 0:\n print_slow(\n Fore.GREEN + \"\\nCongratulations! You have successfully found all the open ports and gained access to the camera.\" + Style.RESET_ALL)\n time.sleep(2)\n clear_terminal()\n else:\n print_slow(\n Fore.RED + f\"\\nHack Failed! You found {correct_guesses} out of {len(open_ports) + correct_guesses} open ports.\" + Style.RESET_ALL)\n time.sleep(1)\n clear_terminal()\n port_scanning()" }, { "identifier": "AmySystem", "path": "systems/level_1/amy/amy_system.py", "snippet": "class AmySystem:\n def __init__(self):\n self.files = [\n {\n \"name\": \"return_to_work_form.txt\",\n \"content\": (\n \"Employee Name: _______________\\n\"\n \"Employee ID: ____________\\n\"\n \"Department: _______________\\n\"\n \"Date of Return: ______\\n\\n\"\n \"I, [Employee Name], certify that I have followed the company's \"\n \"guidelines for returning to work after an absence. \"\n \"I understand that it is my responsibility to adhere to all safety \"\n \"protocols and procedures to ensure the health and well-being of my \"\n \"colleagues and myself.\\n\\n\"\n \"I acknowledge that I have completed any necessary training and have \"\n \"been briefed on any updates to the company's policies and procedures. \"\n \"I am aware that I must report any symptoms or exposure to COVID-19 to \"\n \"my supervisor immediately.\\n\\n\"\n \"I am committed to doing my part to maintain a safe and healthy work \"\n \"environment for everyone. I will continue to follow all guidelines \"\n \"and protocols and will cooperate with any additional measures that \"\n \"may be implemented in the future.\\n\\n\"\n \"Signature: [Employee Signature]\\n\"\n \"Date: [Date]\"\n )\n },\n {\n \"name\": \"employee_handbook.txt\",\n \"content\": (\n \"Welcome to Enigma Corps We are thrilled to have you as part of our \"\n \"team. 
This employee handbook has been designed to help you understand \"\n \"our company's policies, procedures, and expectations.\\n\\n\"\n \"Our company is committed to fostering a positive and inclusive work \"\n \"environment where all employees feel valued and supported. We believe \"\n \"in treating everyone with respect and dignity and expect all employees \"\n \"to do the same.\\n\\n\"\n \"In this handbook, you will find information on topics such as:\\n\\n\"\n \"- Code of Conduct\\n\"\n \"- Dress Code\\n\"\n \"- Attendance and Punctuality\\n\"\n \"- Time Off and Leave Policies\\n\"\n \"- Performance Evaluations\\n\"\n \"- Health and Safety\\n\"\n \"- Equal Employment Opportunity\\n\"\n \"- Harassment and Discrimination\\n\\n\"\n \"Please take the time to read through this handbook carefully and \"\n \"familiarize yourself with our policies and procedures. If you have any \"\n \"questions or concerns, do not hesitate to reach out to your supervisor \"\n \"or the HR department.\\n\\n\"\n \"We look forward to working with you and hope you have a long and \"\n \"successful career with Enigma Corps!\"\n )\n },\n {\n \"name\": \"benefits_summary.txt\",\n \"content\": (\n \"At Enigma Corps, we believe in taking care of our employees and \"\n \"offer a comprehensive benefits package to support your health, well-being, \"\n \"and financial security. Below is a summary of the benefits available to \"\n \"you as an employee of Enigma Corps.\\n\\n\"\n \"Health Insurance: We offer a choice of medical, dental, and vision \"\n \"plans to meet your needs. Our plans provide coverage for preventive care, \"\n \"hospitalization, prescription drugs, and more.\\n\\n\"\n \"Retirement Savings: We offer a 401(k) plan with a generous company \"\n \"match to help you save for your future. You can choose from a variety of \"\n \"investment options to suit your needs.\\n\\n\"\n \"Paid Time Off: We provide a generous amount of paid time off, \"\n \"including vacation, sick leave, and holiday pay. We also offer paid \"\n \"parental leave for new parents.\\n\\n\"\n \"Flexible Work Arrangements: We understand the importance of work-life \"\n \"balance and offer flexible work arrangements, such as remote work and \"\n \"flexible schedules, where possible.\\n\\n\"\n \"Wellness Programs: We offer a variety of wellness programs and \"\n \"resources to support your physical and mental health, including fitness \"\n \"classes, stress management programs, and counseling services.\\n\\n\"\n \"Professional Development: We are committed to supporting your growth \"\n \"and development and offer a variety of training and development \"\n \"opportunities, including tuition reimbursement, workshops, and seminars.\"\n \"\\n\\n\"\n \"We encourage you to review this summary carefully and take advantage of \"\n \"the benefits available to you. If you have any questions or need further \"\n \"information, please contact the HR department.\"\n )\n },\n ]\n self.emails = [\n {\n \"sender\": \"Amy\",\n \"subject\": \"Can't Stop Thinking About You\",\n \"body\": (\n \"Hey Billy,\\n\\n\"\n \"I hope this message finds you in good spirits. I've been meaning to write to you for a while now, but I couldn't find the right words to express what I've been feeling.\\n\\n\"\n \"Ever since that night we spent together, I can't seem to get you out of my mind. There's something about the way you make me feel that I've never experienced before. 
\"\n \"\\nIt's exhilarating, yet terrifying all at the same time.\\n\\n\"\n \"I know we both have a lot on our plates right now, and I don't want to add any more stress to your life. But I can't help but wonder what could happen if we gave this a real shot. \"\n \"I know it's complicated, and there are a lot of factors to consider, but I think we owe it to ourselves to explore this connection we have.\\n\\n\"\n \"I understand if you're not ready to take that step, and I don't want to pressure you into anything you're not comfortable with. \"\n \"\\nBut I can't shake the feeling that we could have something truly special together.\\n\\n\"\n \"I'd love to hear your thoughts on this, and I'm more than willing to take things slow if that's what you need. Maybe we could meet up for dinner and talk about it in person?\"\n \" I think it would be easier to have this conversation face-to-face.\\n\\n\"\n \"I hope you're doing well, and I look forward to hearing from you soon.\\n\\n\"\n \"Take care,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Need Your Help on the Smith Project\",\n \"body\": (\n \"Hi Billy,\\n\\n\"\n \"I hope this email finds you well. I wanted to reach out and ask for your help on the Smith project. I've been having some trouble with the data analysis portion,\"\n \"\\nand I know you have a lot of experience in that area.\\n\\n\"\n \"The project involves analyzing customer feedback data to identify trends and areas for improvement. I've been working on it for a few weeks now, but I'm finding it challenging to make sense of the data and\"\n \"\\ndraw meaningful conclusions.\\n\\n\"\n \"Would you be available for a quick meeting later this week to go over some of the data with me? I would really appreciate your input and guidance on this. \"\n \"\\nI think your expertise could really help me make progress and ensure the success of the project.\\n\\n\"\n \"If you're available, please let me know your preferred date and time, and I'll send out a calendar invite. I'm flexible and can work around your schedule.\\n\\n\"\n \"Thank you in advance for your help, and I look forward to hearing from you soon.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Request for Time Off\",\n \"body\": (\n \"Good Afternoon Katie,\\n\\n\"\n \"I hope this email finds you well. I wanted to request some time off next month for a family vacation. I am planning to be out of the office from 10/09/2024 to 18/09/2024\\n\\n\"\n \"I have been working hard on the Johnson project and have made significant progress. I will make sure to finish up any outstanding work and hand off any ongoing projects to my colleagues before I leave. I will also be available by email in case of any urgent matters.\\n\\n\"\n \"I understand that this is a busy time for the team, and I want to ensure that my absence doesn't cause any disruptions. I have already spoken to Markus and he has kindly agreed to cover for me while I'm away.\\n\\n\"\n \"Thank you for considering my request. I look forward to spending some quality time with my family and coming back to work refreshed and recharged.\"\n \"\\nI am confident that the time off will help me come back with renewed energy and focus.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Apology for the Mistake\",\n \"body\": (\n \"Good Morning Kyle,\\n\\n\"\n \"I hope this email finds you well. I wanted to reach out and apologize for the mistake I made on the Johnson report. 
I realize now that I overlooked some important data, and I take full responsibility for it.\\n\\n\"\n \"I have gone back and corrected the report, and I will make sure to double-check my work in the future to avoid any similar mistakes. I have also attached the updated report for your reference.\\n\\n\"\n \"I understand if you are disappointed or frustrated, and I am more than willing to do whatever it takes to make it right. Please let me know if there's anything else I can do to fix this,\"\n \"\\nor if you would like to discuss this further.\\n\\n\"\n \"Once again, I am truly sorry for the mistake, and I appreciate your understanding. I value our working relationship and hope that this incident doesn't tarnish it. I am committed to making amends and ensuring that this doesn't happen again in the future.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Thank You for Letting Me Use Your Computer\",\n \"body\": (\n \"Hey Billy,\\n\\n\"\n \"I wanted to take a moment to express my gratitude for allowing me to use your computer while mine was being serviced by IT. \"\n \"It was a huge help and allowed me to stay productive during that time.\\n\\n\"\n \"I also noticed that your password is 'football'. While I understand it's easy to remember, it's important to choose a more secure password to protect your accounts.\"\n \"\\nI would recommend changing it to something more complex and unique. You never know who's watching after all.\\n\\n\"\n \"Thanks again for your generosity and understanding.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n }\n ]\n\n def list_files(self):\n print_slow(\"\\nFiles:\")\n for file in self.files:\n print_slow(f\"\\n{file['name']}\")\n\n def read_file(self, file_name):\n file_found = False\n for file in self.files:\n if file['name'] == file_name:\n file_found = True\n return file['content']\n\n if not file_found:\n print_slow(\"\\nNo file found with that name, please try again.\")\n return None\n\n def list_emails(self):\n print_slow(\"\\nEmails:\")\n for i, email in enumerate(self.emails):\n print_slow(f\"\\n{email['subject']} - From: {email['sender']}\")\n\n def read_email(self, subject):\n for email in self.emails:\n if email['subject'].lower() == subject.lower():\n print_slow(f\"\\nFrom: {email['sender']}\\nSubject: {email['subject']}\\n\\n{email['body']}\")\n return\n print_slow(\"\\nNo email found with that subject, please try again.\")" }, { "identifier": "BillySystem", "path": "systems/level_1/billy/billy_system.py", "snippet": "class BillySystem:\n def __init__(self):\n self.files = [\n {\n \"name\": \"cover_letter.txt\",\n \"content\": (\n \"Dear Hiring Manager,\\n\\n\"\n \"I am writing to express my interest in the management position at Enigma Corps. \"\n \"I have been with the company for over 7 years and have consistently demonstrated my commitment to driving excellence and fostering collaboration within the team.\\n\\n\"\n \"During my tenure at Enigma Corps, I have been involved in various projects, including the successful completion of the Q3 deliverables project, where I played a key role in the planning and execution stages. \"\n \"My dedication to achieving project milestones and my ability to work under pressure make me a strong candidate for a management role.\\n\\n\"\n \"I possess strong leadership skills, which I have honed through my experiences in leading teams and coordinating cross-functional efforts. 
\"\n \"My ability to communicate effectively and build positive relationships with team members and stakeholders has resulted in successful project outcomes and increased productivity.\\n\\n\"\n \"In addition to my technical and leadership skills, I am also committed to continuous learning and professional development. \"\n \"I have participated in various training programs and workshops to enhance my management skills and stay up-to-date with industry trends and best practices.\\n\\n\"\n \"I am excited about the opportunity to contribute to the growth and success of Enigma Corps as a member of the management team. \"\n \"I am confident that my skills and experience will be valuable assets to the company, and I look forward to the opportunity to work closely with the team to drive innovation and excellence.\\n\\n\"\n \"Thank you for considering my application. I am looking forward to the opportunity to discuss my qualifications further and explore how I can contribute to the success of Enigma Corps.\\n\\n\"\n \"Sincerely,\\n\"\n \"Billy Constantine\\n\"\n )\n },\n {\n \"name\": \"employee_handbook.txt\",\n \"content\": (\n \"Welcome to Enigma Corps We are thrilled to have you as part of our \"\n \"team. This employee handbook has been designed to help you understand \"\n \"our company's policies, procedures, and expectations.\\n\\n\"\n \"Our company is committed to fostering a positive and inclusive work \"\n \"environment where all employees feel valued and supported. We believe \"\n \"in treating everyone with respect and dignity and expect all employees \"\n \"to do the same.\\n\\n\"\n \"In this handbook, you will find information on topics such as:\\n\\n\"\n \"- Code of Conduct\\n\"\n \"- Dress Code\\n\"\n \"- Attendance and Punctuality\\n\"\n \"- Time Off and Leave Policies\\n\"\n \"- Performance Evaluations\\n\"\n \"- Health and Safety\\n\"\n \"- Equal Employment Opportunity\\n\"\n \"- Harassment and Discrimination\\n\\n\"\n \"Please take the time to read through this handbook carefully and \"\n \"familiarize yourself with our policies and procedures. If you have any \"\n \"questions or concerns, do not hesitate to reach out to your supervisor \"\n \"or the HR department.\\n\\n\"\n \"We look forward to working with you and hope you have a long and \"\n \"successful career with Enigma Corps!\"\n )\n },\n {\n \"name\": \"meeting_minutes.txt\",\n \"content\": (\n \"Meeting Minutes\\n\\n\"\n \"Date: 24/06/2025\\n\"\n \"Location: REDACTED\\n\"\n \"Attendees: Amy, REDACTED, Billy, Kyle, REDACTED, REDACTED, REDACTED\\n\\n\"\n \"Agenda:\\n\"\n \"- Discuss progress on Project REDACTED\\n\"\n \"- Review safety protocols for handling sensitive materials\\n\"\n \"- Plan next steps for research and development\\n\\n\"\n \"Action Items:\\n\"\n \"- Compile data from recent experiments and share with team\\n\"\n \"- Schedule training session on updated safety protocols\\n\"\n \"- Develop timeline for next phase of Project X\\n\\n\"\n \"Next Meeting: 05/08/24, 12:00pm\\n\"\n )\n },\n {\n \"name\": \"employee_performance_review.txt\",\n \"content\": (\n \"Employee Performance Review\\n\\n\"\n \"Employee Name: Billy Constantine\\n\"\n \"Employee ID: 035854\\n\"\n \"Review Date: 28/06/2024\\n\\n\"\n \"Performance Summary:\\n\"\n \"Billy has demonstrated exceptional performance in his role as a sales representative. 
He has consistently exceeded sales targets, built strong relationships with clients, and demonstrated leadership qualities in team meetings and projects.\\n\\n\"\n \"Strengths:\\n\"\n \"- Exceeded quarterly sales targets by 15%.\\n\"\n \"- Successfully onboarded and mentored two new team members.\\n\"\n \"- Demonstrated excellent communication and negotiation skills.\\n\\n\"\n \"Areas for Improvement:\\n\"\n \"- Time management skills can be further developed to ensure all tasks are completed in a timely manner.\\n\"\n \"- Continued development of technical knowledge to stay up-to-date with industry trends.\\n\"\n \"- Strengthen collaboration with cross-functional teams to drive more integrated solutions.\\n\\n\"\n \"Goals for Next Review Period:\\n\"\n \"- Increase sales targets by 20%.\\n\"\n \"- Complete a management training program.\\n\"\n \"- Improve time management skills through prioritization and delegation.\\n\\n\"\n \"Overall Rating: 4.5/5\\n\"\n \"Reviewer Name: Katie Thompson\\n\"\n \"Reviewer Signature: Katie Thompson\\n\"\n \"Date: 28/06/2024\\n\"\n )\n }\n ]\n self.emails = [\n\n {\n \"sender\": \"Billy\",\n \"subject\": \"Re: Need Your Help on the Smith Project\",\n \"body\": (\n \"Hi Amy,\\n\\n\"\n \"I hope this message finds you in great spirits! I'm more than happy to lend a helping hand with the Smith project. After all, two heads are better than one, especially when it comes to data analysis, right?\\n\\n\"\n \"How about we grab a coffee and chat about the project in person? I think it would be nice to catch up and discuss the data over a cup of joe. I'm sure we can brainstorm some ideas and come up with a game plan together.\\n\\n\"\n \"I'm free [date] at [time], does that work for you? If not, just let me know your availability, and we can find a time that suits us both. I'm really looking forward to our coffee date and tackling the project together.\\n\\n\"\n \"Can't wait to see you and dive into the data!\\n\\n\"\n \"Best,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Project Update\",\n \"body\": (\n \"Hello Team,\\n\\n\"\n \"I wanted to provide everyone with a quick update on our progress with the Q3 deliverables project. We've successfully completed the initial research phase and are now moving into the planning stage.\\n\\n\"\n \"In our last meeting, we discussed the following key points:\\n\"\n \"- Compound Analysis: We've identified a unique compound with potential applications in various industries. Further testing and analysis are required to unlock its full potential.\\n\"\n \"- Resource Management: We've allocated a special team and dedicated resources to handle the delicate nature of this project, ensuring utmost confidentiality and security.\\n\"\n \"- Safety Protocols: We've developed strict safety protocols to handle the compound, and we're conducting regular training sessions to ensure compliance.\\n\\n\"\n \"Our next steps include finalizing the project plan, assigning tasks to team members, and setting deadlines. I would appreciate input and feedback from all team members to ensure we're on the right track. Please review the attached project plan document for more details.\\n\\n\"\n \"Additionally, I want to remind everyone of the confidential nature of this project. It's imperative that we maintain discretion and follow all security protocols to safeguard our work. 
Let's work together to make this project a success and uphold the company's reputation for innovation and excellence.\\n\\n\"\n \"If you have any questions or concerns, please don't hesitate to reach out. Your cooperation and commitment to this project are greatly appreciated.\\n\\n\"\n \"Best regards,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Re: Can't Stop Thinking About You\",\n \"body\": (\n \"Hey there, Amy,\\n\\n\"\n \"Wow, your message really caught me by surprise! But in the best way possible, of course. I've been trying to play it cool, but I have to admit, I've been thinking about that night a lot too. There was just something electric in the air, wasn't there?\\n\\n\"\n \"I've been tossing and turning, wondering if I should reach out to you or if I should wait for you to make the first move. I guess you beat me to it, and I'm glad you did. It's like you read my mind.\\n\\n\"\n \"I can't deny that there's a certain chemistry between us, and I'm intrigued to see where it could lead. I agree that our lives are complicated, and we don't want to add more stress to each other's plates. But sometimes, taking a risk is what makes life exciting, don't you think?\\n\\n\"\n \"I don't want to rush things or make you feel pressured in any way. I'm more than happy to take things slow and let them unfold naturally. But I can't help but imagine the possibilities if we give this a real shot. We could have something truly special, and I don't want to let that pass us by.\\n\\n\"\n \"How about we meet up for dinner and drinks next week? We can talk about it more and see where the night takes us. I think it would be a fun and relaxed way to get to know each other better and explore this connection we have. What do you say?\\n\\n\"\n \"I hope you're doing well, and I'm eagerly awaiting your reply. Until then, I'll be daydreaming about our next encounter.\\n\\n\"\n \"Take care, and talk to you soon.\\n\\n\"\n \"Yours truly,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Re: Thank You for Letting Me Use Your Computer\",\n \"body\": (\n \"Hey Amy,\\n\\n\"\n \"No problem at all! I'm always here to help out when I can. It's what teammates do, right?\\n\\n\"\n \"Oh, and about the password thing – haha, I know it's not the most secure choice. I've been meaning to change it, but I guess old habits die hard, right? \"\n \"Thanks for looking out for me though! I'll try to come up with something a bit more creative next time.\\n\\n\"\n \"If you ever need anything else, just give me a shout. Happy to help!\\n\\n\"\n \"Take care,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Professional Development\",\n \"body\": (\n \"Good Evening Katie,\\n\\n\"\n \"I hope this email finds you well. I'm reaching out to express my interest in professional development opportunities within the company, particularly in the area of management and leadership.\\n\\n\"\n \"I've been with the company for several years now, and I've had the chance to work on various projects and collaborate with different teams. I'm keen to build on this experience and take on more responsibility, and I believe that acquiring the necessary skills for a management role would be a great next step in my career.\\n\\n\"\n \"Could you please provide information on available training programs, workshops, or seminars that focus on leadership development and management skills? 
I'm particularly interested in areas such as team leadership, strategic planning, conflict resolution, and decision-making.\\n\\n\"\n \"Additionally, if there are any tuition reimbursement programs or resources for management training and certification, I'd like to learn more about them. I'm committed to investing time and effort in my professional growth and believe that these opportunities would greatly benefit both myself and the company.\\n\\n\"\n \"Your guidance and assistance in exploring these options would be greatly appreciated. I look forward to your response and any recommendations you may have.\\n\\n\"\n \"Thank you for your support, and I'm excited about the prospect of contributing to the company's success in a management role.\\n\\n\"\n \"Best regards,\\n\"\n \"Billy\"\n )\n }\n ]\n\n def list_files(self):\n print_slow(\"\\nFiles:\")\n for file in self.files:\n print_slow(f\"\\n{file['name']}\")\n\n def read_file(self, file_name):\n file_found = False\n for file in self.files:\n if file['name'] == file_name:\n file_found = True\n return file['content']\n\n if not file_found:\n print_slow(\"\\nNo file found with that name, please try again.\")\n return None\n\n def list_emails(self):\n print_slow(\"\\nEmails:\")\n for i, email in enumerate(self.emails):\n print_slow(f\"\\n{email['subject']} - From: {email['sender']}\")\n\n def read_email(self, subject):\n for email in self.emails:\n if email['subject'].lower() == subject.lower():\n print_slow(f\"\\nFrom: {email['sender']}\\nSubject: {email['subject']}\\n\\n{email['body']}\")\n return\n print_slow(\"\\nNo email found with that subject, please try again.\")" }, { "identifier": "camera_first", "path": "systems/level_1/cameras/camera_1.py", "snippet": "def camera_first():\n print(camera_1)\n print()\n print()\n move = input(Fore.GREEN + \"> \" + Style.RESET_ALL)\n\n if move.lower() == \"forward\":\n clear_terminal()\n camera_second()\n elif move.lower() == \"back\":\n print(Fore.RED + \"There is nothing to go back to...\" + Style.RESET_ALL)\n time.sleep(2)\n clear_terminal()\n camera_first()" }, { "identifier": "MarkusSystem", "path": "systems/level_1/markus/markus_system.py", "snippet": "class MarkusSystem:\n def __init__(self):\n self.files = [\n {\n \"name\": \"system_log.txt\",\n \"content\": (\n \"Enigma Corps System Log\\n\\n\"\n \"Date: 2023-11-16 08:00 AM\\n\"\n \"Event Type: System Startup\\n\"\n \"Description: The Enigma Corps systems smoothly initiated startup procedures, ensuring a seamless beginning to the workday.\\n\\n\"\n \"Date: 2023-11-16 10:30 AM\\n\"\n \"Event Type: Network Upgrade\\n\"\n \"Description: Implemented a network upgrade to enhance data transfer speeds, providing improved efficiency across departments.\\n\\n\"\n \"Date: 2023-11-16 01:45 PM\\n\"\n \"Event Type: Security Patch Applied\\n\"\n \"Description: Critical security patch successfully applied to safeguard against potential vulnerabilities, ensuring system integrity.\\n\\n\"\n \"Date: 2023-11-16 04:20 PM\\n\"\n \"Event Type: Server Maintenance\\n\"\n \"Description: Conducted routine maintenance on Enigma Corps servers, optimizing performance and minimizing downtime.\\n\\n\"\n \"This dynamic system log captures key events, from the smooth startup of the day to network upgrades, security enhancements, and routine maintenance. 
It serves as a valuable record for troubleshooting and analysis, ensuring the optimal functionality of Enigma Corps systems.\"\n )\n },\n {\n \"name\": \"technical_documentation.docx\",\n \"content\": (\n \"Enigma Corps System Technical Documentation\\n\\n\"\n \"1. System Architecture:\\n\"\n \" - Overview of the system's structural design and components.\\n\\n\"\n \"2. Network Configuration:\\n\"\n \" - Details on the configuration of Enigma Corps' network setup for efficient communication.\\n\\n\"\n \"3. Security Protocols:\\n\"\n \" - Comprehensive overview of security measures and protocols implemented to safeguard sensitive data.\\n\\n\"\n \"4. Troubleshooting Guide:\\n\"\n \" - Step-by-step guide for identifying and resolving common issues to ensure seamless system functionality.\\n\\n\"\n \"5. Software Installation Procedures:\\n\"\n \" - Instructions for installing and updating software components within the Enigma Corps system.\\n\\n\"\n \"6. Hardware Specifications:\\n\"\n \" - Detailed specifications of the hardware components utilized in the Enigma Corps infrastructure.\\n\\n\"\n \"This meticulously crafted technical documentation serves as a go-to resource for understanding the Enigma Corps system, covering everything from its architecture and network configuration to security protocols, troubleshooting, and hardware specifications. It's an invaluable reference for maintaining optimal system performance.\"\n )\n },\n {\n \"name\": \"passwords.txt\",\n \"content\": (\n \"Sensitive Password Information for Enigma Corps\\n\\n\"\n \"Admin Password: *********\\n\"\n \"Database Password: *********\\n\"\n \"Router Password: *********\\n\"\n \"WiFi Password: *********\\n\"\n \"Encryption Key: *********\\n\\n\"\n \"Warning: This file contains confidential information. Keep it secure, and refrain from sharing passwords without explicit authorization. Safeguarding this information is crucial to maintaining the security and integrity of the Enigma Corps systems.\"\n )\n },\n {\n \"name\": \"software_inventory.csv\",\n \"content\": (\n \"Software Inventory for Enigma Corps\\n\\n\"\n \"Software Name, Version, License Key\\n\"\n \"1. Enigma Security Suite, v2.0, X1Y2Z3A4-B5C6D7E8-F9G0H1I2\\n\"\n \"2. DataGuard Backup, v1.5, Y3X2W1V0-U9T8S7R6-Q5P4O3N2\\n\"\n \"3. Office Suite, v2022, Z9Z8Z7Z6-Z5Z4Z3Z2-Z1Z0Z9Z8-Z7Z6Z5\\n\"\n \"4. VPN Client, v3.1, W6W5W4W3-W2W1W0-W9W8W7-W6W5W4\\n\"\n \"5. Project Management Tool, v4.2, VV8V7V6V5-V4V3V2V1-V0V9V8V7-V6V5V4\\n\\n\"\n \"Important: This inventory is crucial for tracking and managing software across Enigma Corps systems. The provided license keys are randomized for security reasons. Handle this information responsibly, and ensure it is only accessible to authorized personnel to maintain the security and compliance of our software assets.\"\n )\n }\n ]\n self.emails = [\n # Email to Management\n {\n \"sender\": \"Markus\",\n \"subject\": \"System Maintenance Scheduled\",\n \"body\": (\n \"Dear Michael,\\n\\n\"\n \"I hope this email finds you well. We wanted to inform you that we have scheduled a system maintenance session for the upcoming weekend to ensure the optimal performance and security of our systems.\\n\\n\"\n \"Maintenance Details:\\n\"\n \"- Date: 16/12/23 - 17/12/23\\n\"\n \"- Time: 3:00pm\\n\"\n \"- Duration: 1 Hour\\n\"\n \"- Impact: No impact expected\\n\\n\"\n \"During this period, there might be temporary disruptions in certain services. Our team will be working diligently to minimize any inconvenience. 
If you have any concerns or specific considerations, please feel free to reach out to us.\\n\\n\"\n \"Thank you for your understanding and cooperation.\\n\\n\"\n \"Best regards,\\n\"\n \"IT Department\"\n )\n },\n {\n # Email to Employees\n \"sender\": \"Markus\",\n \"subject\": \"Upcoming Software Update\",\n \"body\": (\n \"Good afternoon, Kyle,\\n\\n\"\n \"We hope you're doing well. Our IT team is excited to inform you about an upcoming software update that will enhance the functionality and security of our systems. The update is scheduled for [Date] at [Time]. Please take note of the following details:\\n\\n\"\n \"- Expected Duration: Two Days\\n\"\n \"- Action Required: As this will be processed during the weekend, no action is required.\\n\"\n \"- Impact: While we anticipate minimal impact on your day-to-day activities, it's essential to be aware of any potential changes. These include: New UI to navigate, logging in or logging out issues.\\n\\n\"\n \"We recommend saving your work and logging out of your system before the update. If you encounter any issues post-update, don't hesitate to contact our IT support team for assistance.\\n\\n\"\n \"Thank you for your cooperation and understanding.\\n\\n\"\n \"Best regards,\\n\"\n \"IT Support Team\"\n )\n },\n # Email from Markus to Billy\n {\n \"sender\": \"Markus\",\n \"subject\": \"Urgent: Password Security Update Required\",\n \"body\": (\n \"Billy,\\n\\n\"\n \"I hope this email finds you well. I wanted to bring to your attention the importance of updating your current password. This is not the first time I've raised this concern, and I want to emphasize its critical nature.\\n\\n\"\n \"In recent security assessments, it has been flagged that your current password might not meet the latest security standards. To ensure the safety of your account and our overall cybersecurity, it is imperative that you change your password promptly.\\n\\n\"\n \"I understand that these reminders may seem repetitive, but they stem from a genuine concern for the security of your account and our collective responsibility in maintaining a robust cybersecurity posture.\\n\\n\"\n \"Please take a moment at your earliest convenience to update your password. If you encounter any issues or have questions, feel free to reach out. Your cooperation is greatly appreciated.\\n\\n\"\n \"Best regards,\\n\"\n \"Markus, Security Team\"\n )\n }\n\n ]\n\n def list_files(self):\n print_slow(\"\\nFiles:\")\n for file in self.files:\n print_slow(f\"\\n{file['name']}\")\n\n def read_file(self, file_name):\n file_found = False\n for file in self.files:\n if file['name'] == file_name:\n file_found = True\n return file['content']\n\n if not file_found:\n print_slow(\"\\nNo file found with that name, please try again.\")\n return None\n\n def list_emails(self):\n print_slow(\"\\nEmails:\")\n for i, email in enumerate(self.emails):\n print_slow(f\"\\n{email['subject']} - From: {email['sender']}\")\n\n def read_email(self, subject):\n for email in self.emails:\n if email['subject'].lower() == subject.lower():\n print_slow(f\"\\nFrom: {email['sender']}\\nSubject: {email['subject']}\\n\\n{email['body']}\")\n return\n print_slow(\"\\nNo email found with that subject, please try again.\")" } ]
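Each entry in the context list that closes above is a small record with `identifier`, `path`, and `snippet` keys. As a hedged illustration only (the helper below is hypothetical consumer code, not part of either repository), entries of that shape can be flattened into a single context string for a completion prompt:

```python
def format_context(entries):
    """Join retrieved snippets into one context block.

    Assumes each entry carries the 'identifier', 'path', and 'snippet'
    keys seen in the records above; purely illustrative.
    """
    parts = [
        f"# {entry['identifier']} ({entry['path']})\n{entry['snippet']}"
        for entry in entries
    ]
    return "\n\n".join(parts)
```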
import msvcrt import os import pickle import sys import time import colorama import pygame from colorama import Fore, Style from components.common_functions import clear_terminal, print_slow, shop_help, help_user, connect_help, mail_help, \ system_help from conversations.calls import intro_call, first_call, second_call, third_call, fourth_call, fifth_call, sixth_call, \ markus_seen_call from conversations.minigame_calls import code_shatter_call from minigames.code_shatter_minigame import code_shatter_minigame from minigames.eye_spy_minigame import port_scanning from systems.level_1.amy.amy_system import AmySystem from systems.level_1.billy.billy_system import BillySystem from systems.level_1.cameras.camera_1 import camera_first from systems.level_1.markus.markus_system import MarkusSystem
13,723
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = [] amy_system = AmySystem()
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = [] amy_system = AmySystem()
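The setup block above leans on pygame's mixer API for looping background music. A minimal standalone sketch of that pattern is shown below (assuming a local `bg_music.mp3` next to the script); note that the support-prompt environment variable only takes effect if it is set before pygame is imported.

```python
import os

# Must be set before importing pygame for the console banner to be suppressed.
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "1"

import pygame

pygame.mixer.init()                      # start the audio subsystem
pygame.mixer.music.load("bg_music.mp3")  # assumed to sit next to the script
pygame.mixer.music.play(-1)              # -1 loops the track indefinitely
pygame.mixer.music.set_volume(0.2)       # volume is a float in [0.0, 1.0]
```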
billy_system = BillySystem()
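The bare value above is the recorded ground-truth continuation of the code block shown earlier: that block ends by instantiating the Amy system, and the next line instantiates the Billy system. A trivial sketch of the two lines side by side (imports copied from the record's import list):

```python
from systems.level_1.amy.amy_system import AmySystem
from systems.level_1.billy.billy_system import BillySystem

amy_system = AmySystem()      # last line of the cropped code above
billy_system = BillySystem()  # recorded ground-truth next line
```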
19
2023-11-06 09:52:13+00:00
16k
ziqi-zhang/TAOISM
python/test/test_bn.py
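The record that follows is a test of a secret/enclave BatchNorm2d layer whose outputs are compared against plain `torch.nn.BatchNorm2d` (see `show_plain_error` in the snippets below). As a hedged aside, with illustrative names and no enclave involved, the core of such a forward-equivalence check reduces to:

```python
import torch

# Two BatchNorm2d modules with identical parameters and running statistics
# should agree exactly in eval mode, which is what the plain-error check probes.
ref = torch.nn.BatchNorm2d(8).eval()
dut = torch.nn.BatchNorm2d(8).eval()
dut.load_state_dict(ref.state_dict())  # copy weight, bias, running_mean, running_var

x = torch.randn(4, 8, 16, 16)
with torch.no_grad():
    err = (ref(x) - dut(x)).abs().max()
print(f"max abs forward error: {err.item():.3e}")  # ~0 when the states match
```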
[ { "identifier": "register_layer", "path": "python/common_net.py", "snippet": "def register_layer(layer, name):\n layer.register_forward_hook(hooking_layer(name))\n layer.register_backward_hook(hooking_layer_backward(name))\n layer_names.append(name)" }, { "identifier": "register_weight_layer", "path": "python/common_net.py", "snippet": "def register_weight_layer(layer, name):\n register_layer(layer, name)\n layer_weight[name] = layer.weight\n linear_layer_names.append(name)" }, { "identifier": "get_layer_weight", "path": "python/common_net.py", "snippet": "def get_layer_weight(name):\n return layer_weight[name]" }, { "identifier": "get_layer_input", "path": "python/common_net.py", "snippet": "def get_layer_input(name):\n return layer_input[name]" }, { "identifier": "get_layer_weight_grad", "path": "python/common_net.py", "snippet": "def get_layer_weight_grad(name):\n return layer_weight[name].grad.data" }, { "identifier": "get_layer_output", "path": "python/common_net.py", "snippet": "def get_layer_output(name):\n return layer_output[name]" }, { "identifier": "get_layer_output_grad", "path": "python/common_net.py", "snippet": "def get_layer_output_grad(name):\n return layer_output_grad[name]" }, { "identifier": "get_layer_input_grad", "path": "python/common_net.py", "snippet": "def get_layer_input_grad(name):\n return layer_input_grad[name]" }, { "identifier": "GlobalTensor", "path": "python/enclave_interfaces.py", "snippet": "class GlobalTensor(object):\n cpu_tensor = {}\n gpu_tensors = {}\n encrypted_tensors = {}\n LinkedTags = {}\n InverseLinkedTags = {}\n IsInitEnclaveTensor = {}\n EnclaveInterface = None\n eid = None\n is_init_global_tensor = False\n\n @staticmethod\n def init():\n if GlobalTensor.is_init_global_tensor:\n return\n GlobalTensor.EnclaveInterface = EnclaveInterface()\n GlobalTensor.EnclaveInterface.init_enclave()\n GlobalTensor.is_init_global_tensor = True\n\n @staticmethod\n def destroy():\n GlobalTensor.EnclaveInterface.destroy_enclave()\n\n GlobalTensor.cpu_tensor = {}\n GlobalTensor.gpu_tensors = {}\n GlobalTensor.encrypted_tensors = {}\n GlobalTensor.LinkedTags = {}\n GlobalTensor.InverseLinkedTags = {}\n GlobalTensor.IsInitEnclaveTensor = {}\n GlobalTensor.EnclaveInterface = None\n GlobalTensor.eid = None\n GlobalTensor.is_init_global_tensor = False\n\n\n @staticmethod\n def get_eid():\n return GlobalTensor.EnclaveInterface.get_eid()\n\n @staticmethod\n def link_tags(tag1, tag2):\n if tag1 == tag2:\n return\n\n friends = []\n\n def add_friends(tag):\n nonlocal friends\n if tag in GlobalTensor.LinkedTags:\n its_leader_tag = GlobalTensor.LinkedTags[tag]\n if its_leader_tag in GlobalTensor.InverseLinkedTags:\n friends += GlobalTensor.InverseLinkedTags.pop(its_leader_tag)\n else:\n friends += [tag]\n\n add_friends(tag1)\n add_friends(tag2)\n leader_tag = min(friends)\n\n GlobalTensor.InverseLinkedTags[leader_tag] = friends\n for t in friends:\n if t in GlobalTensor.IsInitEnclaveTensor:\n raise ValueError(\"Tags must linked before tensor initialization\")\n GlobalTensor.LinkedTags[t] = leader_tag\n\n @staticmethod\n def get_remapped_tags(tag):\n return GlobalTensor.LinkedTags[tag] if tag in GlobalTensor.LinkedTags else tag\n\n @staticmethod\n def set_cpu(tag, tensor):\n GlobalTensor.cpu_tensor[tag] = tensor.to(torch.device(\"cpu\"))\n\n @staticmethod\n def set_gpu(tag, tensor):\n GlobalTensor.gpu_tensors[tag] = tensor\n\n @staticmethod\n def set_encrypted(tag, tensor):\n GlobalTensor.encrypted_tensors[tag] = tensor\n\n @staticmethod\n def get_cpu(tag):\n return 
GlobalTensor.cpu_tensor[tag]\n\n @staticmethod\n def get_gpu(tag):\n return GlobalTensor.gpu_tensors[tag]\n\n @staticmethod\n def get_encryption(tag):\n return GlobalTensor.encrypted_tensors[tag]\n\n @staticmethod\n def init_enclave_tensor(tag, size):\n size = list(size)\n if len(size) < 4:\n size = [1] * (4 - len(size)) + size\n remapped_tag = GlobalTensor.get_remapped_tags(tag)\n if remapped_tag in GlobalTensor.IsInitEnclaveTensor:\n return\n else:\n GlobalTensor.IsInitEnclaveTensor[remapped_tag] = True\n eid = GlobalTensor.get_eid()\n GlobalTensor.EnclaveInterface.lib.InitTensor(eid, remapped_tag, size[0], size[1], size[2], size[3])\n\n @staticmethod\n def init_encrypted_tensor(tag, shape):\n GlobalTensor.encrypted_tensors[GlobalTensor.get_remapped_tags(tag)] = \\\n GlobalTensor.EnclaveInterface.create_encrypt_torch(shape)" }, { "identifier": "SecretBatchNorm2dLayer", "path": "python/layers/batch_norm_2d.py", "snippet": "class SecretBatchNorm2dLayer(SecretActivationLayer):\n # https://pytorch.org/docs/stable/nn.html#batchnorm2d\n\n BatchSize = None\n NumChannel = None\n ImgH = None\n ImgW = None\n WeightShape = None\n def __init__(\n self, sid, LayerName, EnclaveMode, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=False, merge_own_tensors=False\n ):\n \n super().__init__(\n sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next, merge_own_tensors\n )\n \n self.ForwardFuncName = \"BatchNorm2d\"\n self.BackwardFuncName = \"DerBatchNorm2d\"\n self.PlainFunc = torch.nn.BatchNorm2d\n self.IsAffine = True\n self.momentum = 0.1\n self.IsCumulative = (self.momentum is None)\n self.epsilon = 1e-5\n\n if EnclaveMode is ExecutionModeOptions.CPU or EnclaveMode is ExecutionModeOptions.GPU:\n self.ForwardFunc = torch.nn.BatchNorm2d\n # if self.is_enclave_mode:\n # self.StoreInEnclave = True\n # else:\n # self.ForwardFunc = torch.nn.BatchNorm2d\n # self.StoreInEnclave = False\n \n\n def init_shape(self):\n self.InputShape = self.PrevLayer.get_output_shape()\n self.OutputShape = self.InputShape\n self.BatchSize, self.NumChannel, self.ImgH, self.ImgW = self.InputShape\n self.WeightShape = [self.NumChannel]\n self.LearnableParamsList = [\n LearnableParamTuple(dw_name=\"DerWeight\", w_name=\"weight\", shape=self.WeightShape),\n LearnableParamTuple(dw_name=\"DerBias\", w_name=\"bias\", shape=self.WeightShape),\n ]\n \n\n # def init(self, start_enclave=True):\n \n # if self.sid == 2:\n # return\n # TensorLoader.init(self, start_enclave)\n\n # if self.is_enclave_mode:\n # self.PlainFunc = self.PlainFunc(self.InputShape[1])\n # self.PlainFunc.eval()\n # self.get_cpu(\"weight\").data.copy_(self.PlainFunc.weight.data)\n # self.get_cpu(\"bias\").data.copy_(self.PlainFunc.bias.data)\n # self.get_cpu(\"RunMean\").data.copy_(self.PlainFunc.running_mean.data)\n # # inject sqrt(running_var) instead of running_var for precision\n # self.get_cpu(\"RunVar\").data.copy_(self.PlainFunc.running_var.data)\n # self.transfer_cpu_to_enclave(\"weight\")\n # self.transfer_cpu_to_enclave(\"bias\")\n # self.transfer_cpu_to_enclave(\"RunMean\")\n # self.transfer_cpu_to_enclave(\"RunVar\")\n # self.batchnorm_init(\n # self.LayerName,\n # \"input\", \"output\", \"weight\", \"bias\",\n # \"DerInput\", \"DerOutput\", \"DerWeight\", \"DerBias\",\n # \"RunMean\", \"RunVar\", \"CurMean\", \"CurVar\",\n # \"mu\",\n # self.BatchSize, self.NumChannel, self.ImgH, self.ImgW,\n # int(self.IsAffine), int(self.IsCumulative), self.momentum, self.epsilon)\n # else:\n 
# self.ForwardFunc = self.ForwardFunc(self.InputShape[1])\n # self.PlainFunc = self.PlainFunc(self.InputShape[1])\n # self.PlainFunc.eval()\n # self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)\n # self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n # self.ForwardFunc.running_mean.data.copy_(self.PlainFunc.running_mean.data)\n # self.ForwardFunc.running_var.data.copy_(self.PlainFunc.running_var.data)\n # self.set_cpu(\"weight\", list(self.ForwardFunc.parameters())[0].data)\n # self.set_cpu(\"bias\", list(self.ForwardFunc.parameters())[1].data)\n # self.set_cpu(\"RunMean\", self.ForwardFunc.running_mean.data)\n # self.set_cpu(\"RunVar\", self.ForwardFunc.running_var.data)\n # self.ForwardFunc.eval()\n\n def init(self, start_enclave=True):\n # if self.LayerName == \"Layer3.10.proxies.0.bn2\":\n # st()\n TensorLoader.init(self, start_enclave)\n\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.PlainFunc = self.PlainFunc(self.InputShape[1])\n self.PlainFunc.eval()\n self.get_cpu(\"weight\").data.copy_(self.PlainFunc.weight.data)\n self.get_cpu(\"bias\").data.copy_(self.PlainFunc.bias.data)\n self.get_cpu(\"RunMean\").data.copy_(self.PlainFunc.running_mean.data)\n # inject sqrt(running_var) instead of running_var for precision\n self.get_cpu(\"RunVar\").data.copy_(self.PlainFunc.running_var.data)\n self.transfer_cpu_to_enclave(\"weight\")\n self.transfer_cpu_to_enclave(\"bias\")\n self.transfer_cpu_to_enclave(\"RunMean\")\n self.transfer_cpu_to_enclave(\"RunVar\")\n self.batchnorm_init(\n self.LayerName,\n \"input\", \"output\", \"weight\", \"bias\",\n # \"DerInput\", \"DerOutput\", \"DerWeight\", \"DerBias\",\n \"RunMean\", \"RunVar\", \"CurMean\", \"CurVar\",\n \"mu\",\n self.BatchSize, self.NumChannel, self.ImgH, self.ImgW,\n int(self.IsAffine), int(self.IsCumulative), self.momentum, self.epsilon)\n elif self.EnclaveMode is ExecutionModeOptions.CPU:\n self.ForwardFunc = self.ForwardFunc(self.InputShape[1])\n self.PlainFunc = self.PlainFunc(self.InputShape[1])\n self.PlainFunc.eval()\n self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)\n self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n self.ForwardFunc.running_mean.data.copy_(self.PlainFunc.running_mean.data)\n self.ForwardFunc.running_var.data.copy_(self.PlainFunc.running_var.data)\n self.set_cpu(\"weight\", list(self.ForwardFunc.parameters())[0].data)\n self.set_cpu(\"bias\", list(self.ForwardFunc.parameters())[1].data)\n self.set_cpu(\"RunMean\", self.ForwardFunc.running_mean.data)\n self.set_cpu(\"RunVar\", self.ForwardFunc.running_var.data)\n self.ForwardFunc.eval()\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n self.ForwardFunc = self.ForwardFunc(self.InputShape[1])\n self.PlainFunc = self.PlainFunc(self.InputShape[1])\n self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)\n self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n self.ForwardFunc.running_mean.data.copy_(self.PlainFunc.running_mean.data)\n self.ForwardFunc.running_var.data.copy_(self.PlainFunc.running_var.data)\n self.set_gpu(\"weight\", list(self.ForwardFunc.parameters())[0].data)\n self.set_gpu(\"bias\", list(self.ForwardFunc.parameters())[1].data)\n self.set_gpu(\"RunMean\", self.ForwardFunc.running_mean.data)\n self.set_gpu(\"RunVar\", self.ForwardFunc.running_var.data)\n self.PlainFunc.eval()\n self.ForwardFunc.cuda().eval()\n\n # def inject_params(self, params):\n # if self.sid == -2:\n # raise ValueError(\"S2 has no learnable parameters for injection\")\n # 
self.get_cpu(\"weight\").copy_(params.weight.data)\n # self.get_cpu(\"bias\").copy_(params.bias.data)\n # self.get_cpu(\"RunMean\").copy_(params.running_mean.data)\n # # inject sqrt(running_var) instead of running_var for precision\n # self.get_cpu(\"RunVar\").copy_(params.running_var.data)\n # if self.is_enclave_mode:\n # self.transfer_cpu_to_enclave(\"weight\")\n # self.transfer_cpu_to_enclave(\"bias\")\n # self.transfer_cpu_to_enclave(\"RunMean\")\n # self.transfer_cpu_to_enclave(\"RunVar\")\n\n def inject_params(self, params):\n if self.sid == -2:\n raise ValueError(\"S2 has no learnable parameters for injection\")\n if self.EnclaveMode in [ExecutionModeOptions.CPU, ExecutionModeOptions.Enclave]: \n self.get_cpu(\"weight\").copy_(params.weight.data)\n self.get_cpu(\"bias\").copy_(params.bias.data)\n self.get_cpu(\"RunMean\").copy_(params.running_mean.data)\n self.get_cpu(\"RunVar\").copy_(params.running_var.data)\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.transfer_cpu_to_enclave(\"weight\")\n self.transfer_cpu_to_enclave(\"bias\")\n self.transfer_cpu_to_enclave(\"RunMean\")\n self.transfer_cpu_to_enclave(\"RunVar\")\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n self.get_gpu(\"weight\").copy_(params.weight.data)\n self.get_gpu(\"bias\").copy_(params.bias.data)\n self.get_gpu(\"RunMean\").copy_(params.running_mean.data)\n self.get_gpu(\"RunVar\").copy_(params.running_var.data)\n\n def reset_plain_bn(self):\n # module = torch.BatchNorm2d()\n self.get_cpu(\"weight\").copy_(torch.ones(self.InputShape[1]))\n self.get_cpu(\"bias\").copy_(torch.zeros(self.InputShape[1]))\n self.get_cpu(\"RunMean\").copy_(torch.zeros(self.InputShape[1]))\n self.get_cpu(\"RunVar\").copy_(torch.ones(self.InputShape[1]))\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.transfer_cpu_to_enclave(\"weight\")\n self.transfer_cpu_to_enclave(\"bias\")\n self.transfer_cpu_to_enclave(\"RunMean\")\n self.transfer_cpu_to_enclave(\"RunVar\")\n\n\n def inject_to_plain(self, plain_layer: torch.nn.Module) -> None:\n raise NotImplementedError\n if self.sid == -2:\n raise ValueError(\"S2 has no learnable parameters for injection\")\n self.make_sure_cpu_is_latest(\"weight\")\n self.make_sure_cpu_is_latest(\"bias\")\n plain_layer.weight.data.copy_(self.get_cpu(\"weight\"))\n plain_layer.bias.data.copy_(self.get_cpu(\"bias\"))\n plain_layer.running_mean.data.copy_(self.get_cpu(\"RunMean\"))\n plain_layer.running_var.data.copy_(self.get_cpu(\"RunVar\"))\n\n def generate_tensor_name_list(self, force=False):\n if not force and self.tensor_name_list:\n return\n if self.sid == 2:\n self.tensor_name_list = {}\n return\n\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n NeededTensorNames = [\n (\"input\", self.InputShape, None),\n # (\"DerInput\", self.InputShape, None),\n (\"output\", self.OutputShape, None),\n # (\"DerOutput\", self.OutputShape, None),\n (\"weight\", self.WeightShape, None),\n # (\"DerWeight\", self.WeightShape, None),\n (\"bias\", self.WeightShape, None),\n # (\"DerBias\", self.WeightShape, None),\n (\"RunMean\", self.WeightShape, None),\n (\"CurMean\", self.WeightShape, None),\n (\"RunVar\", self.WeightShape, None),\n (\"CurVar\", self.WeightShape, None),\n (\"mu\", self.InputShape, None),\n ]\n else:\n NeededTensorNames = [\n (\"output\", self.OutputShape, None),\n # (\"DerInput\", self.InputShape, None),\n (\"input\", self.InputShape, None),\n (\"weight\", self.WeightShape, None),\n # (\"DerWeight\", self.WeightShape, None),\n (\"bias\", self.WeightShape, None),\n # 
(\"DerBias\", self.WeightShape, None),\n # (\"DerOutput\", self.OutputShape, None)\n ]\n\n self.tensor_name_list = NeededTensorNames\n\n # def forward(self):\n # if self.sid == 2:\n # return\n # with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n # if self.is_enclave_mode:\n # self.forward_tensor_transfer()\n # self.batchnorm_forward(self.LayerName, int(False))\n # else:\n # self.forward_tensor_transfer()\n # self.requires_grad_on_cpu(\"input\")\n # self.ForwardFunc.bias.data.copy_(self.get_cpu(\"bias\"))\n # self.ForwardFunc.weight.data.copy_(self.get_cpu(\"weight\"))\n # self.ForwardFunc.running_mean.data.copy_(self.get_cpu(\"RunMean\"))\n # # running_var of PlainFunc is ^2 of that in the enclave\n # enclave_running_var = self.get_cpu(\"RunVar\")\n # self.ForwardFunc.running_var.data.copy_(enclave_running_var)\n # self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n\n def forward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n if self.EnclaveMode == ExecutionModeOptions.Enclave:\n # if self.LayerName == \"Layer2.0.downsample.bn\":\n # st()\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} Input Preprocess\", verbose_level=VerboseLevel.LAYER):\n self.forward_tensor_transfer()\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} batchnorm_forward\", verbose_level=VerboseLevel.LAYER):\n self.batchnorm_forward(self.LayerName, int(False))\n elif self.EnclaveMode == ExecutionModeOptions.CPU:\n self.forward_tensor_transfer()\n self.ForwardFunc.bias.data.copy_(self.get_cpu(\"bias\"))\n self.ForwardFunc.weight.data.copy_(self.get_cpu(\"weight\"))\n self.ForwardFunc.running_mean.data.copy_(self.get_cpu(\"RunMean\"))\n # running_var of PlainFunc is ^2 of that in the enclave\n enclave_running_var = self.get_cpu(\"RunVar\")\n self.ForwardFunc.running_var.data.copy_(enclave_running_var)\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n elif self.EnclaveMode == ExecutionModeOptions.GPU:\n self.forward_tensor_transfer()\n self.ForwardFunc.bias.data.copy_(self.get_gpu(\"bias\"))\n self.ForwardFunc.weight.data.copy_(self.get_gpu(\"weight\"))\n self.ForwardFunc.running_mean.data.copy_(self.get_gpu(\"RunMean\"))\n # running_var of PlainFunc is ^2 of that in the enclave\n enclave_running_var = self.get_gpu(\"RunVar\")\n self.ForwardFunc.running_var.data.copy_(enclave_running_var)\n # st()\n # print(self.get_gpu(\"input\")[0,0,0])\n self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\").type(SecretConfig.dtypeForCpuOp)))\n\n def backward(self):\n raise NotImplementedError\n if self.sid == 2:\n return\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Backward\", verbose_level=VerboseLevel.LAYER):\n if self.is_enclave_mode:\n self.backward_tensor_transfer()\n self.batchnorm_backward(self.LayerName)\n else:\n self.backward_tensor_transfer()\n BackwardInput, BackwardWeight, BackwardBias = self.get_cpu(\"output\").grad_fn(self.get_cpu(\"DerOutput\"))\n self.set_cpu(\"DerInput\", BackwardInput.data)\n self.set_cpu(\"DerWeight\", BackwardWeight.data)\n self.set_cpu(\"DerBias\", BackwardBias.data)\n if list(self.get_cpu(\"DerWeight\").shape) != self.WeightShape:\n real_shape = self.get_cpu(\"DerWeight\").shape\n ideal_shape = self.WeightShape\n raise ValueError(\n f\"DerWeight is not of shape self.AffineShape: real: {real_shape}, ideal: {ideal_shape}\")\n if list(self.get_cpu(\"DerBias\").shape) != self.WeightShape:\n raise 
ValueError(\"DerBias is not of shape self.AffineShape\")\n\n def plain_forward(self, NeedBackward=False):\n if self.sid == 2:\n return\n if self.EnclaveMode in [ExecutionModeOptions.Enclave, ExecutionModeOptions.GPU]:\n self.make_sure_cpu_is_latest(\"input\")\n self.make_sure_cpu_is_latest(\"bias\")\n self.make_sure_cpu_is_latest(\"weight\")\n self.requires_grad_on_cpu(\"input\")\n self.PlainFunc.bias.data.copy_(self.get_cpu(\"bias\"))\n self.PlainFunc.weight.data.copy_(self.get_cpu(\"weight\"))\n self.PlainFunc.running_mean.data.copy_(self.get_cpu(\"RunMean\"))\n # self.PlainFunc.running_var.data.copy_(self.get_cpu(\"RunVar\"))\n # running_var of PlainFunc is ^2 of that in the enclave\n enclave_running_var = self.get_cpu(\"RunVar\")\n self.PlainFunc.running_var.data.copy_(enclave_running_var)\n else:\n self.make_sure_cpu_is_latest(\"input\")\n self.requires_grad_on_cpu(\"input\")\n\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainForward\"):\n torch.set_num_threads(1)\n self.PlainForwardResult = self.PlainFunc(self.get_cpu(\"input\"))\n torch.set_num_threads(4)\n\n def plain_backward(self):\n if self.sid == 2:\n return\n self.make_sure_cpu_is_latest(\"DerOutput\")\n GradFunction = self.PlainForwardResult.grad_fn\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainBackward\"):\n torch.set_num_threads(1)\n self.PlainBackwardResult = GradFunction(self.get_cpu(\"DerOutput\"))\n torch.set_num_threads(4)\n\n def show_plain_error(self):\n if self.sid == 2:\n return\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"), get_relative=True)\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")\n\n if self.PlainBackwardResult is None:\n return\n if self.is_enclave_mode:\n self.make_sure_cpu_is_latest(\"DerInput\")\n self.make_sure_cpu_is_latest(\"DerWeight\")\n self.make_sure_cpu_is_latest(\"DerBias\")\n else:\n self.make_sure_cpu_is_latest(\"DerInput\")\n BackwardInput, BackwardWeight, BackwardBias = self.PlainBackwardResult\n err_input = compare_expected_actual(BackwardInput, self.get_cpu(\"DerInput\"), show_where_err=False, get_relative=True)\n err_weight = compare_expected_actual(BackwardWeight, self.get_cpu(\"DerWeight\"), show_where_err=False,\n get_relative=True)\n err_bias = compare_expected_actual(BackwardBias, self.get_cpu(\"DerBias\"))\n print(f\"S{self.sid}: {self.LayerName} Backward Error input: {err_input}, weight {err_weight}, bias: {err_bias}\")\n\n def show_plain_error_forward(self):\n if self.sid == 2:\n return\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"), get_relative=False, show_values=False)\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")" }, { "identifier": "SecretInputLayer", "path": "python/layers/input.py", "snippet": "class SecretInputLayer(SecretNonlinearLayer):\n shape = None\n\n def __init__(\n self, sid, LayerName, input_shape, EnclaveMode, link_prev=True, link_next=True, \n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n self.shape = input_shape\n\n def link_tensors(self):\n gt.link_tags(self.get_tag(\"input\", remap=False), self.get_tag(\"output\", remap=False))\n super().link_tensors()\n\n def init_shape(self):\n return\n\n def set_input(self, tensor):\n self.set_tensor_cpu_gpu_enclave(\"input\", tensor)\n\n def 
get_output_shape(self):\n return self.shape\n\n def forward(self):\n return\n\n def backward(self):\n return\n\n def plain_forward(self):\n return\n\n def plain_backward(self):\n return\n\n def show_plain_error(self):\n return\n\n def print_connection_info(self):\n print(f\"{self.LayerName:30} shape{self.shape} output {self.NextLayer.LayerName:30}\")" }, { "identifier": "SecretOutputLayer", "path": "python/layers/output.py", "snippet": "class SecretOutputLayer(SecretNonlinearLayer):\n TargetShape = None\n loss = 0\n\n def __init__(\n self, sid, LayerName, EnclaveMode, inference=False, link_prev=True, link_next=True, \n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n self.ForwardFunc = torch.nn.CrossEntropyLoss()\n self.PlainFunc = torch.nn.CrossEntropyLoss()\n self.EnclaveMode = ExecutionModeOptions.CPU\n self.inference = inference\n\n\n def init_shape(self):\n self.InputShape = self.PrevLayer.get_output_shape()\n self.OutputShape = [1]\n self.TargetShape = [self.InputShape[0]] # number of Minibatch\n\n def init(self, start_enclave=True):\n TensorLoader.init(self, start_enclave)\n\n def generate_tensor_name_list(self, force=False):\n if not force and self.tensor_name_list:\n return\n if self.sid == 2:\n self.tensor_name_list = {}\n return\n\n NeededTensorNames = [\n (\"output\", self.OutputShape, None),\n (\"DerInput\", self.InputShape, None),\n (\"input\", self.InputShape, None),\n (\"target\", self.TargetShape, None),\n ]\n\n self.tensor_name_list = NeededTensorNames\n\n def load_target(self, tensor):\n self.set_tensor_with_name(\"target\", tensor)\n\n def get_loss(self):\n return self.loss\n \n def get_prediction(self):\n self.forward_tensor_transfer(\"input\")\n if torch.sum(self.get_cpu(\"input\").abs()) == 0:\n raise RuntimeError(\"SGX input not load\")\n return self.get_cpu(\"input\")\n\n def forward(self):\n if not self.inference:\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n self.forward_tensor_transfer()\n self.set_cpu(\"input\", self.get_cpu(\"input\").detach())\n self.requires_grad_on_cpu(\"input\")\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\"), self.get_cpu(\"target\")))\n loss = self.get_cpu(\"output\").item()\n self.loss = loss\n\n def backward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Backward\", verbose_level=VerboseLevel.LAYER):\n self.backward_tensor_transfer(transfer_tensor=\"output\")\n self.get_cpu(\"output\").backward()\n self.set_cpu(\"DerInput\", self.get_cpu(\"input\").grad)\n\n def plain_forward(self):\n if not self.inference:\n self.make_sure_cpu_is_latest(\"input\")\n self.set_cpu(\"input\", self.get_cpu(\"input\").detach())\n self.requires_grad_on_cpu(\"input\")\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainForward\"):\n self.PlainForwardResult = self.PlainFunc(self.get_cpu(\"input\"), self.get_cpu(\"target\"))\n\n def plain_backward(self):\n self.make_sure_cpu_is_latest(\"output\")\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainBackward\"):\n self.PlainForwardResult.backward()\n self.set_cpu(\"DerInput\", self.get_cpu(\"input\").grad)\n\n def show_plain_error(self):\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"))\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")\n\n if self.PlainBackwardResult 
is None:\n return\n self.make_sure_cpu_is_latest(\"DerInput\")\n\n err = compare_expected_actual(self.PlainBackwardResult, self.get_cpu(\"DerInput\"))\n print(f\"S{self.sid}: {self.LayerName} Backward Error {err}\")\n\n def print_connection_info(self):\n print(f\"{self.LayerName:30} shape{self.InputShape}{' ':30} input {self.PrevLayer.LayerName:30}\")" }, { "identifier": "init_communicate", "path": "python/sgx_net.py", "snippet": "def init_communicate(rank, master_address, master_port, backend='gloo'):\n os.environ['MASTER_ADDR'] = master_address\n os.environ['MASTER_PORT'] = master_port\n dist.init_process_group(backend, rank=rank, world_size=SecretConfig.worldSize)" }, { "identifier": "warming_up_cuda", "path": "python/sgx_net.py", "snippet": "def warming_up_cuda():\n device = torch.device(\"cuda:0\")\n # device = torch.device(\"cpu\")\n\n print(\"Execution device: \", device)\n print(\"PyTorch version: \", torch.__version__)\n print(\"CUDA version: \", torch.version.cuda)\n print(\"CUDA device:\", torch.cuda.get_device_name(0))\n\n batch_size, n_input_channel, n_output_channel, img_hw, filter_hw = 512, 512, 256, 4, 3\n x_shape = [batch_size, n_input_channel, img_hw, img_hw]\n w_shape = [n_output_channel, n_input_channel, filter_hw, filter_hw]\n with NamedTimerInstance(\"Warming up Cuda double\"):\n dummy_a = get_random_uniform(SecretConfig.PrimeLimit, x_shape).type(SecretConfig.dtypeForSave)\n dummy_b = get_random_uniform(SecretConfig.PrimeLimit, w_shape).type(SecretConfig.dtypeForSave)\n F.conv2d(dummy_a.cuda().type(SecretConfig.dtypeForCudaMm), dummy_b.cuda().type(SecretConfig.dtypeForCudaMm),\n padding=1)\n\n with NamedTimerInstance(\"Warming up Cuda dobule 2nd\"):\n F.conv2d(dummy_a.cuda().type(torch.double), dummy_b.cuda().type(torch.double),\n padding=1)\n\n with NamedTimerInstance(\"Warming up Cuda float\"):\n F.conv2d(dummy_a.cuda().type(torch.float), dummy_b.cuda().type(torch.float), padding=1)\n\n with NamedTimerInstance(\"Warming up Cuda float 2nd\"):\n F.conv2d(dummy_a.cuda().type(torch.float), dummy_b.cuda().type(torch.float), padding=1)\n\n batch_size, n_input_channel, n_output_channel, img_hw, filter_hw = 64, 64, 64, 8, 3\n x_shape = [batch_size, n_input_channel, img_hw, img_hw]\n w_shape = [n_output_channel, n_input_channel, filter_hw, filter_hw]\n with NamedTimerInstance(\"Warming up Cpu\"):\n dummy_a = get_random_uniform(SecretConfig.PrimeLimit, x_shape).type(SecretConfig.dtypeForSave)\n dummy_b = get_random_uniform(SecretConfig.PrimeLimit, w_shape).type(SecretConfig.dtypeForSave)\n F.conv2d(dummy_a.type(SecretConfig.dtypeForCpuOp), dummy_b.type(SecretConfig.dtypeForCpuOp),\n padding=1)\n\n with NamedTimerInstance(\"Warming up CppExtension\"):\n GlobalCppExtension.get_conv2d_cudnn()" }, { "identifier": "SecretNeuralNetwork", "path": "python/sgx_net.py", "snippet": "class SecretNeuralNetwork(TensorLoader):\n nn_name = None\n layers = None\n\n def __init__(self, sid, nn_name):\n super().__init__()\n self.sid = sid\n self.init(start_enclave=False)\n self.nn_name = nn_name\n\n def set_layers(self, layers):\n self.layers = layers\n\n if not isinstance(self.layers[0], SecretInputLayer):\n raise ValueError(\"The first layer has to be input layer\")\n if not isinstance(self.layers[-1], SecretOutputLayer):\n raise ValueError(\"The last layer has to be output layer\")\n \n for i in range(len(self.layers) - 1):\n PrevLayer = self.layers[i]\n NextLayer = self.layers[i + 1]\n if not PrevLayer.manually_register_next:\n PrevLayer.register_next_layer(NextLayer)\n if not 
NextLayer.manually_register_prev:\n NextLayer.register_prev_layer(PrevLayer)\n\n \n for layer in self.layers:\n # print(f\"Init_shape/link layer {layer.LayerName}\")\n layer.set_eid(self.get_eid())\n layer.init_shape()\n # if layer.LayerName in [\"Layer1.0.weighted_add\", \"Layer1.0.proxies.0.bn\"]:\n # st()\n layer.link_tensors()\n # print(layer.LayerName)\n # layer.print_tensor_link_relation()\n # if layer.LayerName in [\"Layer1.0.weighted_add\", \"Layer1.0.proxies.0.bn\"]:\n # st()\n \n for idx, layer in enumerate(self.layers):\n # print(f\"Init layer {layer.LayerName}\")\n # if layer.LayerName == \"Layer1.0.main.relu2\":\n # st()\n layer.init(start_enclave=False)\n # if idx > 3:\n # print(layer.LayerName, self.layers[4].get_cpu(\"input\").shape, self.layers[4].PrevLayer.LayerName)\n\n def execute_for_each_layer(self, func, reverse=False):\n layers = self.layers[::-1] if reverse else self.layers\n for layer in layers:\n # print(f\"SID: {self.sid} {layer.LayerName}, {func}\")\n if self.sid == 2 and layer.IsDummyForS2:\n continue\n # print(\"Processing \", layer.LayerName)\n func(layer)\n \n # st()\n\n def classifier_output(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.nn_name} classifier_output\"):\n self.forward()\n if self.sid == 2:\n return\n # layers: input_layer, ..., fc_layer, output_layer\n last_fc = self.layers[-2]\n last_fc.transfer_enclave_to_cpu(\"output\")\n outputs = last_fc.get_cpu(\"output\")\n _, predicted = torch.max(outputs.data, 1)\n return predicted\n\n def get_loss(self):\n return self.layers[-1].get_loss()\n\n def forward_with_time(self):\n def run_forward(layer):\n layer.forward()\n t0 = time()\n with NetworkNamedTimerInstance(f\"S{self.sid}: {self.nn_name} Forward\"):\n self.execute_for_each_layer(run_forward)\n t1 = time()\n # time in ms\n elapse_time = (t1 - t0) * (10 ** 3) \n return elapse_time\n\n def forward(self):\n def run_forward(layer):\n layer.forward()\n with NetworkNamedTimerInstance(f\"S{self.sid}: {self.nn_name} Forward\"):\n self.execute_for_each_layer(run_forward)\n\n def backward(self):\n def run_backward(layer):\n layer.backward()\n with NamedTimerInstance(f\"S{self.sid}: {self.nn_name} Backward\"):\n self.execute_for_each_layer(run_backward, reverse=True)\n\n def plain_forward(self):\n with NetworkNamedTimerInstance(f\"S{self.sid}: {self.nn_name} PlainForward\"):\n self.execute_for_each_layer(lambda x: x.plain_forward())\n\n def plain_backward(self):\n with NetworkNamedTimerInstance(f\"S{self.sid}: {self.nn_name} PlainBackward\"):\n self.execute_for_each_layer(lambda x: x.plain_backward(), reverse=True)\n\n def show_plain_error(self):\n self.execute_for_each_layer(lambda x: x.show_plain_error())" }, { "identifier": "SgdOptimizer", "path": "python/sgx_net.py", "snippet": "class SgdOptimizer(TensorLoader):\n def __init__(self, sid):\n super().__init__()\n self.sid = sid\n self.learning_rate = 0.05\n self.momentum = 0.9\n self.weight_decay = 5e-4\n self.momentum_init_flags = defaultdict(lambda: False)\n self.ideal_momentum_buf = {}\n\n self.lr_gamma = 0.5\n self.lr_step = 30\n self.step_counter = 0\n\n self.layers = None\n\n def set_layers(self, layers):\n self.layers = layers\n\n def generate_tensor_name_list(self, force=False):\n # Run if forced or self.tensor_name_list is not generated\n if not force and self.tensor_name_list:\n return\n if self.sid == 2:\n return\n\n self.tensor_name_list = []\n for layer in self.layers:\n for (DerName, ParamName, shape) in layer.LearnableParamsList:\n self.tensor_name_list.append((ParamName + 
\"Momentum\", shape, None))\n\n def update_params(self, test_with_ideal=False):\n if self.sid == 2:\n return\n for layer in self.layers:\n self.update_params_in_layer(layer, test_with_ideal=test_with_ideal)\n\n def update_params_in_layer(self, layer, test_with_ideal=False):\n # ref: https://github.com/pytorch/pytorch/blob/master/torch/optim/sgd.py\n if layer.LearnableParamsList is None:\n return\n\n task_ids = []\n for (der_name, param_name, shape) in layer.LearnableParamsList:\n momentum_name = param_name + \"Momentum\"\n global_momentum_name = layer.name_modifier(momentum_name)\n\n if layer.StoreInEnclave:\n if test_with_ideal:\n ideal_p, ideal_momentum = self.ideal_update_params_with_name(layer, der_name, param_name, shape)\n first_momentum = not self.momentum_init_flags[global_momentum_name]\n if first_momentum:\n # print(\"FIRST MOMENTUM\")\n self.momentum_init_flags[global_momentum_name] = True\n layer.init_enclave_tensor(momentum_name, shape)\n task_id = layer.sgd_update(param_name=param_name, grad_name=der_name, momentum_name=momentum_name,\n lr=self.learning_rate, momentum=self.momentum,\n weight_decay=self.weight_decay,\n first_momentum=first_momentum, is_async=True)\n if test_with_ideal:\n while not self.get_task_status(task_id):\n pass\n layer.generate_cpu_tensor(momentum_name, shape)\n layer.transfer_enclave_to_cpu(momentum_name)\n layer.transfer_enclave_to_cpu(param_name)\n param_err = compare_expected_actual(ideal_p, layer.get_cpu(param_name), get_relative=True)\n print(f\"S{self.sid}: {layer.LayerName} Param Error: {param_err}\")\n momentum_err = compare_expected_actual(ideal_momentum, layer.get_cpu(momentum_name), get_relative=True)\n print(f\"S{self.sid}: {layer.LayerName} Momentum Error: {momentum_err}\")\n else:\n task_ids.append(task_id)\n else:\n DerCpu = layer.get_cpu(der_name)\n ParamsCpu = layer.get_cpu(param_name)\n\n if test_with_ideal:\n ideal_p, ideal_momentum = self.ideal_update_params_with_name(layer, der_name, param_name, shape)\n\n DerCpu.add_(self.weight_decay, ParamsCpu)\n\n if not self.momentum_init_flags[global_momentum_name]:\n self.momentum_init_flags[global_momentum_name] = True\n layer.generate_cpu_tensor(momentum_name, shape)\n layer.get_cpu(momentum_name).copy_(DerCpu)\n MomentumCpu = layer.get_cpu(momentum_name)\n else:\n MomentumCpu = layer.get_cpu(momentum_name)\n MomentumCpu.mul_(self.momentum).add_(1, DerCpu)\n\n ParamsCpu.add_(-self.learning_rate, MomentumCpu)\n\n if test_with_ideal:\n param_err = compare_expected_actual(ideal_p, layer.get_cpu(param_name), get_relative=True)\n print(f\"S{self.sid}: {layer.LayerName} Param Error: {param_err}\")\n momentum_err = compare_expected_actual(ideal_momentum, layer.get_cpu(momentum_name), get_relative=True)\n print(f\"S{self.sid}: {layer.LayerName} Momentum Error: {momentum_err}\")\n\n # Wait for all tasks to be finished\n for task_id in task_ids:\n while not self.get_task_status(task_id):\n pass\n\n def ideal_update_params_with_name(self, layer, der_name, param_name, shape):\n weight_decay = self.weight_decay\n momentum = self.momentum\n dampening = 0\n nesterov = False\n lr = self.learning_rate\n\n global_momentum_name = layer.name_modifier(param_name + 'Momentum')\n\n if layer.StoreInEnclave:\n layer.transfer_enclave_to_cpu(der_name)\n layer.transfer_enclave_to_cpu(param_name)\n d_p = torch.clone(layer.get_cpu(der_name)).detach()\n p = torch.clone(layer.get_cpu(param_name)).detach()\n\n if weight_decay != 0:\n d_p.add_(weight_decay, p)\n if global_momentum_name not in self.ideal_momentum_buf:\n buf 
= self.ideal_momentum_buf[global_momentum_name] = torch.clone(d_p).detach()\n else:\n buf = self.ideal_momentum_buf[global_momentum_name]\n buf.mul_(momentum).add_(1 - dampening, d_p)\n if nesterov:\n d_p = d_p.add(momentum, buf)\n else:\n d_p = buf\n p.add_(-lr, d_p)\n\n return p, buf" }, { "identifier": "ExecutionModeOptions", "path": "python/utils/basic_utils.py", "snippet": "class ExecutionModeOptions(Enum):\n Enclave = 1\n CPU = 2\n GPU = 3" }, { "identifier": "compare_expected_actual", "path": "python/utils/torch_utils.py", "snippet": "def compare_expected_actual(expected, actual, show_where_err=False, get_relative=False, verbose=False, show_values=False):\n def purify(x):\n # return torch.tensor(x)\n res = x\n # if not (isinstance(x, torch.Tensor) or isinstance(x, torch.Variable)):\n if not (isinstance(x, torch.Tensor) ):\n res = torch.tensor(x)\n # return x.detach().numpy()\n return res.type(torch.float).to(\"cpu\")\n expected = purify(expected)\n actual = purify(actual)\n\n if show_values:\n print(\"expected:\", expected[0, 0])\n print(\"actual:\", actual[0, 0])\n\n avg_abs_diff = torch.mean(torch.abs(expected - actual)).item()\n res = avg_abs_diff\n\n if show_where_err:\n show_indices = torch.abs(expected - actual) / torch.abs(expected) > 0.5\n # show_indices = (expected != actual)\n print(\"error indices: \", np.where(show_indices.cpu()))\n print(\"expected values:\", expected[show_indices])\n print(\"difference:\", (expected - actual)[show_indices])\n\n if get_relative:\n tmp_expected, tmp_actual = expected[expected != 0], actual[expected != 0]\n relative_diff = torch.abs(tmp_expected - tmp_actual) / torch.abs(tmp_expected)\n relative_avg_diff = torch.mean(torch.abs(tmp_actual - tmp_expected)) / torch.mean(torch.abs(tmp_expected))\n Error = namedtuple(\"Error\", (\"AvgAbsDiff\", \"RelAvgDiff\", \"AvgRelDiff\", \"StdRelDiff\"))\n res = Error(avg_abs_diff, relative_avg_diff.item(), torch.mean(relative_diff).item(), torch.std(relative_diff).item())\n\n if verbose:\n print(res)\n\n return res" }, { "identifier": "seed_torch", "path": "python/utils/torch_utils.py", "snippet": "def seed_torch(seed=123):\n # https://github.com/pytorch/pytorch/issues/7068\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True" } ]
import os import sys import numpy as np import torch import torch.distributed as dist from pdb import set_trace as st from torch import optim, nn from python.common_net import register_layer, register_weight_layer, get_layer_weight, get_layer_input, \ get_layer_weight_grad, get_layer_output, get_layer_output_grad, get_layer_input_grad from python.enclave_interfaces import GlobalTensor from python.layers.batch_norm_2d import SecretBatchNorm2dLayer from python.layers.input import SecretInputLayer from python.layers.output import SecretOutputLayer from python.sgx_net import init_communicate, warming_up_cuda, SecretNeuralNetwork, SgdOptimizer from python.utils.basic_utils import ExecutionModeOptions from python.utils.torch_utils import compare_expected_actual, seed_torch
11,022
device_cuda = torch.device("cuda:0") torch.set_printoptions(precision=10) def test_BN(sid=0, master_addr=0, master_port=0, is_compare=False): batch_size = 2 n_img_channel = 256 img_hw = 32 x_shape = [batch_size, n_img_channel, img_hw, img_hw]
device_cuda = torch.device("cuda:0") torch.set_printoptions(precision=10) def test_BN(sid=0, master_addr=0, master_port=0, is_compare=False): batch_size = 2 n_img_channel = 256 img_hw = 32 x_shape = [batch_size, n_img_channel, img_hw, img_hw]
GlobalTensor.init()
8
2023-11-01 10:37:37+00:00
16k
Codra-Ingenierie-Informatique/DataLab
cdl/tests/features/embedded2_unit.py
[ { "identifier": "CDLMainWindow", "path": "cdl/core/gui/main.py", "snippet": "class CDLMainWindow(QW.QMainWindow, AbstractCDLControl, metaclass=CDLMainWindowMeta):\n \"\"\"DataLab main window\n\n Args:\n console: enable internal console\n hide_on_close: True to hide window on close\n \"\"\"\n\n __instance = None\n\n SIG_READY = QC.Signal()\n SIG_SEND_OBJECT = QC.Signal(object)\n SIG_SEND_OBJECTLIST = QC.Signal(object)\n SIG_CLOSING = QC.Signal()\n\n @staticmethod\n def get_instance(console=None, hide_on_close=False):\n \"\"\"Return singleton instance\"\"\"\n if CDLMainWindow.__instance is None:\n return CDLMainWindow(console, hide_on_close)\n return CDLMainWindow.__instance\n\n def __init__(self, console=None, hide_on_close=False):\n \"\"\"Initialize main window\"\"\"\n CDLMainWindow.__instance = self\n super().__init__()\n win32_fix_title_bar_background(self)\n self.setObjectName(APP_NAME)\n self.setWindowIcon(get_icon(\"DataLab.svg\"))\n\n execenv.log(self, \"Starting initialization\")\n\n self.__restore_pos_and_size()\n\n self.ready_flag = True\n\n self.hide_on_close = hide_on_close\n self.__old_size = None\n self.__memory_warning = False\n self.memorystatus = None\n\n self.console = None\n self.macropanel: MacroPanel = None\n\n self.signal_toolbar: QW.QToolBar = None\n self.image_toolbar: QW.QToolBar = None\n self.signalpanel: SignalPanel = None\n self.imagepanel: ImagePanel = None\n self.tabwidget: QW.QTabWidget = None\n self.docks: dict[AbstractPanel, QW.QDockWidget] = None\n self.h5inputoutput = H5InputOutput(self)\n\n self.openh5_action: QW.QAction = None\n self.saveh5_action: QW.QAction = None\n self.browseh5_action: QW.QAction = None\n self.settings_action: QW.QAction = None\n self.quit_action: QW.QAction = None\n self.auto_refresh_action: QW.QAction = None\n self.showlabel_action: QW.QAction = None\n\n self.file_menu: QW.QMenu = None\n self.edit_menu: QW.QMenu = None\n self.operation_menu: QW.QMenu = None\n self.processing_menu: QW.QMenu = None\n self.computing_menu: QW.QMenu = None\n self.plugins_menu: QW.QMenu = None\n self.view_menu: QW.QMenu = None\n self.help_menu: QW.QMenu = None\n\n self.__is_modified = None\n self.set_modified(False)\n\n # Starting XML-RPC server thread\n self.remote_server = RemoteServer(self)\n if Conf.main.rpc_server_enabled.get():\n self.remote_server.SIG_SERVER_PORT.connect(self.xmlrpc_server_started)\n self.remote_server.start()\n\n # Setup actions and menus\n if console is None:\n console = Conf.console.console_enabled.get()\n self.setup(console)\n\n execenv.log(self, \"Initialization done\")\n\n # ------API related to XML-RPC remote control\n @staticmethod\n def xmlrpc_server_started(port):\n \"\"\"XML-RPC server has started, writing comm port in configuration file\"\"\"\n Conf.main.rpc_server_port.set(port)\n\n def __get_current_basedatapanel(self) -> BaseDataPanel:\n \"\"\"Return the current BaseDataPanel,\n or the signal panel if macro panel is active\n\n Returns:\n BaseDataPanel: current panel\n \"\"\"\n panel = self.tabwidget.currentWidget()\n if not isinstance(panel, base.BaseDataPanel):\n panel = self.signalpanel\n return panel\n\n def __get_specific_panel(self, panel: str | None) -> BaseDataPanel:\n \"\"\"Return a specific BaseDataPanel.\n\n Args:\n panel (str | None): panel name (valid values: \"signal\", \"image\").\n If None, current panel is used.\n\n Returns:\n BaseDataPanel: panel\n\n Raises:\n ValueError: if panel is unknown\n \"\"\"\n if not panel:\n return self.__get_current_basedatapanel()\n if panel == \"signal\":\n return 
self.signalpanel\n if panel == \"image\":\n return self.imagepanel\n raise ValueError(f\"Unknown panel: {panel}\")\n\n @remote_controlled\n def get_group_titles_with_object_infos(\n self,\n ) -> tuple[list[str], list[list[str]], list[list[str]]]:\n \"\"\"Return groups titles and lists of inner objects uuids and titles.\n\n Returns:\n Tuple: groups titles, lists of inner objects uuids and titles\n \"\"\"\n panel = self.__get_current_basedatapanel()\n return panel.objmodel.get_group_titles_with_object_infos()\n\n @remote_controlled\n def get_object_titles(self, panel: str | None = None) -> list[str]:\n \"\"\"Get object (signal/image) list for current panel.\n Objects are sorted by group number and object index in group.\n\n Args:\n panel (str | None): panel name (valid values: \"signal\", \"image\").\n If None, current panel is used.\n\n Returns:\n list[str]: list of object titles\n\n Raises:\n ValueError: if panel is unknown\n \"\"\"\n return self.__get_specific_panel(panel).objmodel.get_object_titles()\n\n @remote_controlled\n def get_object(\n self,\n nb_id_title: int | str | None = None,\n panel: str | None = None,\n ) -> SignalObj | ImageObj:\n \"\"\"Get object (signal/image) from index.\n\n Args:\n nb_id_title: Object number, or object id, or object title.\n Defaults to None (current object).\n panel: Panel name. Defaults to None (current panel).\n\n Returns:\n Object\n\n Raises:\n KeyError: if object not found\n TypeError: if index_id_title type is invalid\n \"\"\"\n panelw = self.__get_specific_panel(panel)\n if nb_id_title is None:\n return panelw.objview.get_current_object()\n if isinstance(nb_id_title, int):\n return panelw.objmodel.get_object_from_number(nb_id_title)\n if isinstance(nb_id_title, str):\n try:\n return panelw.objmodel[nb_id_title]\n except KeyError:\n try:\n return panelw.objmodel.get_object_from_title(nb_id_title)\n except KeyError as exc:\n raise KeyError(\n f\"Invalid object index, id or title: {nb_id_title}\"\n ) from exc\n raise TypeError(f\"Invalid index_id_title type: {type(nb_id_title)}\")\n\n @remote_controlled\n def get_object_uuids(self, panel: str | None = None) -> list[str]:\n \"\"\"Get object (signal/image) uuid list for current panel.\n Objects are sorted by group number and object index in group.\n\n Args:\n panel (str | None): panel name (valid values: \"signal\", \"image\").\n If None, current panel is used.\n\n Returns:\n list[str]: list of object uuids\n\n Raises:\n ValueError: if panel is unknown\n \"\"\"\n return self.__get_specific_panel(panel).objmodel.get_object_ids()\n\n @remote_controlled\n def get_sel_object_uuids(self, include_groups: bool = False) -> list[str]:\n \"\"\"Return selected objects uuids.\n\n Args:\n include_groups: If True, also return objects from selected groups.\n\n Returns:\n List of selected objects uuids.\n \"\"\"\n panel = self.__get_current_basedatapanel()\n return panel.objview.get_sel_object_uuids(include_groups)\n\n @remote_controlled\n def select_objects(\n self,\n selection: list[int | str],\n panel: str | None = None,\n ) -> None:\n \"\"\"Select objects in current panel.\n\n Args:\n selection: List of object numbers (1 to N) or uuids to select\n panel: panel name (valid values: \"signal\", \"image\").\n If None, current panel is used. 
Defaults to None.\n \"\"\"\n panel = self.__get_specific_panel(panel)\n panel.objview.select_objects(selection)\n\n @remote_controlled\n def select_groups(\n self, selection: list[int | str] | None = None, panel: str | None = None\n ) -> None:\n \"\"\"Select groups in current panel.\n\n Args:\n selection: List of group numbers (1 to N), or list of group uuids,\n or None to select all groups. Defaults to None.\n panel (str | None): panel name (valid values: \"signal\", \"image\").\n If None, current panel is used. Defaults to None.\n \"\"\"\n panel = self.__get_specific_panel(panel)\n panel.objview.select_groups(selection)\n\n @remote_controlled\n def delete_metadata(self, refresh_plot: bool = True) -> None:\n \"\"\"Delete metadata of selected objects\n\n Args:\n refresh_plot (bool | None): Refresh plot. Defaults to True.\n \"\"\"\n panel = self.__get_current_basedatapanel()\n panel.delete_metadata(refresh_plot)\n\n @remote_controlled\n def get_object_shapes(\n self,\n nb_id_title: int | str | None = None,\n panel: str | None = None,\n ) -> list:\n \"\"\"Get plot item shapes associated to object (signal/image).\n\n Args:\n nb_id_title: Object number, or object id, or object title.\n Defaults to None (current object).\n panel: Panel name. Defaults to None (current panel).\n\n Returns:\n List of plot item shapes\n \"\"\"\n obj = self.get_object(nb_id_title, panel)\n return list(obj.iterate_shape_items(editable=False))\n\n @remote_controlled\n def add_annotations_from_items(\n self, items: list, refresh_plot: bool = True, panel: str | None = None\n ) -> None:\n \"\"\"Add object annotations (annotation plot items).\n\n Args:\n items (list): annotation plot items\n refresh_plot (bool | None): refresh plot. Defaults to True.\n panel (str | None): panel name (valid values: \"signal\", \"image\").\n If None, current panel is used.\n \"\"\"\n panel = self.__get_specific_panel(panel)\n panel.add_annotations_from_items(items, refresh_plot)\n\n @remote_controlled\n def add_label_with_title(\n self, title: str | None = None, panel: str | None = None\n ) -> None:\n \"\"\"Add a label with object title on the associated plot\n\n Args:\n title (str | None): Label title. 
Defaults to None.\n If None, the title is the object title.\n panel (str | None): panel name (valid values: \"signal\", \"image\").\n If None, current panel is used.\n \"\"\"\n self.__get_specific_panel(panel).add_label_with_title(title)\n\n # ------Misc.\n @property\n def panels(self) -> tuple[AbstractPanel, ...]:\n \"\"\"Return the tuple of implemented panels (signal, image)\n\n Returns:\n tuple[SignalPanel, ImagePanel, MacroPanel]: tuple of panels\n \"\"\"\n return (self.signalpanel, self.imagepanel, self.macropanel)\n\n def __set_low_memory_state(self, state: bool) -> None:\n \"\"\"Set memory warning state\"\"\"\n self.__memory_warning = state\n\n def confirm_memory_state(self) -> bool: # pragma: no cover\n \"\"\"Check memory warning state and eventually show a warning dialog\n\n Returns:\n bool: True if memory state is ok\n \"\"\"\n if not env.execenv.unattended and self.__memory_warning:\n threshold = Conf.main.available_memory_threshold.get()\n answer = QW.QMessageBox.critical(\n self,\n _(\"Warning\"),\n _(\"Available memory is below %d MB.<br><br>Do you want to continue?\")\n % threshold,\n QW.QMessageBox.Yes | QW.QMessageBox.No,\n )\n return answer == QW.QMessageBox.Yes\n return True\n\n def check_stable_release(self) -> None: # pragma: no cover\n \"\"\"Check if this is a stable release\"\"\"\n if __version__.replace(\".\", \"\").isdigit():\n # This is a stable release\n return\n if \"b\" in __version__:\n # This is a beta release\n rel = _(\n \"This software is in the <b>beta stage</b> of its release cycle. \"\n \"The focus of beta testing is providing a feature complete \"\n \"software for users interested in trying new features before \"\n \"the final release. However, <u>beta software may not behave as \"\n \"expected and will probably have more bugs or performance issues \"\n \"than completed software</u>.\"\n )\n else:\n # This is an alpha release\n rel = _(\n \"This software is in the <b>alpha stage</b> of its release cycle. \"\n \"The focus of alpha testing is providing an incomplete software \"\n \"for early testing of specific features by users. \"\n \"Please note that <u>alpha software was not thoroughly tested</u> \"\n \"by the developer before it is released.\"\n )\n txtlist = [\n f\"<b>{APP_NAME}</b> v{__version__}:\",\n \"\",\n _(\"<i>This is not a stable release.</i>\"),\n \"\",\n rel,\n ]\n QW.QMessageBox.warning(self, APP_NAME, \"<br>\".join(txtlist), QW.QMessageBox.Ok)\n\n def __check_dependencies(self) -> None: # pragma: no cover\n \"\"\"Check dependencies\"\"\"\n if IS_FROZEN or execenv.unattended:\n # No need to check dependencies if DataLab has been frozen, or if\n # the user has chosen to ignore this check, or if we are in unattended mode\n # (i.e. 
running automated tests)\n\n if IS_FROZEN:\n QW.QMessageBox.information(\n self,\n _(\"Information\"),\n _(\n \"The dependency check feature is not relevant for the \"\n \"standalone version of DataLab.\"\n ),\n QW.QMessageBox.Ok,\n )\n return\n try:\n state = dephash.check_dependencies_hash(DATAPATH)\n bad_deps = [name for name in state if not state[name]]\n if not bad_deps:\n # Everything is OK\n QW.QMessageBox.information(\n self,\n _(\"Information\"),\n _(\n \"All critical dependencies of DataLab have been qualified \"\n \"on this operating system.\"\n ),\n QW.QMessageBox.Ok,\n )\n return\n except IOError:\n bad_deps = None\n txt0 = _(\"Non-compliant dependency:\")\n if bad_deps is None or len(bad_deps) > 1:\n txt0 = _(\"Non-compliant dependencies:\")\n if bad_deps is None:\n txtlist = [\n _(\"DataLab has not yet been qualified on your operating system.\"),\n ]\n else:\n txtlist = [\n \"<u>\" + txt0 + \"</u> \" + \", \".join(bad_deps),\n \"\",\n _(\n \"At least one dependency does not comply with DataLab \"\n \"qualification standard reference (wrong dependency version \"\n \"has been installed, or dependency source code has been \"\n \"modified, or the application has not yet been qualified \"\n \"on your operating system).\"\n ),\n ]\n txtlist += [\n \"\",\n _(\n \"This means that the application has not been officially qualified \"\n \"in this context and may not behave as expected.\"\n ),\n ]\n txt = \"<br>\".join(txtlist)\n QW.QMessageBox.warning(self, APP_NAME, txt, QW.QMessageBox.Ok)\n\n def check_for_previous_crash(self) -> None: # pragma: no cover\n \"\"\"Check for previous crash\"\"\"\n if execenv.unattended:\n self.__show_logviewer()\n elif Conf.main.faulthandler_log_available.get(\n False\n ) or Conf.main.traceback_log_available.get(False):\n txt = \"<br>\".join(\n [\n logviewer.get_log_prompt_message(),\n \"\",\n _(\"Do you want to see available log files?\"),\n ]\n )\n btns = QW.QMessageBox.StandardButton.Yes | QW.QMessageBox.StandardButton.No\n choice = QW.QMessageBox.warning(self, APP_NAME, txt, btns)\n if choice == QW.QMessageBox.StandardButton.Yes:\n self.__show_logviewer()\n\n def take_screenshot(self, name: str) -> None: # pragma: no cover\n \"\"\"Take main window screenshot\"\"\"\n self.memorystatus.set_demo_mode(True)\n qth.grab_save_window(self, f\"{name}\")\n self.memorystatus.set_demo_mode(False)\n\n def take_menu_screenshots(self) -> None: # pragma: no cover\n \"\"\"Take menu screenshots\"\"\"\n for panel in self.panels:\n if isinstance(panel, base.BaseDataPanel):\n self.tabwidget.setCurrentWidget(panel)\n for name in (\n \"file\",\n \"edit\",\n \"view\",\n \"operation\",\n \"processing\",\n \"computing\",\n \"help\",\n ):\n menu = getattr(self, f\"{name}_menu\")\n menu.popup(self.pos())\n qth.grab_save_window(menu, f\"{panel.objectName()}_{name}\")\n menu.close()\n\n # ------GUI setup\n def __restore_pos_and_size(self) -> None:\n \"\"\"Restore main window position and size from configuration\"\"\"\n pos = Conf.main.window_position.get(None)\n if pos is not None:\n posx, posy = pos\n self.move(QC.QPoint(posx, posy))\n size = Conf.main.window_size.get(None)\n if size is not None:\n width, height = size\n self.resize(QC.QSize(width, height))\n if pos is not None and size is not None:\n sgeo = self.screen().availableGeometry()\n out_inf = posx < -int(0.9 * width) or posy < -int(0.9 * height)\n out_sup = posx > int(0.9 * sgeo.width()) or posy > int(0.9 * sgeo.height())\n if len(QW.QApplication.screens()) == 1 and (out_inf or out_sup):\n # Main window is 
offscreen\n posx = min(max(posx, 0), sgeo.width() - width)\n posy = min(max(posy, 0), sgeo.height() - height)\n self.move(QC.QPoint(posx, posy))\n\n def __save_pos_and_size(self) -> None:\n \"\"\"Save main window position and size to configuration\"\"\"\n is_maximized = self.windowState() == QC.Qt.WindowMaximized\n Conf.main.window_maximized.set(is_maximized)\n if not is_maximized:\n size = self.size()\n Conf.main.window_size.set((size.width(), size.height()))\n pos = self.pos()\n Conf.main.window_position.set((pos.x(), pos.y()))\n\n def setup(self, console: bool = False) -> None:\n \"\"\"Setup main window\n\n Args:\n console: True to setup console\n \"\"\"\n self.__register_plugins()\n self.__configure_statusbar()\n self.__setup_global_actions()\n self.__add_signal_image_panels()\n self.__create_plugins_actions()\n self.__setup_central_widget()\n self.__add_menus()\n if console:\n self.__setup_console()\n self.__update_actions()\n self.__add_macro_panel()\n self.__configure_panels()\n\n def __register_plugins(self) -> None:\n \"\"\"Register plugins\"\"\"\n with qth.try_or_log_error(\"Discovering plugins\"):\n # Discovering plugins\n plugin_nb = len(discover_plugins())\n execenv.log(self, f\"{plugin_nb} plugin(s) found\")\n for plugin_class in PluginRegistry.get_plugin_classes():\n with qth.try_or_log_error(f\"Instantiating plugin {plugin_class.__name__}\"):\n # Instantiating plugin\n plugin: PluginBase = plugin_class()\n with qth.try_or_log_error(f\"Registering plugin {plugin.info.name}\"):\n # Registering plugin\n plugin.register(self)\n\n def __create_plugins_actions(self) -> None:\n \"\"\"Create plugins actions\"\"\"\n with self.signalpanel.acthandler.new_category(ActionCategory.PLUGINS):\n with self.imagepanel.acthandler.new_category(ActionCategory.PLUGINS):\n for plugin in PluginRegistry.get_plugins():\n with qth.try_or_log_error(f\"Create actions for {plugin.info.name}\"):\n plugin.create_actions()\n\n @staticmethod\n def __unregister_plugins() -> None:\n \"\"\"Unregister plugins\"\"\"\n while PluginRegistry.get_plugins():\n # Unregistering plugin\n plugin = PluginRegistry.get_plugins()[-1]\n with qth.try_or_log_error(f\"Unregistering plugin {plugin.info.name}\"):\n plugin.unregister()\n\n def __configure_statusbar(self) -> None:\n \"\"\"Configure status bar\"\"\"\n self.statusBar().showMessage(_(\"Welcome to %s!\") % APP_NAME, 5000)\n # Plugin status\n pluginstatus = status.PluginStatus()\n self.statusBar().addPermanentWidget(pluginstatus)\n # XML-RPC server status\n xmlrpcstatus = status.XMLRPCStatus()\n xmlrpcstatus.set_port(self.remote_server.port)\n self.statusBar().addPermanentWidget(xmlrpcstatus)\n # Memory status\n threshold = Conf.main.available_memory_threshold.get()\n self.memorystatus = status.MemoryStatus(threshold)\n self.memorystatus.SIG_MEMORY_ALARM.connect(self.__set_low_memory_state)\n self.statusBar().addPermanentWidget(self.memorystatus)\n\n def __setup_global_actions(self) -> None:\n \"\"\"Setup global actions\"\"\"\n self.openh5_action = create_action(\n self,\n _(\"Open HDF5 files...\"),\n icon=get_icon(\"fileopen_h5.svg\"),\n tip=_(\"Open one or several HDF5 files\"),\n triggered=lambda checked=False: self.open_h5_files(import_all=True),\n )\n self.saveh5_action = create_action(\n self,\n _(\"Save to HDF5 file...\"),\n icon=get_icon(\"filesave_h5.svg\"),\n tip=_(\"Save to HDF5 file\"),\n triggered=self.save_to_h5_file,\n )\n self.browseh5_action = create_action(\n self,\n _(\"Browse HDF5 file...\"),\n icon=get_icon(\"h5browser.svg\"),\n tip=_(\"Browse an HDF5 
file\"),\n triggered=lambda checked=False: self.open_h5_files(import_all=None),\n )\n self.settings_action = create_action(\n self,\n _(\"Settings...\"),\n icon=get_icon(\"libre-gui-settings.svg\"),\n tip=_(\"Open settings dialog\"),\n triggered=self.__edit_settings,\n )\n main_toolbar = self.addToolBar(_(\"Main Toolbar\"))\n add_actions(\n main_toolbar,\n [\n self.openh5_action,\n self.saveh5_action,\n self.browseh5_action,\n None,\n self.settings_action,\n ],\n )\n # Quit action for \"File menu\" (added when populating menu on demand)\n if self.hide_on_close:\n quit_text = _(\"Hide window\")\n quit_tip = _(\"Hide DataLab window\")\n else:\n quit_text = _(\"Quit\")\n quit_tip = _(\"Quit application\")\n if sys.platform != \"darwin\":\n # On macOS, the \"Quit\" action is automatically added to the application menu\n self.quit_action = create_action(\n self,\n quit_text,\n shortcut=QG.QKeySequence(QG.QKeySequence.Quit),\n icon=get_icon(\"libre-gui-close.svg\"),\n tip=quit_tip,\n triggered=self.close,\n )\n # View menu actions\n self.auto_refresh_action = create_action(\n self,\n _(\"Auto-refresh\"),\n icon=get_icon(\"refresh-auto.svg\"),\n tip=_(\"Auto-refresh plot when object is modified, added or removed\"),\n toggled=self.toggle_auto_refresh,\n )\n self.showlabel_action = create_action(\n self,\n _(\"Show graphical object titles\"),\n icon=get_icon(\"show_titles.svg\"),\n tip=_(\"Show or hide ROI and other graphical object titles or subtitles\"),\n toggled=self.toggle_show_titles,\n )\n\n def __add_signal_panel(self) -> None:\n \"\"\"Setup signal toolbar, widgets and panel\"\"\"\n self.signal_toolbar = self.addToolBar(_(\"Signal Processing Toolbar\"))\n curvewidget = DockablePlotWidget(self, PlotType.CURVE)\n curveplot = curvewidget.get_plot()\n curveplot.add_item(make.legend(\"TR\"))\n self.signalpanel = signal.SignalPanel(\n self, curvewidget.plotwidget, self.signal_toolbar\n )\n self.signalpanel.SIG_STATUS_MESSAGE.connect(self.statusBar().showMessage)\n return curvewidget\n\n def __add_image_panel(self) -> None:\n \"\"\"Setup image toolbar, widgets and panel\"\"\"\n self.image_toolbar = self.addToolBar(_(\"Image Processing Toolbar\"))\n imagewidget = DockablePlotWidget(self, PlotType.IMAGE)\n self.imagepanel = image.ImagePanel(\n self, imagewidget.plotwidget, self.image_toolbar\n )\n # -----------------------------------------------------------------------------\n # # Before eventually disabling the \"peritem\" mode by default, wait for the\n # # plotpy bug to be fixed (peritem mode is not compatible with multiple image\n # # items):\n # for cspanel in (\n # self.imagepanel.plotwidget.get_xcs_panel(),\n # self.imagepanel.plotwidget.get_ycs_panel(),\n # ):\n # cspanel.peritem_ac.setChecked(False)\n # -----------------------------------------------------------------------------\n self.imagepanel.SIG_STATUS_MESSAGE.connect(self.statusBar().showMessage)\n return imagewidget\n\n def __add_signal_image_panels(self) -> None:\n \"\"\"Add signal and image panels\"\"\"\n self.tabwidget = QW.QTabWidget()\n cdock = self.__add_dockwidget(self.__add_signal_panel(), title=_(\"Curve panel\"))\n idock = self.__add_dockwidget(self.__add_image_panel(), title=_(\"Image panel\"))\n self.tabifyDockWidget(cdock, idock)\n self.docks = {self.signalpanel: cdock, self.imagepanel: idock}\n self.tabwidget.currentChanged.connect(self.__tab_index_changed)\n self.signalpanel.SIG_OBJECT_ADDED.connect(\n lambda: self.set_current_panel(\"signal\")\n )\n self.imagepanel.SIG_OBJECT_ADDED.connect(\n lambda: 
self.set_current_panel(\"image\")\n )\n for panel in (self.signalpanel, self.imagepanel):\n panel.setup_panel()\n\n def __setup_central_widget(self) -> None:\n \"\"\"Setup central widget (main panel)\"\"\"\n self.tabwidget.setMaximumWidth(500)\n self.tabwidget.addTab(self.signalpanel, get_icon(\"signal.svg\"), _(\"Signals\"))\n self.tabwidget.addTab(self.imagepanel, get_icon(\"image.svg\"), _(\"Images\"))\n self.setCentralWidget(self.tabwidget)\n\n @staticmethod\n def __get_local_doc_path() -> str | None:\n \"\"\"Return local documentation path, if it exists\"\"\"\n locale = QC.QLocale.system().name()\n for suffix in (\"_\" + locale[:2], \"_en\"):\n path = osp.join(DATAPATH, \"doc\", f\"{APP_NAME}{suffix}.pdf\")\n if osp.isfile(path):\n return path\n return None\n\n def __add_menus(self) -> None:\n \"\"\"Adding menus\"\"\"\n self.file_menu = self.menuBar().addMenu(_(\"File\"))\n configure_menu_about_to_show(self.file_menu, self.__update_file_menu)\n self.edit_menu = self.menuBar().addMenu(_(\"&Edit\"))\n self.operation_menu = self.menuBar().addMenu(_(\"Operations\"))\n self.processing_menu = self.menuBar().addMenu(_(\"Processing\"))\n self.computing_menu = self.menuBar().addMenu(_(\"Computing\"))\n self.plugins_menu = self.menuBar().addMenu(_(\"Plugins\"))\n self.view_menu = self.menuBar().addMenu(_(\"&View\"))\n configure_menu_about_to_show(self.view_menu, self.__update_view_menu)\n self.help_menu = self.menuBar().addMenu(\"?\")\n for menu in (\n self.edit_menu,\n self.operation_menu,\n self.processing_menu,\n self.computing_menu,\n self.plugins_menu,\n ):\n configure_menu_about_to_show(menu, self.__update_generic_menu)\n help_menu_actions = [\n create_action(\n self,\n _(\"Online documentation\"),\n icon=get_icon(\"libre-gui-help.svg\"),\n triggered=lambda: webbrowser.open(__docurl__),\n ),\n ]\n localdocpath = self.__get_local_doc_path()\n if localdocpath is not None:\n help_menu_actions += [\n create_action(\n self,\n _(\"PDF documentation\"),\n icon=get_icon(\"help_pdf.svg\"),\n triggered=lambda: webbrowser.open(localdocpath),\n ),\n ]\n help_menu_actions += [None]\n if TEST_SEGFAULT_ERROR:\n help_menu_actions += [\n create_action(\n self,\n _(\"Test segfault/Python error\"),\n triggered=self.test_segfault_error,\n )\n ]\n help_menu_actions += [\n create_action(\n self,\n _(\"Log files\") + \"...\",\n icon=get_icon(\"logs.svg\"),\n triggered=self.__show_logviewer,\n ),\n create_action(\n self,\n _(\"Installation and configuration\") + \"...\",\n icon=get_icon(\"libre-toolbox.svg\"),\n triggered=lambda: instconfviewer.exec_cdl_installconfig_dialog(self),\n ),\n None,\n create_action(\n self,\n _(\"Project home page\"),\n icon=get_icon(\"libre-gui-globe.svg\"),\n triggered=lambda: webbrowser.open(__homeurl__),\n ),\n create_action(\n self,\n _(\"Bug report or feature request\"),\n icon=get_icon(\"libre-gui-globe.svg\"),\n triggered=lambda: webbrowser.open(__supporturl__),\n ),\n create_action(\n self,\n _(\"Check critical dependencies...\"),\n triggered=self.__check_dependencies,\n ),\n create_action(\n self,\n _(\"About...\"),\n icon=get_icon(\"libre-gui-about.svg\"),\n triggered=self.__about,\n ),\n ]\n add_actions(self.help_menu, help_menu_actions)\n\n def __setup_console(self) -> None:\n \"\"\"Add an internal console\"\"\"\n ns = {\n \"cdl\": self,\n \"np\": np,\n \"sps\": sps,\n \"spi\": spi,\n \"os\": os,\n \"sys\": sys,\n \"osp\": osp,\n \"time\": time,\n }\n msg = (\n \"Welcome to DataLab console!\\n\"\n \"---------------------------\\n\"\n \"You can access the main window with 
the 'cdl' variable.\\n\"\n \"Example:\\n\"\n \" o = cdl.get_object() # returns currently selected object\\n\"\n \" o = cdl[1] # returns object number 1\\n\"\n \" o = cdl['My image'] # returns object which title is 'My image'\\n\"\n \" o.data # returns object data\\n\"\n \"Modules imported at startup: \"\n \"os, sys, os.path as osp, time, \"\n \"numpy as np, scipy.signal as sps, scipy.ndimage as spi\"\n )\n self.console = DockableConsole(self, namespace=ns, message=msg, debug=DEBUG)\n self.console.setMaximumBlockCount(Conf.console.max_line_count.get(5000))\n self.console.go_to_error.connect(go_to_error)\n console_dock = self.__add_dockwidget(self.console, _(\"Console\"))\n console_dock.hide()\n self.console.interpreter.widget_proxy.sig_new_prompt.connect(\n lambda txt: self.repopulate_panel_trees()\n )\n\n def __add_macro_panel(self) -> None:\n \"\"\"Add macro panel\"\"\"\n self.macropanel = macro.MacroPanel()\n mdock = self.__add_dockwidget(self.macropanel, _(\"Macro manager\"))\n self.docks[self.macropanel] = mdock\n self.tabifyDockWidget(self.docks[self.imagepanel], mdock)\n self.docks[self.signalpanel].raise_()\n\n def __configure_panels(self) -> None:\n \"\"\"Configure panels\"\"\"\n # Connectings signals\n for panel in self.panels:\n panel.SIG_OBJECT_ADDED.connect(self.set_modified)\n panel.SIG_OBJECT_REMOVED.connect(self.set_modified)\n self.macropanel.SIG_OBJECT_MODIFIED.connect(self.set_modified)\n # Initializing common panel actions\n self.auto_refresh_action.setChecked(Conf.view.auto_refresh.get(True))\n self.showlabel_action.setChecked(Conf.view.show_label.get(False))\n # Restoring current tab from last session\n tab_idx = Conf.main.current_tab.get(None)\n if tab_idx is not None:\n self.tabwidget.setCurrentIndex(tab_idx)\n # Set focus on current panel, so that keyboard shortcuts work (Fixes #10)\n self.tabwidget.currentWidget().setFocus()\n\n def set_process_isolation_enabled(self, state: bool) -> None:\n \"\"\"Enable/disable process isolation\n\n Args:\n state (bool): True to enable process isolation\n \"\"\"\n for processor in (self.imagepanel.processor, self.signalpanel.processor):\n processor.set_process_isolation_enabled(state)\n\n # ------Remote control\n @remote_controlled\n def get_current_panel(self) -> str:\n \"\"\"Return current panel name\n\n Returns:\n str: panel name (valid values: \"signal\", \"image\", \"macro\")\n \"\"\"\n panel = self.tabwidget.currentWidget()\n dock = self.docks[panel]\n if panel is self.signalpanel and dock.isVisible():\n return \"signal\"\n if panel is self.imagepanel and dock.isVisible():\n return \"image\"\n return \"macro\"\n\n @remote_controlled\n def set_current_panel(self, panel: str) -> None:\n \"\"\"Switch to panel.\n\n Args:\n panel (str): panel name (valid values: \"signal\", \"image\", \"macro\")\n\n Raises:\n ValueError: unknown panel\n \"\"\"\n if self.get_current_panel() == panel:\n if panel in (\"signal\", \"image\"):\n # Force tab index changed event to be sure that the dock associated\n # to the current panel is raised\n self.__tab_index_changed(self.tabwidget.currentIndex())\n return\n if panel == \"signal\":\n self.tabwidget.setCurrentWidget(self.signalpanel)\n elif panel == \"image\":\n self.tabwidget.setCurrentWidget(self.imagepanel)\n elif panel == \"macro\":\n self.docks[self.macropanel].raise_()\n else:\n raise ValueError(f\"Unknown panel {panel}\")\n\n @remote_controlled\n def calc(self, name: str, param: gds.DataSet | None = None) -> None:\n \"\"\"Call compute function `name` in current panel's processor\n\n Args:\n 
name (str): function name\n param (guidata.dataset.DataSet): optional parameters\n (default: None)\n\n Raises:\n ValueError: unknown function\n \"\"\"\n panel = self.tabwidget.currentWidget()\n if isinstance(panel, base.BaseDataPanel):\n for funcname in (name, f\"compute_{name}\"):\n func = getattr(panel.processor, funcname, None)\n if func is not None:\n break\n else:\n raise ValueError(f\"Unknown function {funcname}\")\n if param is None:\n func()\n else:\n func(param)\n\n # ------GUI refresh\n def has_objects(self) -> bool:\n \"\"\"Return True if sig/ima panels have any object\"\"\"\n return sum(len(panel) for panel in self.panels) > 0\n\n def set_modified(self, state: bool = True) -> None:\n \"\"\"Set mainwindow modified state\"\"\"\n state = state and self.has_objects()\n self.__is_modified = state\n self.setWindowTitle(APP_NAME + (\"*\" if state else \"\"))\n\n def __add_dockwidget(self, child, title: str) -> QW.QDockWidget:\n \"\"\"Add QDockWidget and toggleViewAction\"\"\"\n dockwidget, location = child.create_dockwidget(title)\n self.addDockWidget(location, dockwidget)\n return dockwidget\n\n def repopulate_panel_trees(self) -> None:\n \"\"\"Repopulate all panel trees\"\"\"\n for panel in self.panels:\n if isinstance(panel, base.BaseDataPanel):\n panel.objview.populate_tree()\n\n def __update_actions(self) -> None:\n \"\"\"Update selection dependent actions\"\"\"\n is_signal = self.tabwidget.currentWidget() is self.signalpanel\n panel = self.signalpanel if is_signal else self.imagepanel\n panel.selection_changed()\n self.signal_toolbar.setVisible(is_signal)\n self.image_toolbar.setVisible(not is_signal)\n if self.plugins_menu is not None:\n plugin_actions = panel.get_category_actions(ActionCategory.PLUGINS)\n self.plugins_menu.setEnabled(len(plugin_actions) > 0)\n\n def __tab_index_changed(self, index: int) -> None:\n \"\"\"Switch from signal to image mode, or vice-versa\"\"\"\n dock = self.docks[self.tabwidget.widget(index)]\n dock.raise_()\n self.__update_actions()\n\n def __update_generic_menu(self, menu: QW.QMenu | None = None) -> None:\n \"\"\"Update menu before showing up -- Generic method\"\"\"\n if menu is None:\n menu = self.sender()\n menu.clear()\n panel = self.tabwidget.currentWidget()\n category = {\n self.file_menu: ActionCategory.FILE,\n self.edit_menu: ActionCategory.EDIT,\n self.view_menu: ActionCategory.VIEW,\n self.operation_menu: ActionCategory.OPERATION,\n self.processing_menu: ActionCategory.PROCESSING,\n self.computing_menu: ActionCategory.COMPUTING,\n self.plugins_menu: ActionCategory.PLUGINS,\n }[menu]\n actions = panel.get_category_actions(category)\n add_actions(menu, actions)\n\n def __update_file_menu(self) -> None:\n \"\"\"Update file menu before showing up\"\"\"\n self.saveh5_action.setEnabled(self.has_objects())\n self.__update_generic_menu(self.file_menu)\n add_actions(\n self.file_menu,\n [\n None,\n self.openh5_action,\n self.saveh5_action,\n self.browseh5_action,\n None,\n self.settings_action,\n ],\n )\n if self.quit_action is not None:\n add_actions(self.file_menu, [None, self.quit_action])\n\n def __update_view_menu(self) -> None:\n \"\"\"Update view menu before showing up\"\"\"\n self.__update_generic_menu(self.view_menu)\n add_actions(self.view_menu, [None] + self.createPopupMenu().actions())\n\n @remote_controlled\n def toggle_show_titles(self, state: bool) -> None:\n \"\"\"Toggle show annotations option\n\n Args:\n state: state\n \"\"\"\n Conf.view.show_label.set(state)\n for datapanel in (self.signalpanel, self.imagepanel):\n for obj in 
datapanel.objmodel:\n obj.set_metadata_option(\"showlabel\", state)\n datapanel.SIG_REFRESH_PLOT.emit(\"selected\", True)\n\n @remote_controlled\n def toggle_auto_refresh(self, state: bool) -> None:\n \"\"\"Toggle auto refresh option\n\n Args:\n state: state\n \"\"\"\n Conf.view.auto_refresh.set(state)\n for datapanel in (self.signalpanel, self.imagepanel):\n datapanel.plothandler.set_auto_refresh(state)\n\n # ------Common features\n @remote_controlled\n def reset_all(self) -> None:\n \"\"\"Reset all application data\"\"\"\n for panel in self.panels:\n if panel is not None:\n panel.remove_all_objects()\n\n @staticmethod\n def __check_h5file(filename: str, operation: str) -> str:\n \"\"\"Check HDF5 filename\"\"\"\n filename = osp.abspath(osp.normpath(filename))\n bname = osp.basename(filename)\n if operation == \"load\" and not osp.isfile(filename):\n raise IOError(f'File not found \"{bname}\"')\n if not filename.endswith(\".h5\"):\n raise IOError(f'Invalid HDF5 file \"{bname}\"')\n Conf.main.base_dir.set(filename)\n return filename\n\n @remote_controlled\n def save_to_h5_file(self, filename=None) -> None:\n \"\"\"Save to a DataLab HDF5 file\n\n Args:\n filename (str): HDF5 filename. If None, a file dialog is opened.\n\n Raises:\n IOError: if filename is invalid or file cannot be saved.\n \"\"\"\n if filename is None:\n basedir = Conf.main.base_dir.get()\n with qth.save_restore_stds():\n filename, _fl = getsavefilename(self, _(\"Save\"), basedir, \"HDF5 (*.h5)\")\n if not filename:\n return\n with qth.qt_try_loadsave_file(self, filename, \"save\"):\n filename = self.__check_h5file(filename, \"save\")\n self.h5inputoutput.save_file(filename)\n self.set_modified(False)\n\n @remote_controlled\n def open_h5_files(\n self,\n h5files: list[str] | None = None,\n import_all: bool | None = None,\n reset_all: bool | None = None,\n ) -> None:\n \"\"\"Open a DataLab HDF5 file or import from any other HDF5 file.\n\n Args:\n h5files: HDF5 filenames (optionally with dataset name, separated by \":\")\n import_all (bool): Import all datasets from HDF5 files\n reset_all (bool): Reset all application data before importing\n\n Returns:\n None\n \"\"\"\n if not self.confirm_memory_state():\n return\n if reset_all is None:\n reset_all = False\n if self.has_objects():\n answer = QW.QMessageBox.question(\n self,\n _(\"Warning\"),\n _(\n \"Do you want to remove all signals and images \"\n \"before importing data from HDF5 files?\"\n ),\n QW.QMessageBox.Yes | QW.QMessageBox.No,\n )\n if answer == QW.QMessageBox.Yes:\n reset_all = True\n if h5files is None:\n basedir = Conf.main.base_dir.get()\n with qth.save_restore_stds():\n h5files, _fl = getopenfilenames(self, _(\"Open\"), basedir, \"HDF5 (*.h5)\")\n for fname_with_dset in h5files:\n if \",\" in fname_with_dset:\n filename, dsetname = fname_with_dset.split(\",\")\n else:\n filename, dsetname = fname_with_dset, None\n if import_all is None and dsetname is None:\n self.import_h5_file(filename, reset_all)\n else:\n with qth.qt_try_loadsave_file(self, filename, \"load\"):\n filename = self.__check_h5file(filename, \"load\")\n if dsetname is None:\n self.h5inputoutput.open_file(filename, import_all, reset_all)\n else:\n self.h5inputoutput.import_dataset_from_file(filename, dsetname)\n reset_all = False\n\n @remote_controlled\n def import_h5_file(self, filename: str, reset_all: bool | None = None) -> None:\n \"\"\"Import HDF5 file into DataLab\n\n Args:\n filename (str): HDF5 filename (optionally with dataset name,\n separated by \":\")\n reset_all (bool): Delete all 
DataLab signals/images before importing data\n\n Returns:\n None\n \"\"\"\n with qth.qt_try_loadsave_file(self, filename, \"load\"):\n filename = self.__check_h5file(filename, \"load\")\n self.h5inputoutput.import_file(filename, False, reset_all)\n\n # This method is intentionally *not* remote controlled\n # (see TODO regarding RemoteClient.add_object method)\n # @remote_controlled\n def add_object(self, obj: SignalObj | ImageObj) -> None:\n \"\"\"Add object - signal or image\n\n Args:\n obj (SignalObj or ImageObj): object to add (signal or image)\n \"\"\"\n if self.confirm_memory_state():\n if isinstance(obj, SignalObj):\n self.signalpanel.add_object(obj)\n elif isinstance(obj, ImageObj):\n self.imagepanel.add_object(obj)\n else:\n raise TypeError(f\"Unsupported object type {type(obj)}\")\n\n @remote_controlled\n def open_object(self, filename: str) -> None:\n \"\"\"Open object from file in current panel (signal/image)\n\n Args:\n filename (str): HDF5 filename\n\n Returns:\n None\n \"\"\"\n panel = self.tabwidget.currentWidget()\n panel.open_object(filename)\n\n # ------Other methods related to AbstractCDLControl interface\n def get_version(self) -> str:\n \"\"\"Return DataLab version.\n\n Returns:\n str: DataLab version\n \"\"\"\n return __version__\n\n def close_application(self) -> None: # Implementing AbstractCDLControl interface\n \"\"\"Close DataLab application\"\"\"\n self.close()\n\n def raise_window(self) -> None: # Implementing AbstractCDLControl interface\n \"\"\"Raise DataLab window\"\"\"\n bring_to_front(self)\n\n def add_signal(\n self,\n title: str,\n xdata: np.ndarray,\n ydata: np.ndarray,\n xunit: str | None = None,\n yunit: str | None = None,\n xlabel: str | None = None,\n ylabel: str | None = None,\n ) -> bool: # pylint: disable=too-many-arguments\n \"\"\"Add signal data to DataLab.\n\n Args:\n title (str): Signal title\n xdata (numpy.ndarray): X data\n ydata (numpy.ndarray): Y data\n xunit (str | None): X unit. Defaults to None.\n yunit (str | None): Y unit. Defaults to None.\n xlabel (str | None): X label. Defaults to None.\n ylabel (str | None): Y label. Defaults to None.\n\n Returns:\n bool: True if signal was added successfully, False otherwise\n\n Raises:\n ValueError: Invalid xdata dtype\n ValueError: Invalid ydata dtype\n \"\"\"\n obj = create_signal(\n title,\n xdata,\n ydata,\n units=(xunit, yunit),\n labels=(xlabel, ylabel),\n )\n self.add_object(obj)\n return True\n\n def add_image(\n self,\n title: str,\n data: np.ndarray,\n xunit: str | None = None,\n yunit: str | None = None,\n zunit: str | None = None,\n xlabel: str | None = None,\n ylabel: str | None = None,\n zlabel: str | None = None,\n ) -> bool: # pylint: disable=too-many-arguments\n \"\"\"Add image data to DataLab.\n\n Args:\n title (str): Image title\n data (numpy.ndarray): Image data\n xunit (str | None): X unit. Defaults to None.\n yunit (str | None): Y unit. Defaults to None.\n zunit (str | None): Z unit. Defaults to None.\n xlabel (str | None): X label. Defaults to None.\n ylabel (str | None): Y label. Defaults to None.\n zlabel (str | None): Z label. 
Defaults to None.\n\n Returns:\n bool: True if image was added successfully, False otherwise\n\n Raises:\n ValueError: Invalid data dtype\n \"\"\"\n obj = create_image(\n title,\n data,\n units=(xunit, yunit, zunit),\n labels=(xlabel, ylabel, zlabel),\n )\n self.add_object(obj)\n return True\n\n # ------?\n def __about(self) -> None: # pragma: no cover\n \"\"\"About dialog box\"\"\"\n self.check_stable_release()\n if self.remote_server.port is None:\n xrpcstate = '<font color=\"red\">' + _(\"not started\") + \"</font>\"\n else:\n xrpcstate = _(\"started (port %s)\") % self.remote_server.port\n xrpcstate = f\"<font color='green'>{xrpcstate}</font>\"\n if Conf.main.process_isolation_enabled.get():\n pistate = \"<font color='green'>\" + _(\"enabled\") + \"</font>\"\n else:\n pistate = \"<font color='red'>\" + _(\"disabled\") + \"</font>\"\n adv_conf = \"<br>\".join(\n [\n \"<i>\" + _(\"Advanced configuration:\") + \"</i>\",\n \"• \" + _(\"XML-RPC server:\") + \" \" + xrpcstate,\n \"• \" + _(\"Process isolation:\") + \" \" + pistate,\n ]\n )\n pinfos = PluginRegistry.get_plugin_infos()\n created_by = _(\"Created by\")\n dev_by = _(\"Developed and maintained by %s open-source project team\") % APP_NAME\n copyrght = \"2023 Codra\"\n QW.QMessageBox.about(\n self,\n _(\"About\") + \" \" + APP_NAME,\n f\"\"\"<b>{APP_NAME}</b> v{__version__}<br>{APP_DESC}\n <p>{created_by} Pierre Raybaut<br>{dev_by}<br>Copyright &copy; {copyrght}\n <p>{adv_conf}<br><br>{pinfos}\"\"\",\n )\n\n def __edit_settings(self) -> None:\n \"\"\"Edit settings\"\"\"\n changed_options = edit_settings(self)\n for option in changed_options:\n if option == \"plot_toolbar_position\":\n for dock in self.docks.values():\n widget = dock.widget()\n if isinstance(widget, DockablePlotWidget):\n widget.update_toolbar_position()\n if option == \"ima_defaults\" and len(self.imagepanel) > 0:\n answer = QW.QMessageBox.question(\n self,\n _(\"Visualization settings\"),\n _(\n \"Default visualization settings have changed.<br><br>\"\n \"Do you want to update all active %s objects?\"\n )\n % _(\"image\"),\n QW.QMessageBox.Yes | QW.QMessageBox.No,\n )\n if answer == QW.QMessageBox.Yes:\n self.imagepanel.update_metadata_view_settings()\n\n def __show_logviewer(self) -> None:\n \"\"\"Show error logs\"\"\"\n logviewer.exec_cdl_logviewer_dialog(self)\n\n @staticmethod\n def test_segfault_error() -> None:\n \"\"\"Generate errors (both fault and traceback)\"\"\"\n import ctypes # pylint: disable=import-outside-toplevel\n\n ctypes.string_at(0)\n raise RuntimeError(\"!!! 
Testing RuntimeError !!!\")\n\n def show(self) -> None:\n \"\"\"Reimplement QMainWindow method\"\"\"\n super().show()\n if self.__old_size is not None:\n self.resize(self.__old_size)\n\n # ------Close window\n def close_properly(self) -> bool:\n \"\"\"Close properly\n\n Returns:\n bool: True if closed properly, False otherwise\n \"\"\"\n if not env.execenv.unattended and self.__is_modified:\n answer = QW.QMessageBox.warning(\n self,\n _(\"Quit\"),\n _(\n \"Do you want to save all signals and images \"\n \"to an HDF5 file before quitting DataLab?\"\n ),\n QW.QMessageBox.Yes | QW.QMessageBox.No | QW.QMessageBox.Cancel,\n )\n if answer == QW.QMessageBox.Yes:\n self.save_to_h5_file()\n if self.__is_modified:\n return False\n elif answer == QW.QMessageBox.Cancel:\n return False\n for panel in self.panels:\n if panel is not None:\n panel.close()\n if self.console is not None:\n try:\n self.console.close()\n except RuntimeError:\n # TODO: [P3] Investigate further why the following error occurs when\n # restarting the mainwindow (this is *not* a production case):\n # \"RuntimeError: wrapped C/C++ object of type DockableConsole\n # has been deleted\".\n # Another solution to avoid this error would be to really restart\n # the application (run each unit test in a separate process), but\n # it would represent too much effort for an error occuring in test\n # configurations only.\n pass\n self.reset_all()\n self.__save_pos_and_size()\n self.__unregister_plugins()\n\n # Saving current tab for next session\n Conf.main.current_tab.set(self.tabwidget.currentIndex())\n\n execenv.log(self, \"closed properly\")\n return True\n\n def closeEvent(self, event: QG.QCloseEvent) -> None:\n \"\"\"Reimplement QMainWindow method\"\"\"\n if self.hide_on_close:\n self.__old_size = self.size()\n self.hide()\n else:\n if self.close_properly():\n self.SIG_CLOSING.emit()\n event.accept()\n else:\n event.ignore()" }, { "identifier": "embedded1_unit", "path": "cdl/tests/features/embedded1_unit.py", "snippet": "class HostWidget(QW.QWidget):\nclass AbstractClientWindowMeta(type(QW.QMainWindow), abc.ABCMeta):\nclass AbstractClientWindow(QW.QMainWindow, metaclass=AbstractClientWindowMeta):\nclass AbstractHostWindow(AbstractClientWindow): # pylint: disable=abstract-method\nclass HostWindow(AbstractHostWindow):\n def __init__(self, parent=None):\n def log(self, message):\n def add_spacing(self, spacing: int) -> None:\n def add_label(self, text: str) -> None:\n def add_widget(self, obj: QW.QWidget, spacing_before: int = 0) -> None:\n def add_button(self, title, slot, spacing_before=0, icon=None):\n def add_stretch(self):\n def __init__(self):\n def sigtitle(self):\n def imatitle(self):\n def setup_window(self):\n def add_additional_buttons(self):\n def init_cdl(self):\n def raise_cdl(self):\n def close_cdl(self):\n def add_object(self, obj):\n def add_signals(self):\n def add_images(self):\n def remove_all(self):\n def remove_all(self):\n def add_additional_buttons(self):\n def import_object(self, panel, title):\n def import_signal(self):\n def import_image(self):\n def init_cdl(self):\n def cdl_was_closed(self):\n def close_cdl(self):\ndef run_host_window(klass):\ndef test_embedded_feature():\n PURPOSE = None\n INIT_BUTTON_LABEL = None\n SIG_TITLES = (\"Oscilloscope\", \"Digitizer\", \"Radiometer\", \"Voltmeter\", \"Sensor\")\n IMA_TITLES = (\n \"Camera\",\n \"Streak Camera\",\n \"Image Scanner\",\n \"Laser Beam Profiler\",\n \"Gated Imaging Camera\",\n )\n PURPOSE = _(\"This the host application, which embeds DataLab.\")\n 
INIT_BUTTON_LABEL = _(\"Open DataLab\")" } ]
from cdl.core.gui.main import CDLMainWindow
from cdl.tests.features import embedded1_unit
13,362
# -*- coding: utf-8 -*-
#
# Licensed under the terms of the BSD 3-Clause
# (see cdl/LICENSE for details)

"""
Application embedded test 2

DataLab main window is simply hidden when closing application.
It is shown and raised above other windows when reopening application.
"""

# guitest: show
# -*- coding: utf-8 -*-
#
# Licensed under the terms of the BSD 3-Clause
# (see cdl/LICENSE for details)

"""
Application embedded test 2

DataLab main window is simply hidden when closing application.
It is shown and raised above other windows when reopening application.
"""

# guitest: show
class HostWindow(embedded1_unit.AbstractHostWindow):
1
2023-11-09 16:56:03+00:00
16k
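The closeEvent/show overrides in the context snippet above implement a hide-on-close pattern: remember the window size, hide the window instead of destroying it, and restore the size on the next show. Below is a minimal, self-contained sketch of that pattern only; the PyQt5 imports, the HiddenOnCloseWindow name, and the event.ignore() call are illustrative assumptions, not DataLab's exact code.

# Minimal sketch of the hide-on-close pattern, assuming PyQt5; not DataLab code.
import sys

from PyQt5 import QtGui, QtWidgets


class HiddenOnCloseWindow(QtWidgets.QMainWindow):
    """Window that hides itself on close and restores its size when reshown."""

    def __init__(self, hide_on_close: bool = True) -> None:
        super().__init__()
        self.hide_on_close = hide_on_close
        self._old_size = None

    def closeEvent(self, event: QtGui.QCloseEvent) -> None:
        if self.hide_on_close:
            # Remember the geometry and keep the window alive for later reuse.
            self._old_size = self.size()
            self.hide()
            event.ignore()  # assumption: a host application re-shows the window later
        else:
            event.accept()

    def show(self) -> None:
        super().show()
        if self._old_size is not None:
            self.resize(self._old_size)


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    win = HiddenOnCloseWindow()
    win.show()
    sys.exit(app.exec_())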
lalalamdbf/PLSE_IDRR
src/prompt-tuning/prompt/pipeline_base.py
[ { "identifier": "InputExample", "path": "src/prompt-tuning/prompt/data_utils.py", "snippet": "class InputExample(object):\n \"\"\"A raw input example consisting of segments of text,\n a label for classification task or a target sequence of generation task.\n Other desired information can be passed via meta.\n\n Args:\n guid (:obj:`str`, optional): A unique identifier of the example.\n text_a (:obj:`str`, optional): The placeholder for sequence of text.\n text_b (:obj:`str`, optional): A secend sequence of text, which is not always necessary.\n label (:obj:`int`, optional): The label id of the example in classification task.\n tgt_text (:obj:`Union[str,List[str]]`, optional): The target sequence of the example in a generation task..\n meta (:obj:`Dict`, optional): An optional dictionary to store arbitrary extra information for the example.\n \"\"\"\n\n def __init__(self,\n guid = None,\n text_a = \"\",\n text_b = \"\",\n label = None,\n meta: Optional[Dict] = None,\n tgt_text: Optional[Union[str,List[str]]] = None\n ):\n\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n self.meta = meta if meta else {}\n self.tgt_text = tgt_text\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def to_dict(self):\n r\"\"\"Serialize this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n r\"\"\"Serialize this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n def keys(self, keep_none=False):\n return [key for key in self.__dict__.keys() if getattr(self, key) is not None]\n\n @staticmethod\n def load_examples(path: str) -> List['InputExample']:\n \"\"\"Load a set of input examples from a file\"\"\"\n with open(path, 'rb') as fh:\n return pickle.load(fh)\n\n @staticmethod\n def save_examples(examples: List['InputExample'], path: str) -> None:\n \"\"\"Save a set of input examples to a file\"\"\"\n with open(path, 'wb') as fh:\n pickle.dump(examples, fh)" }, { "identifier": "InputFeatures", "path": "src/prompt-tuning/prompt/data_utils.py", "snippet": "class InputFeatures(dict):\n \"\"\"\n The class for input to the PLM and Prompts. To make users explicitly know the available keys,\n we define a dict with a set of predefined possible keys. The default value to any key is None.\n When use it as a dict, all the keys whose values are None are invisible.\n\n This class support most of the dict's operation (See Examples). It can also be consumed by\n pytorch's default_collate in DataLoader.\n Also a :py:meth:`to_tensor()` method is build to convert the values into torch.Tensor for torch's input.\n\n Examples:\n\n .. 
code-block:: python\n\n in_feat = InputFeatures(**{'input_ids':[1,4,5], 'soft_token_ids': [3,4,5]}) # init from dict\n print(in_feat.keys()) # ['input_ids, 'soft_token_ids']\n in_feat['label'] = 3 # can assign value like normal dict\n print(in_feat.keys()) # ['input_ids','label', 'soft_token_ids'] (Note that it's also ordered)\n print(in_feat['label']) # 3\n in_feat['alice'] = 0 # KeyError: Key alice not in predefined set of keys\n in_feat.values() # [[1,4,5], 3, [3,4,5]] (Note that it's also ordered)\n [in_feat[key] for key in in_feat] # [[1,4,5], 3, [3,4,5]]\n new_dict= {**in_feat, 'new_key':2} # new_dict is {'input_ids': [1, 4, 5], 'label': 3, 'soft_token_ids': [3, 4, 5], 'new_key': 2}\n\n Args:\n input_ids: Indices of input sequence tokens in the vocabulary.\n attention_mask: Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``: Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded)\n tokens.\n token_type_ids: (Optional) Segment token indices to indicate first and second\n portions of the inputs. Only some models use them.\n label: (Optional) Label corresponding to the input. Int for classification problems,\n float for regression problems.\n \"\"\"\n tensorable_keys = ['input_ids', 'inputs_embeds', 'attention_mask', 'token_type_ids', 'label',\n 'decoder_input_ids', 'decoder_inputs_embeds', 'soft_token_ids',\n 'past_key_values', 'loss_ids','conns_index']\n all_keys = ['input_ids', 'inputs_embeds', 'attention_mask', 'token_type_ids', 'label',\n 'decoder_input_ids', 'decoder_inputs_embeds', 'soft_token_ids',\n 'past_key_values', 'loss_ids','guid', 'tgt_text', 'encoded_tgt_text', 'input_ids_len','conns_index']\n non_tensorable_keys = []\n\n def __init__(self,\n input_ids: Optional[Union[List, torch.Tensor]] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n attention_mask: Optional[Union[List[int], torch.Tensor]] = None,\n token_type_ids: Optional[Union[List[int], torch.Tensor]] = None,\n label: Optional[Union[int, torch.Tensor]] = None,\n decoder_input_ids: Optional[Union[List, torch.Tensor]] = None,\n decoder_inputs_embeds: Optional[torch.Tensor] = None,\n soft_token_ids: Optional[Union[List, torch.Tensor]] = None,\n past_key_values: Optional[torch.Tensor] = None, # for prefix_tuning\n loss_ids: Optional[Union[List, torch.Tensor]] = None,\n guid: Optional[str] = None,\n tgt_text: Optional[str] = None,\n use_cache: Optional[bool] = None,\n encoded_tgt_text: Optional[str] = None,\n input_ids_len: Optional[int] = None,\n conns_index = None,\n **kwargs):\n\n self.input_ids = input_ids\n self.inputs_embeds = inputs_embeds\n self.attention_mask = attention_mask\n self.token_type_ids = token_type_ids\n self.label = label\n self.decoder_input_ids = decoder_input_ids\n self.decoder_inputs_embeds = decoder_inputs_embeds\n self.soft_token_ids = soft_token_ids\n self.past_key_values = past_key_values\n self.loss_ids = loss_ids\n self.guid = guid\n self.tgt_text = tgt_text\n self.encoded_tgt_text = encoded_tgt_text\n self.use_cache = use_cache\n self.input_ids_len = input_ids_len\n self.conns_index = conns_index\n\n for k in kwargs.keys():\n setattr(self, k, kwargs[k])\n\n @classmethod\n def add_tensorable_keys(cls, *args):\n cls.tensorable_keys.extend(args)\n\n @classmethod\n def add_not_tensorable_keys(cls, *args):\n cls.not_tensorable_keys.extend(args)\n\n @classmethod\n def add_keys(cls, *args):\n cls.all_keys.extend(args)\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def __len__(self):\n return 
len(self.keys())\n\n def to_tensor(self, device: str = 'cuda'):\n \"\"\"inplace operation, convert all tensorable features into :obj:`torch.tensor`\"\"\"\n for key in self.tensorable_keys:\n value = getattr(self, key)\n if value is not None:\n setattr(self, key, torch.tensor(value))\n return self\n\n def to(self, device: str = \"cuda:0\"):\n r\"\"\"move the tensor keys to runtime device, such as gpu:0\n \"\"\"\n for key in self.tensorable_keys:\n value = getattr(self, key)\n if value is not None:\n setattr(self, key, value.to(device))\n return self\n\n def cuda(self, device: str = \"cuda:0\"):\n r\"\"\"mimic the tensor behavior\n \"\"\"\n return self.to(device)\n\n def to_json_string(self, keep_none=False):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n data = {}\n for key in self.all_keys:\n value = getattr(self, key)\n if isinstance(value, torch.Tensor):\n data[key] = value.detach().cpu().tolist()\n elif value is None and keep_none:\n data[key] = None\n else:\n data[key] = value\n return json.dumps(data) + \"\\n\"\n\n def keys(self, keep_none=False) -> List[str]:\n \"\"\"get all keys of the InputFeatures\n\n Args:\n keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. Defaults to False.\n\n Returns:\n :obj:`List[str]`: keys of the InputFeatures\n \"\"\"\n if keep_none:\n return self.all_keys\n else:\n return [key for key in self.all_keys if getattr(self, key) is not None]\n\n def to_dict(self, keep_none=False) -> Dict[str, Any]:\n \"\"\"get the dict of mapping from keys to values of the InputFeatures\n\n Args:\n keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. Defaults to False.\n\n Returns:\n :obj:`Dict[str, Any]`: dict of mapping from keys to values of the InputFeatures\n \"\"\"\n data = {}\n for key in self.all_keys:\n value = getattr(self, key)\n if value is not None:\n data[key] = value\n elif value is None and keep_none:\n data[key] = None\n return data\n\n def __getitem__(self, key):\n return getattr(self, key)\n\n def __iter__(self):\n return iter(self.keys())\n\n def __setitem__(self, key, item):\n if key not in self.all_keys:\n raise KeyError(\"Key {} not in predefined set of keys\".format(key))\n setattr(self, key, item)\n\n def values(self, keep_none=False) -> List[Any]:\n \"\"\"get the values with respect to the keys of the InputFeatures\n\n Args:\n keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. Defaults to False.\n\n Returns:\n :obj:`List[Any]`: the values with respect to the keys of the InputFeatures\n \"\"\"\n return [getattr(self, key) for key in self.keys(keep_none=keep_none)]\n\n def __contains__(self, key, keep_none=False):\n return key in self.keys(keep_none)\n\n def items(self,):\n \"\"\"get the (key, value) pairs of the InputFeatures\n\n Args:\n keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. 
Defaults to False.\n\n Returns:\n :obj:`List[Any]`: the (key, value) pairs of the InputFeatures\n \"\"\"\n return [(key, self.__getitem__(key)) for key in self.keys()]\n\n @staticmethod\n def collate_fct(batch: List):\n r'''\n This function is used to collate the input_features.\n\n Args:\n batch (:obj:`List[Union[Dict, InputFeatures]]`): A batch of the current data.\n\n Returns:\n :obj:`InputFeatures`: Return the :py:class:`~openprompt.data_utils.data_utils.InputFeatures of the current batch of data.\n '''\n\n\n elem = batch[0]\n return_dict = {}\n for key in elem:\n if key == \"encoded_tgt_text\":\n return_dict[key] = [d[key] for d in batch]\n else:\n try:\n return_dict[key] = default_collate([d[key] for d in batch])\n except:\n print(f\"key{key}\\n d {[batch[i][key] for i in range(len(batch))]} \")\n\n return InputFeatures(**return_dict)" }, { "identifier": "TokenizerWrapper", "path": "src/prompt-tuning/prompt/utils.py", "snippet": "class TokenizerWrapper:\n def __init__(self,\n max_seq_length: int,\n tokenizer: PreTrainedTokenizer,\n # truncate_method: Optional[str] = 'tail',\n create_token_type_ids: Optional[str] = False,\n segment_emb: Optional[str] = False,\n **kwargs):\n self.max_seq_length = max_seq_length\n\n self.tokenizer = tokenizer\n self.truncate_fct = self.truncate_from_tail\n\n self.create_token_type_ids = create_token_type_ids\n self.segment_emb = segment_emb\n\n self.template_mask_token = '<mask>'\n # self.template_eos_token = '<eos>'\n # self.template_bos_token = '<bos>'\n self.template_sep_token = '<sep>'\n self.template_cls_token = '<cls>'\n self.template_pad_token = '<pad>'\n\n from transformers import logging\n verbosity_before = logging.get_verbosity()\n logging.set_verbosity(logging.CRITICAL) # TODO solve this in a more elegant way\n self.mask_token_map = {self.template_mask_token: self.tokenizer.mask_token if hasattr(self.tokenizer, 'mask_token') else ''}\n # self.eos_token_map = {self.template_eos_token: self.tokenizer.eos_token if hasattr(self.tokenizer, 'eos_token') else ''}\n # self.bos_token_map = {self.template_bos_token: self.tokenizer.bos_token if hasattr(self.tokenizer, 'bos_token') else ''}\n self.sep_token_map = {self.template_sep_token: self.tokenizer.sep_token if hasattr(self.tokenizer, 'sep_token') else ''}\n self.cls_token_map = {self.template_cls_token: self.tokenizer.cls_token if hasattr(self.tokenizer, 'cls_token') else ''}\n self.pad_token_map = {self.template_pad_token: self.tokenizer.pad_token if hasattr(self.tokenizer, 'pad_token') else ''}\n logging.set_verbosity(verbosity_before)\n\n self.num_truncated_sentences = 0\n self.total_passed_sentences = 0\n\n @property\n def truncate_rate(self,):\n r\"\"\"Using this function, one can easily identify how many sentence has be truncated, thus help the user to choose a better thresthold for chunking.\n \"\"\"\n if self.total_passed_sentences==0:\n return None\n else:\n return self.num_truncated_sentences/self.total_passed_sentences\n\n @property\n def special_tokens_maps(self,) -> Dict:\n r\"\"\"This need to be specified in specific language model\n \"\"\"\n if not hasattr(self, \"_special_tokens_map\"):\n _special_tokens_map = {}\n for attrname in self.__dict__.keys():\n if attrname.endswith('_token_map'):\n _special_tokens_map.update(getattr(self, attrname))\n return _special_tokens_map\n\n def tokenize_with_mask(self,\n wrapped_example: List[Dict],\n ) -> InputFeatures:\n raise NotImplementedError\n\n def tokenize_without_mask(self,\n wrapped_example: List[Dict],\n ) -> InputFeatures:\n raise 
NotImplementedError\n\n\n @staticmethod\n def truncate_from_tail(input_dict: Dict,\n num_tokens_to_truncate: int=0) -> Dict:\n r\"\"\"truncate the inputs from the rear\n \"\"\"\n truncated_example = defaultdict(list)\n shortenable_ids = input_dict['shortenable_ids']\n for key in input_dict:\n parts = input_dict[key]\n to_trunc = num_tokens_to_truncate\n for i, part in enumerate(parts[::-1]):\n if len(part) == 0: # to prevent some part are empty after tokenization\n continue\n if shortenable_ids[-1-i][0]==0: # ==0 means the part is not shortenable\n continue\n parts[-1-i] = part[:-to_trunc] if to_trunc<len(part) else []\n to_trunc -= len(part)\n if to_trunc <= 0:\n break\n truncated_example[key] = parts\n return truncated_example\n\n\n @staticmethod\n def concate_parts(input_dict: Dict) -> Dict:\n for key in input_dict:\n input_dict[key] = list(itertools.chain(*input_dict[key]))\n return input_dict\n\n @staticmethod\n def padding(input_dict: Dict,\n max_len: int, pad_id_for_inputs: int=0, pad_id_for_others: int=0) -> None:\n for key, value in input_dict.items():\n if (len(input_dict[key]) > max_len):\n raise ValueError(f'''Truncated seq length of '{key}' still greater than max length {max_len}.\"\\\n \"One possible reason is that no enough shortenable parts in template. Try adding {{\"shortenable\": \"True\"}} property.\n ''')\n if 'input' in key:\n input_dict[key].extend([pad_id_for_inputs]*(max_len-len(value)))\n else:\n input_dict[key].extend([pad_id_for_others]*(max_len-len(value)))\n return input_dict\n\n\n def add_special_tokens(self, encoder_inputs):\n # add special tokens\n for key in encoder_inputs:\n if key == \"input_ids\":\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n encoder_inputs[key] = self.tokenizer.build_inputs_with_special_tokens(\n encoder_inputs[key])\n else:\n special_tokens_mask = np.array(self.tokenizer.get_special_tokens_mask(encoder_inputs[key]))\n with_special_tokens = np.array(self.tokenizer.build_inputs_with_special_tokens(encoder_inputs[key]))\n if key in [\"soft_token_ids\"]: # TODO maybe more than this\n encoder_inputs[key] = ((1-special_tokens_mask) * with_special_tokens).tolist() # use 0 as special\n else:\n encoder_inputs[key] = ((1-special_tokens_mask) * with_special_tokens - special_tokens_mask*100).tolist() # use -100 as special\n return encoder_inputs\n\n def truncate(self, encoder_inputs):\n total_tokens = sum([len(part) for part in encoder_inputs['input_ids']])\n num_specials = self.num_special_tokens_to_add\n num_tokens_to_truncate = total_tokens - self.max_seq_length + num_specials\n self.total_passed_sentences+=1\n if num_tokens_to_truncate>0:\n self.num_truncated_sentences += 1\n encoder_inputs = self.truncate_fct(input_dict=encoder_inputs,\n num_tokens_to_truncate=num_tokens_to_truncate)\n return encoder_inputs" }, { "identifier": "Template", "path": "src/prompt-tuning/prompt/prompt_base.py", "snippet": "class Template(nn.Module):\n r'''\n Base class for all the templates.\n Most of methods are abstract, with some exceptions to hold the common methods for all template, such as ``loss_ids``, ``save``, ``load``.\n\n Args:\n tokenizer (:obj:`PreTrainedTokenizer`): A tokenizer to appoint the vocabulary and the tokenization strategy.\n placeholder_mapping (:obj:`dict`): A place holder to represent the original input text.\n '''\n\n registered_inputflag_names = [\"loss_ids\", \"shortenable_ids\"]\n\n def __init__(self,\n tokenizer: PreTrainedTokenizer,\n placeholder_mapping: dict = {'<text_a>':'text_a','<text_b>':'text_b'},\n 
):\n super().__init__()\n self.tokenizer = tokenizer\n self.placeholder_mapping = placeholder_mapping\n self._in_on_text_set = False\n\n self.mixed_token_start = \"{\"\n self.mixed_token_end = \"}\"\n\n\n def get_default_loss_ids(self) -> List[int]:\n '''Get the loss indices for the template using mask.\n e.g. when self.text is ``'{\"placeholder\": \"text_a\"}. {\"meta\": \"word\"} is {\"mask\"}.'``,\n output is ``[0, 0, 0, 0, 1, 0]``.\n\n Returns:\n :obj:`List[int]`: A list of integers in the range [0, 1]:\n\n - 1 for a masked tokens.\n - 0 for a sequence tokens.\n '''\n return [1 if 'mask' in d else 0 for d in self.text]\n\n def get_default_shortenable_ids(self) -> List[int]:\n \"\"\"Every template needs shortenable_ids, denoting which part of the template can be truncate to fit\n the language model's ``max_seq_length``. Default: the input text is shortenable, while the template text and other\n special tokens are not shortenable.\n\n e.g. when self.text is ``'{\"placeholder\": \"text_a\"} {\"placeholder\": \"text_b\", \"shortenable\": False} {\"meta\": \"word\"} is {\"mask\"}.'``,\n output is ``[1, 0, 0, 0, 0, 0, 0]``.\n\n Returns:\n :obj:`List[int]`: A list of integers in the range ``[0, 1]``:\n\n - 1 for the input tokens.\n - 0 for the template sequence tokens.\n \"\"\"\n idx = []\n for d in self.text:\n if 'shortenable' in d:\n idx.append(1 if d['shortenable'] else 0)\n else:\n idx.append(1 if 'placeholder' in d else 0)\n return idx\n\n def get_default_soft_token_ids(self) -> List[int]:\n r'''\n This function identifies which tokens are soft tokens.\n\n Sometimes tokens in the template are not from the vocabulary,\n but a sequence of soft tokens.\n In this case, you need to implement this function\n\n Raises:\n NotImplementedError: if needed, add ``soft_token_ids`` into ``registered_inputflag_names`` attribute of Template class and implement this method.\n '''\n raise NotImplementedError\n\n def incorporate_text_example(self,\n example: InputExample,\n text = None,\n ):\n if text is None:\n text = self.text.copy()\n else:\n text = text.copy()\n\n for i, d in enumerate(text):\n if 'placeholder' in d:\n text[i] = d[\"add_prefix_space\"] + d.get(\"post_processing\", lambda x:x)(getattr(example, d['placeholder']))\n elif 'meta' in d:\n text[i] = d[\"add_prefix_space\"] + d.get(\"post_processing\", lambda x:x)(example.meta[d['meta']])\n elif 'soft' in d:\n text[i] = ''; # unused\n elif 'mask' in d:\n text[i] = '<mask>'\n elif 'special' in d:\n text[i] = d['special']\n elif 'text' in d:\n text[i] = d[\"add_prefix_space\"] + d['text']\n else:\n raise ValueError(f'can not parse {d}')\n return text\n\n def _check_template_format(self, ):\n r\"\"\"check whether the template format is correct.\n TODO: add more\n \"\"\"\n mask_num = 0\n for i, d in enumerate(self.text):\n if 'mask' in d:\n mask_num += 1\n\n if mask_num==0:\n raise RuntimeError(f\"'mask' position not found in the template: {self.text}. 
Please Check!\")\n\n\n\n\n def parse_text(self, text: str) -> List[Dict]:\n parsed = []\n i = 0\n while i < len(text):\n d = {\"add_prefix_space\": ' ' if (i > 0 and text[i-1] == ' ') else ''}\n while i < len(text) and text[i] == ' ':\n d[\"add_prefix_space\"] = ' '\n i = i + 1\n if i == len(text): break\n\n if text[i] != self.mixed_token_start:\n j = i + 1\n while j < len(text):\n if text[j] == self.mixed_token_start:\n break\n j = j + 1\n d[\"text\"] = text[i:j].rstrip(' ')\n i = j\n\n else:\n j = i + 1\n mixed_token_cnt = 1 # { {} {} } nested support\n while j < len(text):\n if text[j] == self.mixed_token_end:\n mixed_token_cnt -= 1\n if mixed_token_cnt == 0: break\n elif text[j] == self.mixed_token_start:\n mixed_token_cnt += 1\n j = j + 1\n if j == len(text):\n raise ValueError(f\"mixed_token_start {self.mixed_token_start} at position {i} has no corresponding mixed_token_end {self.mixed_token_end}\")\n dict_str = '{'+text[i+1:j]+'}'\n try:\n val = eval(dict_str)\n if isinstance(val, set):\n val = {k: None for k in val}\n d.update(val)\n except:\n import traceback\n print(traceback.format_exc())\n print(f\"syntax error in {dict_str}\")\n exit()\n i = j + 1\n\n parsed.append(d)\n\n return parsed\n\n # @abstractmethod\n def wrap_one_example(self,\n example: InputExample) -> List[Dict]:\n r'''Given an input example which contains input text, which can be referenced\n by self.template.placeholder_mapping 's value.\n This function process the example into a list of dict,\n Each dict functions as a group, which has the sample properties, such as\n whether it's shortenable, whether it's the masked position, whether it's soft token, etc.\n Since a text will be tokenized in the subsequent processing procedure,\n these attributes are broadcasted along the tokenized sentence.\n\n Args:\n example (:obj:`InputExample`): An :py:class:`~openprompt.data_utils.data_utils.InputExample` object, which should have attributes that are able to be filled in the template.\n\n Returns:\n :obj:`List[Dict]`: A list of dict of the same length as self.text. e.g. ``[{\"loss_ids\": 0, \"text\": \"It was\"}, {\"loss_ids\": 1, \"text\": \"<mask>\"}, ]``\n '''\n\n if self.text is None:\n raise ValueError(\"template text has not been initialized\")\n if isinstance(example, InputExample):\n text = self.incorporate_text_example(example)\n\n not_empty_keys = example.keys()\n for placeholder_token in self.placeholder_mapping:\n not_empty_keys.remove(self.placeholder_mapping[placeholder_token]) # placeholder has been processed, remove\n not_empty_keys.remove('meta') # meta has been processed\n\n keys, values= ['text'], [text]\n for inputflag_name in self.registered_inputflag_names:\n keys.append(inputflag_name)\n v = None\n if hasattr(self, inputflag_name) and getattr(self, inputflag_name) is not None:\n v = getattr(self, inputflag_name)\n elif hasattr(self, \"get_default_\"+inputflag_name):\n v = getattr(self, \"get_default_\"+inputflag_name)()\n setattr(self, inputflag_name, v) # cache\n else:\n raise ValueError(\"\"\"\n Template's inputflag '{}' is registered but not initialize.\n Try using template.{} = [...] 
to initialize\n or create an method get_default_{}(self) in your template.\n \"\"\".format(inputflag_name, inputflag_name, inputflag_name))\n\n if len(v) != len(text):\n raise ValueError(\"Template: len({})={} doesn't match len(text)={}.\"\\\n .format(inputflag_name, len(v), len(text)))\n values.append(v)\n wrapped_parts_to_tokenize = []\n for piece in list(zip(*values)):\n wrapped_parts_to_tokenize.append(dict(zip(keys, piece)))\n\n wrapped_parts_not_tokenize = {key: getattr(example, key) for key in not_empty_keys}\n return [wrapped_parts_to_tokenize, wrapped_parts_not_tokenize]\n else:\n raise TypeError(\"InputExample\")\n\n @abstractmethod\n def process_batch(self, batch):\n r\"\"\"Template should rewrite this method if you need to process the batch input such as substituting embeddings.\n \"\"\"\n return batch # not being processed\n\n def post_processing_outputs(self, outputs):\n r\"\"\"Post processing the outputs of language models according\n to the need of template. Most templates don't need post processing,\n The template like SoftTemplate, which appends soft template as a module\n (rather than a sequence of input tokens) to the input,\n should remove the outputs on these positions to keep the seq_len the same\n \"\"\"\n return outputs\n\n def save(self,\n path: str,\n **kwargs) -> None:\n r'''\n A save method API.\n\n Args:\n path (str): A path to save your template.\n '''\n raise NotImplementedError\n\n @property\n def text(self):\n return self._text\n\n @text.setter\n def text(self, text):\n self._text = text\n if text is None:\n return\n if not self._in_on_text_set:\n self.safe_on_text_set()\n self._check_template_format()\n\n def safe_on_text_set(self) -> None:\n r\"\"\"With this wrapper function, setting text inside ``on_text_set()``\n will not trigger ``on_text_set()`` again to prevent endless recursion.\n \"\"\"\n self._in_on_text_set = True\n self.on_text_set()\n self._in_on_text_set = False\n\n @abstractmethod\n def on_text_set(self):\n r\"\"\"\n A hook to do something when template text was set.\n The designer of the template should explicitly know what should be down when the template text is set.\n \"\"\"\n raise NotImplementedError\n\n def from_file(self,\n path: str,\n choice: int = 0,\n ):\n r'''\n Read the template from a local file.\n\n Args:\n path (:obj:`str`): The path of the local template file.\n choice (:obj:`int`): The id-th line of the file.\n '''\n with open(path, 'r') as fin:\n text = fin.readlines()[choice].rstrip()\n self.text = text\n return self" }, { "identifier": "Verbalizer", "path": "src/prompt-tuning/prompt/prompt_base.py", "snippet": "class Verbalizer(nn.Module):\n r'''\n Base class for all the verbalizers.\n\n Args:\n tokenizer (:obj:`PreTrainedTokenizer`): A tokenizer to appoint the vocabulary and the tokenization strategy.\n classes (:obj:`Sequence[str]`): A sequence of classes that need to be projected.\n '''\n def __init__(self,\n tokenizer: Optional[PreTrainedTokenizer] = None,\n classes: Optional[Sequence[str]] = None,\n num_classes: Optional[int] = None,\n ):\n super().__init__()\n self.tokenizer = tokenizer\n self.classes = classes\n if classes is not None and num_classes is not None:\n assert len(classes) == num_classes, \"len(classes) != num_classes, Check you config.\"\n self.num_classes = num_classes\n elif num_classes is not None:\n self.num_classes = num_classes\n elif classes is not None:\n self.num_classes = len(classes)\n else:\n self.num_classes = None\n # raise AttributeError(\"No able to configure num_classes\")\n 
self._in_on_label_words_set = False\n\n @property\n def label_words(self,):\n r'''\n Label words means the words in the vocabulary projected by the labels.\n E.g. if we want to establish a projection in sentiment classification: positive :math:`\\rightarrow` {`wonderful`, `good`},\n in this case, `wonderful` and `good` are label words.\n '''\n if not hasattr(self, \"_label_words\"):\n raise RuntimeError(\"label words haven't been set.\")\n return self._label_words\n\n @label_words.setter\n def label_words(self, label_words):\n if label_words is None:\n return\n self._label_words = self._match_label_words_to_label_ids(label_words)\n if not self._in_on_label_words_set:\n self.safe_on_label_words_set()\n\n def _match_label_words_to_label_ids(self, label_words): # TODO newly add function after docs written # TODO rename this function\n \"\"\"\n sort label words dict of verbalizer to match the label order of the classes\n \"\"\"\n if isinstance(label_words, dict):\n if self.classes is None:\n raise ValueError(\"\"\"\n classes attribute of the Verbalizer should be set since your given label words is a dict.\n Since we will match the label word with respect to class A, to A's index in classes\n \"\"\")\n if set(label_words.keys()) != set(self.classes):\n raise ValueError(\"name of classes in verbalizer are different from those of dataset\")\n label_words = [ # sort the dict to match dataset\n label_words[c]\n for c in self.classes\n ] # length: label_size of the whole task\n elif isinstance(label_words, list) or isinstance(label_words, tuple):\n pass\n # logger.info(\"\"\"\n # Your given label words is a list, by default, the ith label word in the list will match class i of the dataset.\n # Please make sure that they have the same order.\n # Or you can pass label words as a dict, mapping from class names to label words.\n # \"\"\")\n else:\n raise ValueError(\"Verbalizer label words must be list, tuple or dict\")\n return label_words\n\n def safe_on_label_words_set(self,):\n self._in_on_label_words_set = True\n self.on_label_words_set()\n self._in_on_label_words_set = False\n\n def on_label_words_set(self,):\n r\"\"\"A hook to do something when textual label words were set.\n \"\"\"\n pass\n\n @property\n def vocab(self,) -> Dict:\n if not hasattr(self, '_vocab'):\n self._vocab = self.tokenizer.convert_ids_to_tokens(np.arange(self.vocab_size).tolist())\n return self._vocab\n\n @property\n def vocab_size(self,) -> int:\n return self.tokenizer.vocab_size\n\n @abstractmethod\n def generate_parameters(self, **kwargs) -> List:\n r\"\"\"\n The verbalizer can be seen as an extra layer on top of the original\n pre-trained models. 
In manual verbalizer, it is a fixed one-hot vector of dimension\n ``vocab_size``, with the position of the label word being 1 and 0 everywhere else.\n In other situation, the parameters may be a continuous vector over the\n vocab, with each dimension representing a weight of that token.\n Moreover, the parameters may be set to trainable to allow label words selection.\n\n Therefore, this function serves as an abstract methods for generating the parameters\n of the verbalizer, and must be instantiated in any derived class.\n\n Note that the parameters need to be registered as a part of pytorch's module to\n It can be achieved by wrapping a tensor using ``nn.Parameter()``.\n \"\"\"\n raise NotImplementedError\n\n def register_calibrate_logits(self, logits: torch.Tensor):\n r\"\"\"\n This function aims to register logits that need to be calibrated, and detach the original logits from the current graph.\n \"\"\"\n if logits.requires_grad:\n logits = logits.detach()\n self._calibrate_logits = logits\n\n def process_outputs(self,\n outputs: torch.Tensor,\n conn_linear_logits = None, \n **kwargs):\n r\"\"\"By default, the verbalizer will process the logits of the PLM's\n output.\n\n Args:\n logits (:obj:`torch.Tensor`): The current logits generated by pre-trained language models.\n batch (:obj:`Union[Dict, InputFeatures]`): The input features of the data.\n \"\"\"\n if conn_linear_logits != None:\n return self.process_logits(outputs, conn_linear_logits, **kwargs)\n else:\n return self.process_logits(outputs, **kwargs)\n\n def gather_outputs(self, outputs: ModelOutput):\n r\"\"\" retrieve useful output for the verbalizer from the whole model output\n By default, it will only retrieve the logits\n\n Args:\n outputs (:obj:`ModelOutput`) The output from the pretrained language model.\n\n Return:\n :obj:`torch.Tensor` The gathered output, should be of shape (``batch_size``,\n ``seq_len``, ``any``)\n \"\"\"\n return outputs.logits\n\n @staticmethod\n def aggregate(label_words_logits: torch.Tensor) -> torch.Tensor:\n r\"\"\" To aggregate logits on multiple label words into the label's logits\n Basic aggregator: mean of each label words' logits to a label's logits\n Can be re-implemented in advanced verbaliezer.\n\n Args:\n label_words_logits (:obj:`torch.Tensor`): The logits of the label words only.\n\n Return:\n :obj:`torch.Tensor`: The final logits calculated by the label words.\n \"\"\"\n if label_words_logits.dim()>2:\n return label_words_logits.mean(dim=-1)\n else:\n return label_words_logits\n\n\n def normalize(self, logits: torch.Tensor) -> torch.Tensor:\n r\"\"\"\n Given logits regarding the entire vocab, calculate the probs over the label words set by softmax.\n\n Args:\n logits(:obj:`Tensor`): The logits of the entire vocab.\n\n Returns:\n :obj:`Tensor`: The probability distribution over the label words set.\n \"\"\"\n batch_size = logits.shape[0]\n return F.softmax(logits.reshape(batch_size, -1), dim=-1).reshape(*logits.shape)\n\n @abstractmethod\n def project(self,\n logits: torch.Tensor,\n **kwargs) -> torch.Tensor:\n r\"\"\"This method receives input logits of shape ``[batch_size, vocab_size]``, and use the\n parameters of this verbalizer to project the logits over entire vocab into the\n logits of labels words.\n\n Args:\n logits (:obj:`Tensor`): The logits over entire vocab generated by the pre-trained language model with shape [``batch_size``, ``max_seq_length``, ``vocab_size``]\n\n Returns:\n :obj:`Tensor`: The normalized probs (sum to 1) of each label .\n \"\"\"\n raise 
NotImplementedError\n\n def handle_multi_token(self, label_words_logits, mask):\n r\"\"\"\n Support multiple methods to handle the multi tokens produced by the tokenizer.\n We suggest using 'first' or 'max' if the some parts of the tokenization is not meaningful.\n Can broadcast to 3-d tensor.\n\n Args:\n label_words_logits (:obj:`torch.Tensor`):\n\n Returns:\n :obj:`torch.Tensor`\n \"\"\"\n if self.multi_token_handler == \"first\":\n label_words_logits = label_words_logits.select(dim=-1, index=0)\n elif self.multi_token_handler == \"max\":\n label_words_logits = label_words_logits - 1000*(1-mask.unsqueeze(0))\n label_words_logits = label_words_logits.max(dim=-1).values\n elif self.multi_token_handler == \"mean\":\n label_words_logits = (label_words_logits*mask.unsqueeze(0)).sum(dim=-1)/(mask.unsqueeze(0).sum(dim=-1)+1e-15)\n else:\n raise ValueError(\"multi_token_handler {} not configured\".format(self.multi_token_handler))\n return label_words_logits\n\n @classmethod\n \n\n def from_file(self,\n path: str,\n choice: Optional[int] = 0 ):\n r\"\"\"Load the predefined label words from verbalizer file.\n Currently support three types of file format:\n 1. a .jsonl or .json file, in which is a single verbalizer\n in dict format.\n 2. a .jsonal or .json file, in which is a list of verbalizers in dict format\n 3. a .txt or a .csv file, in which is the label words of a class are listed in line,\n separated by commas. Begin a new verbalizer by an empty line.\n This format is recommended when you don't know the name of each class.\n\n The details of verbalizer format can be seen in :ref:`How_to_write_a_verbalizer`.\n\n Args:\n path (:obj:`str`): The path of the local template file.\n choice (:obj:`int`): The choice of verbalizer in a file containing\n multiple verbalizers.\n\n Returns:\n Template : `self` object\n \"\"\"\n if path.endswith(\".txt\") or path.endswith(\".csv\"):\n with open(path, 'r') as f:\n lines = f.readlines()\n label_words_all = []\n label_words_single_group = []\n for line in lines:\n line = line.strip().strip(\" \")\n if line == \"\":\n if len(label_words_single_group)>0:\n label_words_all.append(label_words_single_group)\n label_words_single_group = []\n else:\n label_words_single_group.append(line)\n if len(label_words_single_group) > 0: # if no empty line in the last\n label_words_all.append(label_words_single_group)\n if choice >= len(label_words_all):\n raise RuntimeError(\"choice {} exceed the number of verbalizers {}\"\n .format(choice, len(label_words_all)))\n\n label_words = label_words_all[choice]\n label_words = [label_words_per_label.strip().split(\",\") \\\n for label_words_per_label in label_words]\n\n elif path.endswith(\".jsonl\") or path.endswith(\".json\"):\n with open(path, \"r\") as f:\n label_words_all = json.load(f)\n # if it is a file containing multiple verbalizers\n if isinstance(label_words_all, list):\n if choice >= len(label_words_all):\n raise RuntimeError(\"choice {} exceed the number of verbalizers {}\"\n .format(choice, len(label_words_all)))\n label_words = label_words_all[choice]\n elif isinstance(label_words_all, dict):\n label_words = label_words_all\n if choice>0:\n print(\"Choice of verbalizer is 1, but the file \\\n only contains one verbalizer.\")\n\n self.label_words = label_words\n if self.num_classes is not None:\n num_classes = len(self.label_words)\n assert num_classes==self.num_classes, 'number of classes in the verbalizer file\\\n does not match the predefined num_classes.'\n return self" } ]
from pickle import FALSE
from torch.utils.data.sampler import RandomSampler
from transformers.configuration_utils import PretrainedConfig
from transformers.generation_utils import GenerationMixin
from torch.utils.data import Dataset
from typing import *
from .data_utils import InputExample, InputFeatures
from torch.utils.data._utils.collate import default_collate
from tqdm.std import tqdm
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.utils.dummy_pt_objects import PreTrainedModel
from .utils import TokenizerWrapper
from .prompt_base import Template, Verbalizer
from collections import defaultdict
from collections import namedtuple
from torch.utils.data import DataLoader
import torch
import torch.nn as nn
import inspect
import numpy as np
10,931
def signature(f):
    r"""Get the function f 's input arguments. A useful gadget
    when some function slot might be instantiated into multiple functions.

    Args:
        f (:obj:`function`) : the function to get the input arguments.

    Returns:
        namedtuple : of args, default, varargs, keywords, respectively.s

    """
    sig = inspect.signature(f)
    args = [
        p.name for p in sig.parameters.values()
        if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
    ]
    varargs = [
        p.name for p in sig.parameters.values()
        if p.kind == inspect.Parameter.VAR_POSITIONAL
    ]
    varargs = varargs[0] if varargs else None
    keywords = [
        p.name for p in sig.parameters.values()
        if p.kind == inspect.Parameter.VAR_KEYWORD
    ]
    keywords = keywords[0] if keywords else None
    defaults = [
        p.default for p in sig.parameters.values()
        if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
        and p.default is not p.empty
    ] or None
    argspec = namedtuple('Signature', ['args', 'defaults', 'varargs', 'keywords'])
    return argspec(args, defaults, varargs, keywords)


class PromptDataLoader(object):
    r"""
    PromptDataLoader wraps the original dataset. The input data is firstly wrapped with the
    prompt's template, and then is tokenized by a wrapperd-tokenizer.

    Args:
        dataset (:obj:`Dataset` or :obj:`List`): Either a DatasetObject or a list containing the input examples.
        template (:obj:`Template`): A derived class of :obj:`Template`
        tokenizer (:obj:`PretrainedTokenizer`): The pretrained tokenizer.
        tokenizer_wrapper_class (:cls:`TokenizerWrapper`): The class of tokenizer wrapper.
        max_seq_length (:obj:`int`, optional): The max sequence length of the input ids. It's used to truncate sentences.
        batch_size (:obj:`int`, optional): The batch_size of data loader
        teacher_forcing (:obj:`bool`, optional): Whether to fill the mask with target text. Set to true in training generation model.
        decoder_max_length (:obj:`int`, optional): the decoder maximum length of an encoder-decoder model.
        predict_eos_token (:obj:`bool`, optional): Whether to predict the <eos> token. Suggest to set to true in generation.
        truncate_method (:obj:`bool`, optional): the truncate method to use. select from `head`, `tail`, `balanced`.
        kwargs :Other kwargs that might be passed into a tokenizer wrapper.
    """

    def __init__(self,
                 dataset: Union[Dataset, List],
                 template: Template,
def signature(f):
    r"""Get the function f 's input arguments. A useful gadget
    when some function slot might be instantiated into multiple functions.

    Args:
        f (:obj:`function`) : the function to get the input arguments.

    Returns:
        namedtuple : of args, default, varargs, keywords, respectively.s

    """
    sig = inspect.signature(f)
    args = [
        p.name for p in sig.parameters.values()
        if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
    ]
    varargs = [
        p.name for p in sig.parameters.values()
        if p.kind == inspect.Parameter.VAR_POSITIONAL
    ]
    varargs = varargs[0] if varargs else None
    keywords = [
        p.name for p in sig.parameters.values()
        if p.kind == inspect.Parameter.VAR_KEYWORD
    ]
    keywords = keywords[0] if keywords else None
    defaults = [
        p.default for p in sig.parameters.values()
        if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
        and p.default is not p.empty
    ] or None
    argspec = namedtuple('Signature', ['args', 'defaults', 'varargs', 'keywords'])
    return argspec(args, defaults, varargs, keywords)


class PromptDataLoader(object):
    r"""
    PromptDataLoader wraps the original dataset. The input data is firstly wrapped with the
    prompt's template, and then is tokenized by a wrapperd-tokenizer.

    Args:
        dataset (:obj:`Dataset` or :obj:`List`): Either a DatasetObject or a list containing the input examples.
        template (:obj:`Template`): A derived class of :obj:`Template`
        tokenizer (:obj:`PretrainedTokenizer`): The pretrained tokenizer.
        tokenizer_wrapper_class (:cls:`TokenizerWrapper`): The class of tokenizer wrapper.
        max_seq_length (:obj:`int`, optional): The max sequence length of the input ids. It's used to truncate sentences.
        batch_size (:obj:`int`, optional): The batch_size of data loader
        teacher_forcing (:obj:`bool`, optional): Whether to fill the mask with target text. Set to true in training generation model.
        decoder_max_length (:obj:`int`, optional): the decoder maximum length of an encoder-decoder model.
        predict_eos_token (:obj:`bool`, optional): Whether to predict the <eos> token. Suggest to set to true in generation.
        truncate_method (:obj:`bool`, optional): the truncate method to use. select from `head`, `tail`, `balanced`.
        kwargs :Other kwargs that might be passed into a tokenizer wrapper.
    """

    def __init__(self,
                 dataset: Union[Dataset, List],
                 template: Template,
tokenizer_wrapper: Optional[TokenizerWrapper] = None,
2
2023-11-01 08:52:36+00:00
16k
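The cropped_code of the row above begins with a small signature helper that repackages inspect.signature output into a namedtuple of (args, defaults, varargs, keywords). A short usage sketch follows; the prompt.pipeline_base import path and the filter_kwargs helper are assumptions made for illustration and are not part of the dataset.

# Usage sketch for the `signature` gadget shown in the row above.
# Assumption: the repository's src/prompt-tuning directory is on sys.path,
# so the row's module is importable as `prompt.pipeline_base`.
from prompt.pipeline_base import signature


def example(a, b=1, *rest, **extra):
    return a


spec = signature(example)
# Positional-or-keyword names, their defaults, and the *args/**kwargs names:
assert spec.args == ["a", "b"]
assert spec.defaults == [1]
assert spec.varargs == "rest"
assert spec.keywords == "extra"


# A typical reason for such a helper: forward only the kwargs a callable accepts
# (hypothetical convenience function, not taken from the repository).
def filter_kwargs(fn, **kwargs):
    accepted = set(signature(fn).args)
    return {key: value for key, value in kwargs.items() if key in accepted}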
choderalab/chiron
Examples/LJ_mcmove.py
[ { "identifier": "LJPotential", "path": "chiron/potential.py", "snippet": "class LJPotential(NeuralNetworkPotential):\n def __init__(\n self,\n topology: Topology,\n sigma: unit.Quantity = 3.350 * unit.angstroms,\n epsilon: unit.Quantity = 1.0 * unit.kilocalories_per_mole,\n cutoff: unit.Quantity = unit.Quantity(1.0, unit.nanometer),\n ):\n \"\"\"\n Initialize the Lennard-Jones potential.\n\n Parameters\n ----------\n topology : Topology\n The topology of the system\n sigma : unit.Quantity, optional\n The distance at which the potential is zero, by default 3.350 * unit.angstroms\n epsilon : unit.Quantity, optional\n The depth of the potential well, by default 1.0 * unit.kilocalories_per_mole\n cutoff : unit.Quantity, optional\n The cutoff distance for the potential, by default 1.0 * unit.nanometer\n\n \"\"\"\n\n if not isinstance(topology, Topology):\n if not isinstance(topology, property):\n if topology is not None:\n raise TypeError(\n f\"Topology must be a Topology object or None, type(topology) = {type(topology)}\"\n )\n if not isinstance(sigma, unit.Quantity):\n raise TypeError(\n f\"sigma must be a unit.Quantity, type(sigma) = {type(sigma)}\"\n )\n if not isinstance(epsilon, unit.Quantity):\n raise TypeError(\n f\"epsilon must be a unit.Quantity, type(epsilon) = {type(epsilon)}\"\n )\n if not isinstance(cutoff, unit.Quantity):\n raise TypeError(\n f\"cutoff must be a unit.Quantity, type(cutoff) = {type(cutoff)}\"\n )\n\n if not sigma.unit.is_compatible(unit.angstrom):\n raise ValueError(f\"sigma must have units of distance, got {sigma.unit}\")\n if not epsilon.unit.is_compatible(unit.kilocalories_per_mole):\n raise ValueError(f\"epsilon must have units of energy, got {epsilon.unit}\")\n if not cutoff.unit.is_compatible(unit.nanometer):\n raise ValueError(f\"cutoff must have units of distance, got {cutoff.unit}\")\n\n self.sigma = sigma.value_in_unit_system(\n unit.md_unit_system\n ) # The distance at which the potential is zero\n self.epsilon = epsilon.value_in_unit_system(\n unit.md_unit_system\n ) # The depth of the potential well\n # The cutoff for a potential is often linked with the parameters and isn't really\n # something I think we should be changing dynamically.\n self.cutoff = cutoff.value_in_unit_system(unit.md_unit_system)\n self.topology = topology\n\n from functools import partial\n\n @partial(jax.jit, static_argnums=(0,))\n def _compute_energy_masked(self, distance, mask):\n \"\"\"\n Compute the LJ energy based on an array representing the distances between a given particle and its neighbors.\n Since the distance array is padded to a fixed length, we need to mask out the padded values before summing the energy.\n\n Parameters\n ----------\n distance : jnp.array\n The distances between a given particle and its neighbors\n mask : jnp.array\n An array indicating which values in the distance array are valid and which are padded [1.0 or 0.0]\n \"\"\"\n\n # we can just multiply by the mask rather than using jnp.where to mask.\n energy = mask * (\n 4\n * self.epsilon\n * ((self.sigma / distance) ** 12 - (self.sigma / distance) ** 6)\n )\n return energy.sum()\n\n def compute_energy(self, positions: jnp.array, nbr_list=None, debug_mode=False):\n \"\"\"\n Compute the LJ energy.\n\n Parameters\n ----------\n positions : jnp.array\n The positions of the particles in the system\n nbr_list : NeighborList, default=None\n Instance of a neighbor list or pair list class to use.\n If None, an unoptimized N^2 pairlist will be used without PBC conditions.\n Returns\n -------\n 
potential_energy : float\n The total potential energy of the system.\n\n \"\"\"\n # Compute the pair distances and displacement vectors\n\n if nbr_list is None:\n log.debug(\n \"nbr_list is None, computing using inefficient N^2 pairlist without PBC.\"\n )\n # Compute the pairlist for a given set of positions and a cutoff distance\n # Note in this case, we do not need the pairs or displacement vectors\n # Since we already calculate the distance in the pairlist computation\n # Pairs and displacement vectors are needed for an analytical evaluation of the force\n # which we will do as part of testing\n distances, displacement_vectors, pairs = self.compute_pairlist(\n positions, self.cutoff\n )\n # if our pairlist is empty, the particles are non-interacting and\n # the energy will be 0\n if distances.shape[0] == 0:\n return 0.0\n\n potential_energy = (\n 4\n * self.epsilon\n * ((self.sigma / distances) ** 12 - (self.sigma / distances) ** 6)\n )\n # sum over all pairs to get the total potential energy\n return potential_energy.sum()\n\n else:\n # ensure the neighborlist has been constructed before trying to use it\n\n if not nbr_list.is_built:\n raise ValueError(\"Neighborlist must be built before use\")\n\n # ensure that the cutoff in the neighbor list is the same as the cutoff in the potential\n if nbr_list.cutoff != self.cutoff:\n raise ValueError(\n f\"Neighborlist cutoff ({nbr_list.cutoff}) must be the same as the potential cutoff ({self.cutoff})\"\n )\n\n n_neighbors, pairs, mask, dist, displacement_vectors = nbr_list.calculate(\n positions\n )\n\n potential_energy = jax.vmap(self._compute_energy_masked, in_axes=(0))(\n dist, mask.astype(jnp.float32)\n )\n return potential_energy.sum()\n\n def compute_force(self, positions: jnp.array, nbr_list=None) -> jnp.array:\n \"\"\"\n Compute the LJ force using the negative of jax.grad.\n\n Parameters\n ----------\n positions : jnp.array\n The positions of the particles in the system\n nbr_list : NeighborList, optional\n Instance of the neighborlist class to use. 
By default, set to None, which will use an N^2 pairlist\n\n Returns\n -------\n force : jnp.array\n The forces on the particles in the system\n\n \"\"\"\n # force = -jax.grad(self.compute_energy)(positions, nbr_list)\n # return force\n return super().compute_force(positions, nbr_list=nbr_list)\n\n def compute_force_analytical(\n self,\n positions: jnp.array,\n ) -> jnp.array:\n \"\"\"\n Compute the LJ force using the analytical expression for testing purposes.\n\n Parameters\n ----------\n positions : jnp.array\n The positions of the particles in the system\n\n Returns\n -------\n force : jnp.array\n The forces on the particles in the system\n\n \"\"\"\n dist, displacement_vector, pairs = self.compute_pairlist(positions, self.cutoff)\n\n forces = (\n 24\n * (self.epsilon / (dist * dist))\n * (2 * (self.sigma / dist) ** 12 - (self.sigma / dist) ** 6)\n ).reshape(-1, 1) * displacement_vector\n\n force_array = jnp.zeros((positions.shape[0], 3))\n for force, p1, p2 in zip(forces, pairs[0], pairs[1]):\n force_array = force_array.at[p1].add(force)\n force_array = force_array.at[p2].add(-force)\n return force_array" }, { "identifier": "SamplerState", "path": "chiron/states.py", "snippet": "class SamplerState:\n \"\"\"\n Represents the state of the system that is updated during integration.\n\n Parameters\n ----------\n x0 : unit.Quantity\n The current positions of the particles in the simulation.\n velocities : unit.Quantity, optional\n The velocities of the particles in the simulation.\n box_vectors : unit.Quantity, optional\n The box vectors defining the simulation's periodic boundary conditions.\n\n \"\"\"\n\n def __init__(\n self,\n x0: unit.Quantity,\n velocities: Optional[unit.Quantity] = None,\n box_vectors: Optional[unit.Quantity] = None,\n ) -> None:\n # NOTE: all units are internally in the openMM units system as documented here:\n # http://docs.openmm.org/latest/userguide/theory/01_introduction.html#units\n if not isinstance(x0, unit.Quantity):\n raise TypeError(f\"x0 must be a unit.Quantity, got {type(x0)} instead.\")\n if velocities is not None and not isinstance(velocities, unit.Quantity):\n raise TypeError(\n f\"velocities must be a unit.Quantity, got {type(velocities)} instead.\"\n )\n if box_vectors is not None and not isinstance(box_vectors, unit.Quantity):\n if isinstance(box_vectors, List):\n try:\n box_vectors = self._convert_from_openmm_box(box_vectors)\n except:\n raise TypeError(f\"Unable to parse box_vectors {box_vectors}.\")\n else:\n raise TypeError(\n f\"box_vectors must be a unit.Quantity or openMM box, got {type(box_vectors)} instead.\"\n )\n if not x0.unit.is_compatible(unit.nanometer):\n raise ValueError(f\"x0 must have units of distance, got {x0.unit} instead.\")\n if velocities is not None and not velocities.unit.is_compatible(\n unit.nanometer / unit.picosecond\n ):\n raise ValueError(\n f\"velocities must have units of distance/time, got {velocities.unit} instead.\"\n )\n if box_vectors is not None and not box_vectors.unit.is_compatible(\n unit.nanometer\n ):\n raise ValueError(\n f\"box_vectors must have units of distance, got {box_vectors.unit} instead.\"\n )\n if box_vectors is not None and box_vectors.shape != (3, 3):\n raise ValueError(\n f\"box_vectors must be a 3x3 array, got {box_vectors.shape} instead.\"\n )\n\n self._x0 = x0\n self._velocities = velocities\n self._box_vectors = box_vectors\n self._distance_unit = unit.nanometer\n\n @property\n def x0(self) -> jnp.array:\n return self._convert_to_jnp(self._x0)\n\n @property\n def velocities(self) -> 
jnp.array:\n if self._velocities is None:\n return None\n return self._convert_to_jnp(self._velocities)\n\n @property\n def box_vectors(self) -> jnp.array:\n if self._box_vectors is None:\n return None\n return self._convert_to_jnp(self._box_vectors)\n\n @x0.setter\n def x0(self, x0: Union[jnp.array, unit.Quantity]) -> None:\n if isinstance(x0, unit.Quantity):\n self._x0 = x0\n else:\n self._x0 = unit.Quantity(x0, self._distance_unit)\n\n @property\n def distance_unit(self) -> unit.Unit:\n return self._distance_unit\n\n def _convert_to_jnp(self, array: unit.Quantity) -> jnp.array:\n \"\"\"\n Convert the sampler state to jnp arrays.\n \"\"\"\n import jax.numpy as jnp\n\n array_ = array.value_in_unit_system(unit.md_unit_system)\n return jnp.array(array_)\n\n def _convert_from_openmm_box(self, openmm_box_vectors: List) -> unit.Quantity:\n box_vec = []\n for i in range(0, 3):\n layer = []\n for j in range(0, 3):\n layer.append(\n openmm_box_vectors[i][j].value_in_unit(openmm_box_vectors[0].unit)\n )\n box_vec.append(layer)\n return unit.Quantity(jnp.array(box_vec), openmm_box_vectors[0].unit)" }, { "identifier": "ThermodynamicState", "path": "chiron/states.py", "snippet": "class ThermodynamicState:\n \"\"\"\n Represents the thermodynamic state of the system.\n\n Parameters\n ----------\n potential : NeuralNetworkPotential\n The potential energy function of the system.\n temperature : unit.Quantity, optional\n The temperature of the simulation.\n volume : unit.Quantity, optional\n The volume of the simulation.\n pressure : unit.Quantity, optional\n The pressure of the simulation.\n\n \"\"\"\n\n def __init__(\n self,\n potential: Optional[NeuralNetworkPotential],\n temperature: Optional[unit.Quantity] = None,\n volume: Optional[unit.Quantity] = None,\n pressure: Optional[unit.Quantity] = None,\n ):\n self.potential = potential\n\n if temperature is not None and not isinstance(temperature, unit.Quantity):\n raise TypeError(\n f\"temperature must be a unit.Quantity, got {type(temperature)} instead.\"\n )\n elif temperature is not None:\n if not temperature.unit.is_compatible(unit.kelvin):\n raise ValueError(\n f\"temperature must have units of temperature, got {temperature.unit} instead.\"\n )\n\n if volume is not None and not isinstance(volume, unit.Quantity):\n raise TypeError(\n f\"volume must be a unit.Quantity, got {type(volume)} instead.\"\n )\n elif volume is not None:\n if not volume.unit.is_compatible(unit.nanometer**3):\n raise ValueError(\n f\"volume must have units of distance**3, got {volume.unit} instead.\"\n )\n if pressure is not None and not isinstance(pressure, unit.Quantity):\n raise TypeError(\n f\"pressure must be a unit.Quantity, got {type(pressure)} instead.\"\n )\n elif pressure is not None:\n if not pressure.unit.is_compatible(unit.atmosphere):\n raise ValueError(\n f\"pressure must have units of pressure, got {pressure.unit} instead.\"\n )\n\n self.temperature = temperature\n if temperature is not None:\n self.beta = 1.0 / (unit.BOLTZMANN_CONSTANT_kB * (self.temperature))\n else:\n self.beta = None\n\n self.volume = volume\n self.pressure = pressure\n\n from .utils import get_nr_of_particles\n\n self.nr_of_particles = get_nr_of_particles(self.potential.topology)\n self._check_completness()\n\n def check_variables(self) -> None:\n \"\"\"\n Check if all necessary variables are set and log the simulation ensemble.\n \"\"\"\n variables = [\n \"temperature\",\n \"volume\",\n \"pressure\",\n ]\n set_variables = [var for var in variables if getattr(self, var) is not None]\n 
return set_variables\n\n def _check_completness(self):\n # check which variables are set\n set_variables = self.check_variables()\n\n if len(set_variables) == 0:\n log.info(\"No variables are set.\")\n\n # print all set variables\n for var in set_variables:\n log.info(f\"{var} is set.\")\n\n if self.temperature and self.volume and self.nr_of_particles:\n log.info(\"NVT ensemble simulated.\")\n if self.temperature and self.pressure and self.nr_of_particles:\n log.info(\"NpT ensemble is simulated.\")\n\n @classmethod\n def are_states_compatible(cls, state1, state2):\n \"\"\"\n Check if two simulation states are compatible.\n\n This method should define the criteria for compatibility,\n such as matching number of particles, etc.\n\n Parameters\n ----------\n state1 : SimulationState\n The first simulation state to compare.\n state2 : SimulationState\n The second simulation state to compare.\n\n Returns\n -------\n bool\n True if states are compatible, False otherwise.\n \"\"\"\n pass\n\n def get_reduced_potential(\n self, sampler_state: SamplerState, nbr_list=None\n ) -> float:\n \"\"\"\n Compute the reduced potential for the given sampler state.\n\n Parameters\n ----------\n sampler_state : SamplerState\n The sampler state for which to compute the reduced potential.\n nbr_list : NeighborList or PairList, optional\n The neighbor list or pair list routine to use for calculating the reduced potential.\n\n Returns\n -------\n float\n The reduced potential of the system.\n\n Notes\n -----\n The reduced potential is computed as:\n u = \\beta [U(x) + p V(x) + \\mu N(x)],\n where \\beta is the inverse temperature, p is the pressure,\n \\mu is the chemical potential, x are the atomic positions,\n U(x) is the potential energy, V(x) is the box volume,\n and N(x) is the number of particles.\n \"\"\"\n if self.beta is None:\n self.beta = 1.0 / (\n unit.BOLTZMANN_CONSTANT_kB * (self.temperature * unit.kelvin)\n )\n log.debug(f\"sample state: {sampler_state.x0}\")\n reduced_potential = (\n unit.Quantity(\n self.potential.compute_energy(sampler_state.x0, nbr_list),\n unit.kilojoule_per_mole,\n )\n ) / unit.AVOGADRO_CONSTANT_NA\n log.debug(f\"reduced potential: {reduced_potential}\")\n if self.pressure is not None:\n reduced_potential += self.pressure * self.volume\n\n return self.beta * reduced_potential\n\n def kT_to_kJ_per_mol(self, energy):\n energy = energy * unit.AVOGADRO_CONSTANT_NA\n return energy / self.beta" }, { "identifier": "NeighborListNsqrd", "path": "chiron/neighbors.py", "snippet": "class NeighborListNsqrd(PairsBase):\n \"\"\"\n N^2 neighborlist implementation that returns the particle pair ids, displacement vectors, and distances.\n\n Parameters\n ----------\n space: Space\n Class that defines how to calculate the displacement between two points and apply the boundary conditions\n cutoff: float, default = 2.5\n Cutoff distance for the neighborlist\n skin: float, default = 0.4\n Skin distance for the neighborlist\n n_max_neighbors: int, default=200\n Maximum number of neighbors for each particle. 
Used for padding arrays for efficient jax computations\n This will be checked and dynamically updated during the build stage\n Examples\n --------\n\n\n \"\"\"\n\n def __init__(\n self,\n space: Space,\n cutoff: unit.Quantity = unit.Quantity(1.2, unit.nanometer),\n skin: unit.Quantity = unit.Quantity(0.4, unit.nanometer),\n n_max_neighbors: float = 200,\n ):\n if not isinstance(space, Space):\n raise TypeError(f\"space must be of type Space, found {type(space)}\")\n if not cutoff.unit.is_compatible(unit.angstrom):\n raise ValueError(\n f\"cutoff must be a unit.Quantity with units of distance, cutoff.unit = {cutoff.unit}\"\n )\n if not skin.unit.is_compatible(unit.angstrom):\n raise ValueError(\n f\"cutoff must be a unit.Quantity with units of distance, skin.unit = {skin.unit}\"\n )\n\n self.cutoff = cutoff.value_in_unit_system(unit.md_unit_system)\n self.skin = skin.value_in_unit_system(unit.md_unit_system)\n self.cutoff_and_skin = self.cutoff + self.skin\n self.n_max_neighbors = n_max_neighbors\n self.space = space\n\n # set a a simple variable to know if this has at least been built once as opposed to just initialized\n # this does not imply that the neighborlist is up to date\n self.is_built = False\n\n # note, we need to use the partial decorator in order to use the jit decorate\n # so that it knows to ignore the `self` argument\n @partial(jax.jit, static_argnums=(0,))\n def _pairs_mask(self, particle_ids: jnp.array):\n \"\"\"\n Jitted function to generate mask that allows us to remove self-interactions and double-counting of pairs\n\n Parameters\n ----------\n particle_ids: jnp.array\n Array of particle ids\n\n Returns\n -------\n jnp.array\n Bool mask to remove self-interactions and double-counting of pairs\n\n \"\"\"\n # for the nsq approach, we consider the distance between a particle and all other particles in the system\n # if we used a cell list the possible_neighbors would be a smaller list, i.e., only those in the neigboring cells\n\n possible_neighbors = particle_ids\n\n particles_j = jnp.broadcast_to(\n possible_neighbors,\n (particle_ids.shape[0], possible_neighbors.shape[0]),\n )\n\n # reshape the particle_ids\n particles_i = jnp.reshape(particle_ids, (particle_ids.shape[0], 1))\n # create a mask to exclude self interactions and double counting\n temp_mask = particles_i < particles_j\n\n return temp_mask\n\n @partial(jax.jit, static_argnums=(0, 5))\n def _build_neighborlist(\n self, particle_i, reduction_mask, pid, coordinates, n_max_neighbors\n ):\n \"\"\"\n Jitted function to build the neighbor list for a single particle\n\n Parameters\n ----------\n particle_i: jnp.array\n X,Y,Z coordinates of particle i\n reduction_mask: jnp.array\n Mask to exclude self-interactions and double counting of pairs\n coordinates: jnp.array\n X,Y,Z coordinates of all particles\n n_max_neighbors: int\n Maximum number of neighbors for each particle. 
Used for padding arrays for efficient jax computations\n\n Returns\n -------\n neighbor_list_mask: jnp.array\n Mask to exclude padding from the neighbor list\n neighbor_list: jnp.array\n List of particle ids for the neighbors, padded to n_max_neighbors\n n_neighbors: int\n Number of neighbors for the particle\n \"\"\"\n\n # calculate the displacement between particle i and all other particles\n r_ij, dist = self.space.displacement(particle_i, coordinates)\n\n # neighbor_mask will be an array of length n_particles (i.e., length of coordinates)\n # where each element is True if the particle is a neighbor, False if it is not\n # subject to both the cutoff+skin and the reduction mask that eliminates double counting and self-interactions\n neighbor_mask = jnp.where(\n (dist < self.cutoff_and_skin) & (reduction_mask), True, False\n )\n # when we pad the neighbor list, we will use last particle id in the neighbor list\n # this choice was made such that when we use the neighbor list in the masked energy calculat\n # the padded values will result in reasonably well defined values\n fill_value = jnp.argmax(neighbor_mask)\n fill_value = jnp.where(fill_value == pid, fill_value + 1, fill_value)\n\n # count up the number of neighbors\n n_neighbors = jnp.where(neighbor_mask, 1, 0).sum()\n\n # since neighbor_mask indices have a one-to-one correspondence to particle ids,\n # applying jnp.where, will return an array of the indices that are neighbors.\n # since this needs to be uniformly sized, we can just fill this array up to the n_max_neighbors.\n neighbor_list = jnp.array(\n jnp.where(neighbor_mask, size=n_max_neighbors, fill_value=fill_value),\n dtype=jnp.uint32,\n )\n # we need to generate a new mask associatd with the padded neighbor list\n # to be able to quickly exclude the padded values from the neighbor list\n neighbor_list_mask = jnp.where(jnp.arange(n_max_neighbors) < n_neighbors, 1, 0)\n\n del r_ij, dist\n return neighbor_list_mask, neighbor_list, n_neighbors\n\n def build(\n self,\n coordinates: Union[jnp.array, unit.Quantity],\n box_vectors: Union[jnp.array, unit.Quantity],\n ):\n \"\"\"\n Build the neighborlist from an array of coordinates and box vectors.\n\n Parameters\n ----------\n coordinates: jnp.array\n Shape[N,3] array of particle coordinates\n box_vectors: jnp.array\n Shape[3,3] array of box vectors\n\n Returns\n -------\n None\n\n \"\"\"\n\n # set our reference coordinates\n # the call to x0 and box_vectors automatically convert these to jnp arrays in the correct unit system\n if isinstance(coordinates, unit.Quantity):\n if not coordinates.unit.is_compatible(unit.nanometer):\n raise ValueError(\n f\"Coordinates require distance units, not {coordinates.unit}\"\n )\n coordinates = coordinates.value_in_unit_system(unit.md_unit_system)\n\n if isinstance(box_vectors, unit.Quantity):\n if not box_vectors.unit.is_compatible(unit.nanometer):\n raise ValueError(\n f\"Box vectors require distance unit, not {box_vectors.unit}\"\n )\n box_vectors = box_vectors.value_in_unit_system(unit.md_unit_system)\n\n if box_vectors.shape != (3, 3):\n raise ValueError(\n f\"box_vectors should be a 3x3 array, shape provided: {box_vectors.shape}\"\n )\n\n self.ref_coordinates = coordinates\n self.box_vectors = box_vectors\n\n # the neighborlist assumes that the box vectors do not change between building and calculating the neighbor list\n # changes to the box vectors require rebuilding the neighbor list\n self.space.box_vectors = self.box_vectors\n\n # store the ids of all the particles\n self.particle_ids = 
jnp.array(\n range(0, self.ref_coordinates.shape[0]), dtype=jnp.uint32\n )\n\n # calculate which pairs to exclude\n reduction_mask = self._pairs_mask(self.particle_ids)\n\n # calculate the distance for all pairs this will return\n # neighbor_mask: an array of shape (n_particles, n_particles) where each element is the mask\n # to determine if the particle is a neighbor\n # neighbor_list: an array of shape (n_particles, n_max_neighbors) where each element is the particle id of the neighbor\n # this is padded with zeros to ensure a uniform size;\n # n_neighbors: an array of shape (n_particles) where each element is the number of neighbors for that particle\n\n self.neighbor_mask, self.neighbor_list, self.n_neighbors = jax.vmap(\n self._build_neighborlist, in_axes=(0, 0, 0, None, None)\n )(\n self.ref_coordinates,\n reduction_mask,\n self.particle_ids,\n self.ref_coordinates,\n self.n_max_neighbors,\n )\n\n self.neighbor_list = self.neighbor_list.reshape(-1, self.n_max_neighbors)\n\n while jnp.any(self.n_neighbors == self.n_max_neighbors).block_until_ready():\n log.debug(\n f\"Increasing n_max_neighbors from {self.n_max_neighbors} to at {jnp.max(self.n_neighbors)+10}\"\n )\n self.n_max_neighbors = int(jnp.max(self.n_neighbors) + 10)\n\n self.neighbor_mask, self.neighbor_list, self.n_neighbors = jax.vmap(\n self._build_neighborlist, in_axes=(0, 0, 0, None, None)\n )(\n self.ref_coordinates,\n reduction_mask,\n self.particle_ids,\n self.ref_coordinates,\n self.n_max_neighbors,\n )\n\n self.neighbor_list = self.neighbor_list.reshape(-1, self.n_max_neighbors)\n\n self.is_built = True\n\n @partial(jax.jit, static_argnums=(0,))\n def _calc_distance_per_particle(\n self, particle1, neighbors, neighbor_mask, coordinates\n ):\n \"\"\"\n Jitted function to calculate the distance between a particle and its neighbors\n\n Parameters\n ----------\n particle1: int\n Particle id\n neighbors: jnp.array\n Array of particle ids for the neighbors of particle1\n neighbor_mask: jnp.array\n Mask to exclude padding from the neighbor list of particle1\n coordinates: jnp.array\n X,Y,Z coordinates of all particles\n\n Returns\n -------\n n_pairs: int\n Number of interacting pairs for the particle\n mask: jnp.array\n Mask to exclude padding from the neighbor list of particle1.\n If a particle is within the interaction cutoff, the mask is 1, otherwise it is 0\n dist: jnp.array\n Array of distances between the particle and its neighbors\n r_ij: jnp.array\n Array of displacement vectors between the particle and its neighbors\n \"\"\"\n # repeat the particle id for each neighbor\n particles1 = jnp.repeat(particle1, neighbors.shape[0])\n\n # calculate the displacement between particle i and all neighbors\n r_ij, dist = self.space.displacement(\n coordinates[particles1], coordinates[neighbors]\n )\n # calculate the mask to determine if the particle is a neighbor\n # this will be done based on the interaction cutoff and using the neighbor_mask to exclude padding\n mask = jnp.where((dist < self.cutoff) & (neighbor_mask), 1, 0)\n\n # calculate the number of pairs\n n_pairs = mask.sum()\n\n return n_pairs, mask, dist, r_ij\n\n def calculate(self, coordinates: jnp.array):\n \"\"\"\n Calculate the neighbor list for the current state\n\n Parameters\n ----------\n coordinates: jnp.array\n Shape[N,3] array of particle coordinates\n\n Returns\n -------\n n_neighbors: jnp.array\n Array of number of neighbors for each particle\n neighbor_list: jnp.array\n Array of particle ids for the neighbors, padded to n_max_neighbors. 
Shape (n_particles, n_max_neighbors)\n padding_mask: jnp.array\n Array of masks to exclude padding from the neighbor list of each particle. Shape (n_particles, n_max_neighbors)\n dist: jnp.array\n Array of distances between each particle and its neighbors. Shape (n_particles, n_max_neighbors)\n r_ij: jnp.array\n Array of displacement vectors between each particle and its neighbors. Shape (n_particles, n_max_neighbors, 3)\n \"\"\"\n # coordinates = sampler_state.x0\n # note, we assume the box vectors do not change between building and calculating the neighbor list\n # changes to the box vectors require rebuilding the neighbor list\n\n n_neighbors, padding_mask, dist, r_ij = jax.vmap(\n self._calc_distance_per_particle, in_axes=(0, 0, 0, None)\n )(self.particle_ids, self.neighbor_list, self.neighbor_mask, coordinates)\n # mask = mask.reshape(-1, self.n_max_neighbors)\n return n_neighbors, self.neighbor_list, padding_mask, dist, r_ij\n\n @partial(jax.jit, static_argnums=(0,))\n def _calculate_particle_displacement(self, particle, coordinates, ref_coordinates):\n \"\"\"\n Calculate the displacement of a particle from the reference coordinates.\n If the displacement exceeds the half the skin distance, return True, otherwise return False.\n\n This function is designed to allow it to be jitted and vmapped over particle indices.\n\n Parameters\n ----------\n particle: int\n Particle id\n coordinates: jnp.array\n Array of particle coordinates\n ref_coordinates: jnp.array\n Array of reference particle coordinates\n\n Returns\n -------\n bool\n True if the particle is outside the skin distance, False if it is not.\n \"\"\"\n # calculate the displacement of a particle from the initial coordinates\n\n r_ij, displacement = self.space.displacement(\n coordinates[particle], ref_coordinates[particle]\n )\n\n status = jnp.where(displacement >= self.skin / 2.0, True, False)\n del displacement\n return status\n\n def check(self, coordinates: jnp.array) -> bool:\n \"\"\"\n Check if the neighbor list needs to be rebuilt based on displacement of the particles from the reference coordinates.\n If a particle moves more than 0.5 skin distance, the neighborlist will be rebuilt.\n Will also return True if the size of the coordinates array changes.\n\n Note, this could also accept a user defined criteria for distance, but this is not implemented yet.\n\n Parameters\n ----------\n coordinates: jnp.array\n Array of particle coordinates\n Returns\n -------\n bool\n True if the neighbor list needs to be rebuilt, False if it does not.\n \"\"\"\n\n if self.ref_coordinates.shape[0] != coordinates.shape[0]:\n return True\n\n status = jax.vmap(\n self._calculate_particle_displacement, in_axes=(0, None, None)\n )(self.particle_ids, coordinates, self.ref_coordinates)\n if jnp.any(status):\n del status\n return True\n else:\n del status\n return False" }, { "identifier": "OrthogonalPeriodicSpace", "path": "chiron/neighbors.py", "snippet": "class OrthogonalPeriodicSpace(Space):\n \"\"\"\n Defines the simulation space for an orthogonal periodic system.\n\n \"\"\"\n\n @property\n def box_vectors(self) -> jnp.array:\n return self._box_vectors\n\n @box_vectors.setter\n def box_vectors(self, box_vectors: jnp.array) -> None:\n self._box_vectors = box_vectors\n self._box_lengths = jnp.array(\n [box_vectors[0][0], box_vectors[1][1], box_vectors[2][2]]\n )\n\n @partial(jax.jit, static_argnums=(0,))\n def displacement(\n self, xyz_1: jnp.array, xyz_2: jnp.array\n ) -> Tuple[jnp.array, jnp.array]:\n \"\"\"\n Calculate the periodic distance 
between two points.\n\n Parameters\n ----------\n xyz_1: jnp.array\n Coordinates of the first point\n xyz_2: jnp.array\n Coordinates of the second point\n\n Returns\n -------\n r_ij: jnp.array\n Displacement vector between the two points\n dist: float\n Distance between the two points\n\n \"\"\"\n # calculate uncorrect r_ij\n r_ij = xyz_1 - xyz_2\n\n # calculated corrected displacement vector\n r_ij = (\n jnp.mod(r_ij + self._box_lengths * 0.5, self._box_lengths)\n - self._box_lengths * 0.5\n )\n # calculate the scalar distance\n dist = jnp.linalg.norm(r_ij, axis=-1)\n\n return r_ij, dist\n\n @partial(jax.jit, static_argnums=(0,))\n def wrap(self, xyz: jnp.array) -> jnp.array:\n \"\"\"\n Wrap the coordinates of the system.\n\n Parameters\n ----------\n xyz: jnp.array\n Coordinates of the system\n\n Returns\n -------\n jnp.array\n Wrapped coordinates of the system\n\n \"\"\"\n xyz = xyz - jnp.floor(xyz / self._box_lengths) * self._box_lengths\n\n return xyz" }, { "identifier": "PairList", "path": "chiron/neighbors.py", "snippet": "class PairList(PairsBase):\n \"\"\"\n N^2 pairlist implementation that returns the particle pair ids, displacement vectors, and distances.\n\n Parameters\n ----------\n space: Space\n Class that defines how to calculate the displacement between two points and apply the boundary conditions\n cutoff: float, default = 2.5\n Cutoff distance for the pair list calculation\n Examples\n --------\n >>> from chiron.neighbors import PairList, OrthogonalPeriodicSpace\n >>> from chiron.states import SamplerState\n >>> import jax.numpy as jnp\n >>>\n >>> space = OrthogonalPeriodicSpace()\n >>> pair_list = PairList(space, cutoff=2.5)\n >>> sampler_state = SamplerState(x0=jnp.array([[0.0, 0.0, 0.0], [2, 0.0, 0.0], [0.0, 2, 0.0]]),\n >>> box_vectors=jnp.array([[10, 0.0, 0.0], [0.0, 10, 0.0], [0.0, 0.0, 10]]))\n >>> pair_list.build_from_state(sampler_state)\n >>>\n >>> # mask and distances are of shape (n_particles, n_particles-1),\n >>> displacement_vectors of shape (n_particles, n_particles-1, 3)\n >>> # mask, is a bool array that is True if the particle is within the cutoff distance, False if it is not\n >>> # n_pairs is of shape (n_particles) and is per row sum of the mask. 
The mask ensure we also do not double count pairs\n >>> n_pairs, mask, distances, displacement_vectors = pair_list.calculate(sampler_state.x0)\n \"\"\"\n\n def __init__(\n self,\n space: Space,\n cutoff: unit.Quantity = unit.Quantity(1.2, unit.nanometer),\n ):\n if not isinstance(space, Space):\n raise TypeError(f\"space must be of type Space, found {type(space)}\")\n if not cutoff.unit.is_compatible(unit.angstrom):\n raise ValueError(\n f\"cutoff must be a unit.Quantity with units of distance, cutoff.unit = {cutoff.unit}\"\n )\n\n self.cutoff = cutoff.value_in_unit_system(unit.md_unit_system)\n self.space = space\n\n # set a a simple variable to know if this has at least been built once as opposed to just initialized\n # this does not imply that the neighborlist is up to date\n self.is_built = False\n\n # note, we need to use the partial decorator in order to use the jit decorate\n # so that it knows to ignore the `self` argument\n @partial(jax.jit, static_argnums=(0,))\n def _pairs_and_mask(self, particle_ids: jnp.array):\n \"\"\"\n Jitted function to generate all pairs (excluding self interactions)\n and mask that allows us to remove double-counting of pairs.\n\n Parameters\n ----------\n particle_ids: jnp.array\n Array of particle ids\n\n Returns\n -------\n all_pairs: jnp.array\n Array of all pairs (excluding self interactions), of size (n_particles, n_particles-1)\n reduction_mask: jnp.array\n Bool mask that identifies which pairs to exclude to remove double counting of pairs\n\n \"\"\"\n # for the nsq approach, we consider the distance between a particle and all other particles in the system\n # if we used a cell list the possible_neighbors would be a smaller list, i.e., only those in the neigboring cells\n # we'll just keep with naming syntax for future flexibility\n\n possible_neighbors = particle_ids\n\n particles_j = jnp.broadcast_to(\n possible_neighbors,\n (particle_ids.shape[0], possible_neighbors.shape[0]),\n )\n # reshape the particle_ids\n particles_i = jnp.reshape(particle_ids, (particle_ids.shape[0], 1))\n # create a mask to exclude self interactions and double counting\n temp_mask = particles_i != particles_j\n all_pairs = jax.vmap(self._remove_self_interactions, in_axes=(0, 0))(\n particles_j, temp_mask\n )\n del temp_mask\n all_pairs = jnp.array(all_pairs[0], dtype=jnp.uint32)\n\n reduction_mask = jnp.where(particles_i < all_pairs, True, False)\n\n return all_pairs, reduction_mask\n\n @partial(jax.jit, static_argnums=(0,))\n def _remove_self_interactions(self, particles, temp_mask):\n return jnp.where(\n temp_mask, size=particles.shape[0] - 1, fill_value=particles.shape[0] - 1\n )\n\n def build(\n self,\n coordinates: Union[jnp.array, unit.Quantity],\n box_vectors: Union[jnp.array, unit.Quantity],\n ):\n \"\"\"\n Build the neighborlist from an array of coordinates and box vectors.\n\n Parameters\n ----------\n coordinates: jnp.array\n Shape[n_particles,3] array of particle coordinates\n box_vectors: jnp.array\n Shape[3,3] array of box vectors\n\n Returns\n -------\n None\n\n \"\"\"\n\n # set our reference coordinates\n # this will set self.ref_coordinates=coordinates and self.box_vectors\n self._validate_build_inputs(coordinates, box_vectors)\n\n self.n_particles = self.ref_coordinates.shape[0]\n\n # the neighborlist assumes that the box vectors do not change between building and calculating the neighbor list\n # changes to the box vectors require rebuilding the neighbor list\n self.space.box_vectors = self.box_vectors\n\n # store the ids of all the particles\n 
self.particle_ids = jnp.array(range(0, coordinates.shape[0]), dtype=jnp.uint32)\n\n # calculate which pairs to exclude\n self.all_pairs, self.reduction_mask = self._pairs_and_mask(self.particle_ids)\n\n self.is_built = True\n\n @partial(jax.jit, static_argnums=(0,))\n def _calc_distance_per_particle(\n self, particle1, neighbors, neighbor_mask, coordinates\n ):\n \"\"\"\n Jitted function to calculate the distance between a particle and all possible neighbors\n\n Parameters\n ----------\n particle1: int\n Particle id\n neighbors: jnp.array\n Array of particle ids for the possible particle pairs of particle1\n neighbor_mask: jnp.array\n Mask to exclude double particles to prevent double counting\n coordinates: jnp.array\n X,Y,Z coordinates of all particles, shaped (n_particles, 3)\n\n Returns\n -------\n n_pairs: int\n Number of interacting pairs for the particle\n mask: jnp.array\n Mask to exclude padding particles not within the cutoff particle1.\n If a particle is within the interaction cutoff, the mask is 1, otherwise it is 0\n Array has shape (n_particles, n_particles-1) as it excludes self interactions\n dist: jnp.array\n Array of distances between the particle and all other particles in the system.\n Array has shape (n_particles, n_particles-1) as it excludes self interactions\n r_ij: jnp.array\n Array of displacement vectors between the particle and all other particles in the system.\n Array has shape (n_particles, n_particles-1, 3) as it excludes self interactions\n\n \"\"\"\n # repeat the particle id for each neighbor\n particles1 = jnp.repeat(particle1, neighbors.shape[0])\n\n # calculate the displacement between particle i and all neighbors\n r_ij, dist = self.space.displacement(\n coordinates[particles1], coordinates[neighbors]\n )\n # calculate the mask to determine if the particle is a neighbor\n # this will be done based on the interaction cutoff and using the neighbor_mask to exclude padding\n mask = jnp.where((dist < self.cutoff) & (neighbor_mask), 1, 0)\n\n # calculate the number of pairs\n n_pairs = mask.sum()\n\n return n_pairs, mask, dist, r_ij\n\n def calculate(self, coordinates: jnp.array):\n \"\"\"\n Calculate the neighbor list for the current state\n\n Parameters\n ----------\n coordinates: jnp.array\n Shape[n_particles,3] array of particle coordinates\n\n Returns\n -------\n n_neighbors: jnp.array\n Array of the number of interacting particles (i.e., where dist < cutoff). Shape: (n_particles)\n pairs: jnp.array\n Array of particle ids that were considered for interaction. Shape: (n_particles, n_particles-1)\n padding_mask: jnp.array\n Array used to masks non interaction particle pairs. Shape: (n_particles, n_particles-1)\n dist: jnp.array\n Array of distances between pairs in the system. Shape: (n_particles, n_particles-1)\n r_ij: jnp.array\n Array of displacement vectors between particle pairs. Shape: (n_particles, n_particles-1, 3).\n \"\"\"\n if coordinates.shape[0] != self.n_particles:\n raise ValueError(\n f\"Number of particles cannot changes without rebuilding. 
\"\n f\"Coordinates must have shape ({self.n_particles}, 3), found {coordinates.shape}\"\n )\n\n # coordinates = self.space.wrap(coordinates)\n\n n_neighbors, padding_mask, dist, r_ij = jax.vmap(\n self._calc_distance_per_particle, in_axes=(0, 0, 0, None)\n )(self.particle_ids, self.all_pairs, self.reduction_mask, coordinates)\n\n return n_neighbors, self.all_pairs, padding_mask, dist, r_ij\n\n def check(self, coordinates: jnp.array) -> bool:\n \"\"\"\n Check if we need to reconstruct internal arrays.\n For a simple pairlist this will always return False, unless the number of particles change.\n\n Parameters\n ----------\n coordinates: jnp.array\n Array of particle coordinates\n Returns\n -------\n bool\n True if we need to rebuild the neighbor list, False if we do not.\n \"\"\"\n if coordinates.shape[0] != self.n_particles:\n return True\n else:\n return False" }, { "identifier": "SimulationReporter", "path": "chiron/reporters.py", "snippet": "class SimulationReporter:\n def __init__(self, filename: str, topology: Topology, buffer_size: int = 1):\n \"\"\"\n Initialize the SimulationReporter.\n\n Parameters\n ----------\n filename : str\n Name of the HDF5 file to write the simulation data.\n topology: openmm.Topology\n buffer_size : int, optional\n Number of data points to buffer before writing to disk (default is 1).\n\n \"\"\"\n import mdtraj as md\n\n self.filename = filename\n self.buffer_size = buffer_size\n self.topology = topology\n self.buffer = {}\n self.h5file = h5py.File(filename, \"a\")\n log.info(f\"Writing simulation data to {filename}\")\n\n def get_available_keys(self):\n return self.h5file.keys()\n\n def report(self, data_dict):\n \"\"\"\n Add new data to the buffer and write the buffer to disk if it's full.\n\n Parameters\n ----------\n data_dict : dict\n Dictionary containing data to report. 
Keys are data labels (e.g., 'energy'),\n and values are the data points (usually numpy arrays).\n\n \"\"\"\n for key, value in data_dict.items():\n if key not in self.buffer:\n self.buffer[key] = []\n self.buffer[key].append(value)\n\n if len(self.buffer[key]) >= self.buffer_size:\n self._write_to_disk(key)\n\n def _write_to_disk(self, key):\n \"\"\"\n Write buffered data of a given key to the HDF5 file.\n\n Parameters\n ----------\n key : str\n The key of the data to write to disk.\n\n \"\"\"\n data = np.array(self.buffer[key])\n if key in self.h5file:\n dset = self.h5file[key]\n dset.resize((dset.shape[0] + data.shape[0],) + data.shape[1:])\n dset[-data.shape[0] :] = data\n else:\n log.debug(f\"Creating {key} in {self.filename}\")\n self.h5file.create_dataset(\n key, data=data, maxshape=(None,) + data.shape[1:], chunks=True\n )\n\n self.buffer[key] = []\n\n def close(self):\n \"\"\"\n Write any remaining data in the buffer to disk and close the HDF5 file.\n\n \"\"\"\n for key in self.buffer:\n if self.buffer[key]:\n self._write_to_disk(key)\n self.h5file.close()\n\n def get_property(self, name: str):\n \"\"\"\n Get the property from the HDF5 file.\n\n Parameters\n ----------\n name : str\n Name of the property to get.\n\n Returns\n -------\n np.ndarray\n The property.\n\n \"\"\"\n if name not in self.h5file:\n log.debug(f\"{name} not in HDF5 file\")\n return None\n else:\n return np.array(self.h5file[name])\n\n def get_mdtraj_trajectory(self):\n import mdtraj as md\n\n return md.Trajectory(\n xyz=self.get_property(\"traj\"),\n topology=md.Topology.from_openmm(self.topology),\n unitcell_lengths=self.get_property(\"box_vectors\"),\n unitcell_angles=self.get_property(\"box_angles\"),\n )" }, { "identifier": "MetropolisDisplacementMove", "path": "chiron/mcmc.py", "snippet": "class MetropolisDisplacementMove(MetropolizedMove):\n \"\"\"A metropolized move that randomly displace a subset of atoms.\n\n Parameters\n ----------\n displacement_sigma : openmm.unit.Quantity\n The standard deviation of the normal distribution used to propose the\n random displacement (units of length, default is 1.0*nanometer).\n atom_subset : slice or list of int, optional\n If specified, the move is applied only to those atoms specified by these\n indices. If None, the move is applied to all atoms (default is None).\n\n Attributes\n ----------\n n_accepted : int\n The number of proposals accepted.\n n_proposed : int\n The total number of attempted moves.\n displacement_sigma\n atom_subset\n\n See Also\n --------\n MetropolizedMove\n\n \"\"\"\n\n def __init__(\n self,\n seed: int = 1234,\n displacement_sigma=1.0 * unit.nanometer,\n nr_of_moves: int = 100,\n atom_subset: Optional[List[int]] = None,\n simulation_reporter: Optional[SimulationReporter] = None,\n ):\n \"\"\"\n Initialize the MCMC class.\n\n Parameters\n ----------\n seed : int, optional\n The seed for the random number generator. Default is 1234.\n displacement_sigma : float or unit.Quantity, optional\n The standard deviation of the displacement for each move. Default is 1.0 nm.\n nr_of_moves : int, optional\n The number of moves to perform. Default is 100.\n atom_subset : list of int, optional\n A subset of atom indices to consider for the moves. Default is None.\n simulation_reporter : SimulationReporter, optional\n The reporter to write the data to. 
Default is None.\n Returns\n -------\n None\n \"\"\"\n\n super().__init__(nr_of_moves=nr_of_moves, seed=seed)\n self.displacement_sigma = displacement_sigma\n self.atom_subset = atom_subset\n self.simulation_reporter = simulation_reporter\n if self.simulation_reporter is not None:\n log.info(\n f\"Using reporter {self.simulation_reporter} saving to {self.simulation_reporter.filename}\"\n )\n\n def displace_positions(\n self, positions: jnp.array, displacement_sigma=1.0 * unit.nanometer\n ):\n \"\"\"Return the positions after applying a random displacement to them.\n\n Parameters\n ----------\n positions : nx3 jnp.array unit.Quantity\n The positions to displace.\n displacement_sigma : openmm.unit.Quantity\n The standard deviation of the normal distribution used to propose\n the random displacement (units of length, default is 1.0*nanometer).\n\n Returns\n -------\n rotated_positions : nx3 numpy.ndarray openmm.unit.Quantity\n The displaced positions.\n\n \"\"\"\n import jax.random as jrandom\n\n self.key, subkey = jrandom.split(self.key)\n nr_of_atoms = positions.shape[0]\n # log.debug(f\"Number of atoms is {nr_of_atoms}.\")\n unitless_displacement_sigma = displacement_sigma.value_in_unit_system(\n unit.md_unit_system\n )\n # log.debug(f\"Displacement sigma is {unitless_displacement_sigma}.\")\n displacement_vector = (\n jrandom.normal(subkey, shape=(nr_of_atoms, 3)) * 0.1\n ) # NOTE: convert from Angstrom to nm\n scaled_displacement_vector = displacement_vector * unitless_displacement_sigma\n # log.debug(f\"Unscaled Displacement vector is {displacement_vector}.\")\n # log.debug(f\"Scaled Displacement vector is {scaled_displacement_vector}.\")\n updated_position = positions + scaled_displacement_vector\n\n return updated_position\n\n def _propose_positions(self, initial_positions: jnp.array) -> jnp.array:\n \"\"\"Implement MetropolizedMove._propose_positions for apply().\"\"\"\n return self.displace_positions(initial_positions, self.displacement_sigma)\n\n def run(\n self,\n sampler_state: SamplerState,\n thermodynamic_state: ThermodynamicState,\n nbr_list=None,\n progress_bar=True,\n ):\n from tqdm import tqdm\n\n for trials in (\n tqdm(range(self.nr_of_moves)) if progress_bar else range(self.nr_of_moves)\n ):\n self.apply(\n thermodynamic_state, sampler_state, self.simulation_reporter, nbr_list\n )\n if trials % 100 == 0:\n log.debug(f\"Acceptance rate: {self.n_accepted / self.n_proposed}\")\n if self.simulation_reporter is not None:\n self.simulation_reporter.report(\n {\n \"Acceptance rate\": self.n_accepted / self.n_proposed,\n \"step\": self.n_proposed,\n }\n )\n\n log.info(f\"Acceptance rate: {self.n_accepted / self.n_proposed}\")" } ]
from openmmtools.testsystems import LennardJonesFluid
from chiron.potential import LJPotential
from openmm import unit
from chiron.states import SamplerState, ThermodynamicState
from chiron.neighbors import NeighborListNsqrd, OrthogonalPeriodicSpace
from chiron.neighbors import PairList
from chiron.reporters import SimulationReporter
from chiron.mcmc import MetropolisDisplacementMove
import os
13,336
# Use the LennardJonesFluid example from openmmtools to initialize particle positions and topology
# For this example, the topology provides the masses for the particles
# The default LennardJonesFluid example considers the system to be Argon with 39.9 amu
lj_fluid = LennardJonesFluid(reduced_density=0.1, nparticles=1000)

# initialize the LennardJones potential in chiron
#
sigma = 0.34 * unit.nanometer
epsilon = 0.238 * unit.kilocalories_per_mole
cutoff = 3.0 * sigma
lj_potential = LJPotential(
    lj_fluid.topology, sigma=sigma, epsilon=epsilon, cutoff=cutoff
)

# define the sampler state
sampler_state = SamplerState(
    x0=lj_fluid.positions, box_vectors=lj_fluid.system.getDefaultPeriodicBoxVectors()
)

# define the thermodynamic state
thermodynamic_state = ThermodynamicState(
    potential=lj_potential, temperature=300 * unit.kelvin
)

# define the neighbor list for an orthogonal periodic space
skin = 0.5 * unit.nanometer
# Use the LennardJonesFluid example from openmmtools to initialize particle positions and topology
# For this example, the topology provides the masses for the particles
# The default LennardJonesFluid example considers the system to be Argon with 39.9 amu
lj_fluid = LennardJonesFluid(reduced_density=0.1, nparticles=1000)

# initialize the LennardJones potential in chiron
#
sigma = 0.34 * unit.nanometer
epsilon = 0.238 * unit.kilocalories_per_mole
cutoff = 3.0 * sigma
lj_potential = LJPotential(
    lj_fluid.topology, sigma=sigma, epsilon=epsilon, cutoff=cutoff
)

# define the sampler state
sampler_state = SamplerState(
    x0=lj_fluid.positions, box_vectors=lj_fluid.system.getDefaultPeriodicBoxVectors()
)

# define the thermodynamic state
thermodynamic_state = ThermodynamicState(
    potential=lj_potential, temperature=300 * unit.kelvin
)

# define the neighbor list for an orthogonal periodic space
skin = 0.5 * unit.nanometer
nbr_list = NeighborListNsqrd(
3
2023-11-07 18:17:43+00:00
16k
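Note: the record above ends with the gold next line "nbr_list = NeighborListNsqrd(". The code below is a minimal, illustrative sketch of how that call could be completed and used, based only on the constructor and method signatures shown in this record's context snippets (NeighborListNsqrd, OrthogonalPeriodicSpace, MetropolisDisplacementMove); the n_max_neighbors and displacement_sigma values are assumptions for illustration, not the dataset's recorded continuation.

nbr_list = NeighborListNsqrd(
    OrthogonalPeriodicSpace(),
    cutoff=cutoff,
    skin=skin,
    n_max_neighbors=180,  # assumed value, chosen only for illustration
)
# build the neighbor list from the sampler state defined in the snippet above
nbr_list.build(sampler_state.x0, sampler_state.box_vectors)

# run a Metropolis displacement move against the thermodynamic state, reusing the neighbor list
mc_move = MetropolisDisplacementMove(
    displacement_sigma=0.01 * unit.nanometer,  # assumed value, chosen only for illustration
    nr_of_moves=100,
)
mc_move.run(sampler_state, thermodynamic_state, nbr_list=nbr_list)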
WolfgangFahl/dcm
dcm/dcm_webserver.py
[ { "identifier": "Assessment", "path": "dcm/dcm_assessment.py", "snippet": "class Assessment:\n \"\"\"\n Assessment for CompetenceTree\n \"\"\"\n\n def __init__(\n self,\n webserver: NiceGuiWebserver,\n dcm: DynamicCompetenceMap,\n learner: Learner,\n debug: bool = False,\n ):\n \"\"\"\n initialize the assessment\n\n Args:\n webserver(NiceguiWebServer): the webserver context\n dcm(DynamicCompetenceMap): the competence map\n learner(Learner): the learner to get the self assessment for\n debug(bool): if True show debugging information\n \"\"\"\n self.webserver = webserver\n self.debug = debug\n self.reset(dcm=dcm, learner=learner)\n self.setup_ui()\n\n def reset(\n self,\n dcm: DynamicCompetenceMap,\n learner: Learner,\n ):\n \"\"\"\n (re)set the assessment\n\n Args:\n webserver(NiceguiWebServer): the webserver context\n dcm(DynamicCompetenceMap): the competence map\n learner(Learner): the learner to get the self assessment for\n \"\"\"\n self.dcm = dcm\n self.competence_tree = dcm.competence_tree\n self.learner = learner\n self.achievement_index = 0\n # do we need setup the achievements?\n if self.learner.achievements is None:\n self.learner.achievements = []\n self.setup_achievements()\n self.total = len(self.learner.achievements)\n\n def clear(self):\n \"\"\"\n clear the ui\n \"\"\"\n self.container.clear()\n\n @property\n def current_achievement(self) -> Achievement:\n if self.achievement_index < 0 or self.achievement_index > len(\n self.learner.achievements\n ):\n raise ValueError(f\"invalid achievement index {self.achievement_index}\")\n achievement = self.learner.achievements[self.achievement_index]\n return achievement\n\n def setup_achievements(self):\n \"\"\"\n Setup achievements based on the competence tree.\n\n This method iterates over the competence aspects and their facets,\n constructs a path for each facet, and creates an Achievement instance\n based on the path. 
These achievements are then added to the learner's\n achievements list.\n \"\"\"\n for aspect in self.competence_tree.aspects:\n for area in aspect.areas:\n area_path: str = f\"{self.competence_tree.id}/{aspect.id}\"\n self.add_achievement(area_path)\n for facet in area.facets:\n # Construct the path for the facet\n facet_path=f\"{area_path}/{facet.id}\"\n self.add_achievement(facet_path)\n \n def add_achievement(self,path):\n # Create a new Achievement instance with the constructed path\n new_achievement = Achievement(\n path=path,\n )\n self.learner.add_achievement(new_achievement)\n\n def get_index_str(self) -> str:\n index_str = f\"{self.achievement_index+1:2}/{self.total:2}\"\n return index_str\n\n def setup_ui(self):\n \"\"\"\n display my competence Tree elements\n \"\"\"\n with ui.grid(columns=1).classes(\"w-full\") as self.container:\n self.progress_bar = NiceguiProgressbar(\n total=self.total, desc=\"self assessment\", unit=\"facets\"\n )\n self.progress_bar.reset()\n with ui.row():\n ui.button(\"\", icon=\"arrow_back\", on_click=lambda _args: self.step(-1))\n ui.button(\"\", icon=\"arrow_forward\", on_click=lambda _args: self.step(1))\n with ui.row():\n with ui.card() as self.achievement_view:\n self.index_view = ui.label(self.get_index_str())\n self.link_view = ui.html()\n self.markdown_view = ui.markdown()\n self.button_row = ButtonRow(\n self, self.competence_tree, self.current_achievement\n )\n\n def show_progress(self):\n \"\"\"\n Update the progress bar based on the\n number of achievements with a non-None level value.\n \"\"\"\n count = sum(\n 1\n for achievement in self.learner.achievements\n if achievement.level is not None\n )\n self.progress_bar.total = self.total\n self.progress_bar.update_value(count)\n\n async def step(self, step: int = 0):\n self.update_achievement_view(step)\n\n def update_achievement_view(self, step: int = 0):\n \"\"\"\n display the active achievement as the step indicates\n \"\"\"\n self.show_progress()\n self.webserver.render_dcm(self.dcm, self.learner, clear_assessment=False)\n if self.achievement_index + step < 0:\n ui.notify(\"first achievement reached!\")\n step = 0\n if self.achievement_index + step < len(self.learner.achievements):\n self.achievement_index += step\n self.index_view.text = self.get_index_str()\n achievement = self.current_achievement\n self.button_row.achievement = achievement\n self.button_row.set_button_states(achievement)\n competence_element = self.competence_tree.lookup_by_path(achievement.path)\n if not competence_element:\n ui.notify(\"invalid path: {achievement.path}\")\n self.markdown_view.content = f\"⚠️ {achievement.path}\"\n else:\n if hasattr(competence_element, \"path\"):\n if competence_element.url:\n link = Link.create(\n competence_element.url, competence_element.path\n )\n else:\n link = competence_element.path\n else:\n link = \"⚠️ - competence element path missing\"\n self.link_view.content = link\n description = competence_element.description or \"\"\n if isinstance(competence_element, CompetenceArea):\n aspect = competence_element.aspect\n description = f\"### {aspect.name}\\n\\n**{competence_element.name}**:\\n\\n{description}\"\n if isinstance(competence_element, CompetenceFacet):\n area = competence_element.area\n description = f\"### {area.name}\\n\\n**{competence_element.name}**:\\n\\n{description}\"\n self.markdown_view.content = description\n else:\n ui.notify(\"Done!\")" }, { "identifier": "DcmChart", "path": "dcm/dcm_chart.py", "snippet": "class DcmChart:\n \"\"\"\n a Dynamic competence map 
chart\n \"\"\"\n\n def __init__(self, dcm: DynamicCompetenceMap):\n \"\"\"\n Constructor\n \"\"\"\n self.dcm = dcm\n\n def generate_svg(\n self,\n filename: Optional[str] = None,\n learner: Optional[Learner] = None,\n config: Optional[SVGConfig] = None,\n ) -> str:\n \"\"\"\n Generate the SVG markup and optionally save it to a file. If a filename is given, the method\n will also save the SVG to that file. The SVG is generated based on internal state not shown here.\n\n Args:\n filename (str, optional): The path to the file where the SVG should be saved. Defaults to None.\n learner(Learner): the learner to show the achievements for\n config (SVGConfig, optional): The configuration for the SVG canvas and legend. Defaults to default values.\n\n Returns:\n str: The SVG markup.\n \"\"\"\n if config is None:\n config = SVGConfig() # Use default configuration if none provided\n svg_markup = self.generate_svg_markup(\n self.dcm.competence_tree, learner=learner, config=config\n )\n if filename:\n self.save_svg_to_file(svg_markup, filename)\n return svg_markup\n\n def generate_donut_segment_for_element(\n self,\n svg: SVG,\n element: CompetenceElement,\n learner: Learner,\n segment: DonutSegment,\n ):\n \"\"\"\n generate a donut segment for a given element of\n the CompetenceTree\n \"\"\"\n # Add the element segment as a donut segment\n element_url = (\n element.url\n if element.url\n else f\"{self.lookup_url}/description/{element.path}\"\n if self.lookup_url is not None\n else None\n )\n show_as_popup = element.url is None\n element_config = element.to_svg_node_config(\n url=element_url,\n show_as_popup=show_as_popup,\n x=self.cx,\n y=self.cy,\n )\n # check learner achievements\n if learner:\n achievement = learner.achievements_by_path.get(element.path, None)\n if achievement and achievement.level:\n element_config.element_class = \"selected\"\n svg.add_donut_segment(config=element_config, segment=segment)\n\n def generate_pie_elements(\n self,\n level: int,\n svg: SVG,\n parent_element: CompetenceElement,\n learner: Learner,\n segment: DonutSegment,\n ):\n \"\"\"\n generate the pie elements (donut segments) for the subelements\n of the given parent_element at the given level\n e.g. aspects, areas or facets - taking the learner\n achievements into account if a corresponding achievement\n is found. 
The segment limits the area in which the generation may operate\n \"\"\"\n sub_element_name = self.levels[level]\n # get the elements to be displayed\n elements = getattr(parent_element, sub_element_name)\n total = len(elements)\n # are there any elements to be shown?\n if total > 0:\n angle_per_element = (segment.end_angle - segment.start_angle) / total\n start_angle = segment.start_angle\n for element in elements:\n end_angle = start_angle + angle_per_element\n sub_segment = DonutSegment(\n segment.outer_radius,\n segment.outer_radius + self.tree_radius*2,\n start_angle,\n end_angle,\n )\n self.generate_donut_segment_for_element(\n svg, element, learner, segment=sub_segment\n )\n start_angle = end_angle\n if level + 1 < len(self.levels):\n self.generate_pie_elements(\n level=level + 1,\n svg=svg,\n parent_element=element,\n learner=learner,\n segment=sub_segment,\n )\n\n def generate_svg_markup(\n self,\n competence_tree: CompetenceTree = None,\n learner: Learner = None,\n config: SVGConfig = None,\n with_java_script: bool = True,\n lookup_url: str = \"\",\n ) -> str:\n \"\"\"\n generate the SVG markup for the given CompetenceTree and learner\n\n Args:\n\n \"\"\"\n if competence_tree is None:\n competence_tree = self.dcm.competence_tree\n\n svg = SVG(config)\n self.svg = svg\n config = svg.config\n # center of circle\n self.cx = config.width // 2\n self.cy = (config.total_height - config.legend_height) // 2\n self.levels = [\"aspects\", \"areas\", \"facets\"]\n self.tree_radius = config.width / 2 / 8\n\n self.lookup_url = (\n competence_tree.lookup_url if competence_tree.lookup_url else lookup_url\n )\n\n circle_config = competence_tree.to_svg_node_config(\n x=self.cx, \n y=self.cy, \n width=self.tree_radius\n )\n svg.add_circle(config=circle_config)\n\n segment = DonutSegment(\n inner_radius=0, \n outer_radius=self.tree_radius\n )\n self.generate_pie_elements(\n level=0,\n svg=svg,\n parent_element=competence_tree,\n learner=learner,\n segment=segment,\n )\n if config.legend_height > 0:\n competence_tree.add_legend(svg)\n\n return svg.get_svg_markup(with_java_script=with_java_script)\n\n def save_svg_to_file(self, svg_markup: str, filename: str):\n \"\"\"\n Save the SVG content to a file\n \"\"\"\n with open(filename, \"w\") as file:\n file.write(svg_markup)" }, { "identifier": "CompetenceTree", "path": "dcm/dcm_core.py", "snippet": "class CompetenceTree(CompetenceElement, YamlAble[\"CompetenceTree\"]):\n \"\"\"\n Represents the entire structure of competencies, including various aspects and levels.\n\n Attributes:\n competence_aspects (List[CompetenceAspect]): A list of CompetenceAspect objects.\n competence_levels (List[CompetenceLevel]): A list of CompetenceLevel objects representing the different levels in the competence hierarchy.\n element_names (Dict[str, str]): A dictionary holding the names for tree, aspects, facets, and levels. 
The key is the type (\"tree\", \"aspect\", \"facet\", \"level\").\n \"\"\"\n\n lookup_url: Optional[str] = None\n aspects: List[CompetenceAspect] = field(default_factory=list)\n levels: List[CompetenceLevel] = field(default_factory=list)\n element_names: Dict[str, str] = field(default_factory=dict)\n\n def __post_init__(self):\n \"\"\"\n initalize the path variables of my hierarchy\n \"\"\"\n super().__post_init__()\n self.path = self.id\n # Loop through each competence aspect and set their paths and parent references\n for aspect in self.aspects:\n aspect.competence_tree = self\n aspect.path = f\"{self.id}/{aspect.id}\"\n for area in aspect.areas:\n area.competence_tree = self\n area.aspect = aspect\n area.path = f\"{self.id}/{aspect.id}/{area.id}\"\n for facet in area.facets:\n facet.competence_tree = self\n facet.area = area\n facet.path = f\"{self.id}/{aspect.id}/{area.id}/{facet.id}\"\n\n @classmethod\n def required_keys(cls) -> Tuple:\n keys = {\"name\", \"id\", \"url\", \"description\", \"element_names\"}\n return keys\n\n def lookup_by_path(\n self, path: str, lenient: bool = True\n ) -> Optional[CompetenceElement]:\n \"\"\"\n Look up and return a competence element (tree,aspect of facet)\n based on the given path.\n\n The path is expected to be in the format \"tree_id/aspect_id/facet_id\".\n This method parses the path and retrieves the corresponding competence aspect or facet.\n\n Args:\n path (str): The path in the format \"tree_id/aspect_id/facet_id\".\n\n lenient(bool): if not lenient raise Exceptions for invalid paths and ids\n Returns:\n Optional[CompetenceElement]: The competence aspect or facet corresponding to the given path.\n \"\"\"\n\n def handle_error(msg):\n if not lenient:\n raise ValueError(msg)\n\n parts = path.split(\"/\")\n if len(parts) < 1:\n return None\n\n tree_id = parts[0]\n if tree_id != self.id:\n handle_error(f\"invalid tree_id for lookup {tree_id}\")\n return None\n if len(parts) == 1:\n return self\n if len(parts) > 1:\n aspect_id = parts[1]\n # Retrieve the aspect\n aspect = next((aspect for aspect in self.aspects if aspect.id==aspect_id), None)\n if aspect:\n if len(parts) == 2:\n return aspect\n if len(parts) > 2:\n area_id = parts[2]\n area = next((area for area in aspect.areas if area.id == area_id), None)\n if area:\n if len(parts) == 3:\n return area\n if len(parts) > 3:\n facet_id = parts[3]\n facet = next(\n (facet for facet in area.facets if facet.id == facet_id), None\n )\n if facet:\n return facet\n handle_error(f\"invalid path for lookup {path}\")\n return None\n\n def to_pretty_json(self):\n \"\"\"\n Converts the CompetenceTree object to a pretty JSON string, handling null values.\n \"\"\"\n json_str = self.to_json()\n json_dict = json.loads(json_str)\n\n def remove_none_values(data):\n \"\"\"\n Recursively removes keys with None values from a dictionary, list, or nested structure.\n \"\"\"\n if isinstance(data, dict):\n return {\n k: remove_none_values(v) for k, v in data.items() if v is not None\n }\n elif isinstance(data, list):\n return [remove_none_values(item) for item in data]\n return data\n\n none_free_dict = remove_none_values(json_dict)\n null_free_json_str = json.dumps(none_free_dict, indent=2)\n return null_free_json_str\n\n def add_legend(self, svg: SVG) -> None:\n \"\"\"\n Add a legend to the SVG explaining the color codes for levels and aspects.\n Args:\n svg (SVG): The SVG object to which the legend will be added.\n \"\"\"\n # Starting x position for the legends, starting 10 pixels from the left edge\n x_start = 10\n # y 
position for the legends, starting 20 pixels from the bottom edge\n y = svg.config.total_height - svg.config.legend_height + 20\n # Width and height of each legend color box\n box_width, box_height = 30, 20\n # Padding between legend items and between the color box and the text\n padding = 5\n\n # Add the competence level legend\n level_items = [(level.color_code, level.name) for level in self.levels]\n svg.add_legend_column(\n level_items,\n self.element_names.get(\"level\", \"Level\"),\n x_start,\n y,\n box_width,\n box_height,\n )\n\n # Calculate the x position for the aspect legend based on the width of the level legend\n x_aspect_start = (\n x_start\n + box_width\n + padding\n + max(svg.get_text_width(level.name) for level in self.levels)\n + padding\n )\n\n # Add the competence aspect legend\n aspect_items = [(aspect.color_code, aspect.name) for aspect in self.aspects]\n svg.add_legend_column(\n aspect_items,\n self.element_names.get(\"aspect\", \"Aspect\"),\n x_aspect_start,\n y,\n box_width,\n box_height,\n )" }, { "identifier": "DynamicCompetenceMap", "path": "dcm/dcm_core.py", "snippet": "class DynamicCompetenceMap:\n \"\"\"\n a visualization of a competence map\n \"\"\"\n\n def __init__(self, competence_tree: CompetenceTree):\n \"\"\"\n constructor\n \"\"\"\n self.competence_tree = competence_tree\n self.svg = None\n\n @property\n def main_id(self):\n main_id = self.competence_tree.id\n return main_id\n\n @classmethod\n def examples_path(cls) -> str:\n # the root directory (default: examples)\n path = os.path.join(os.path.dirname(__file__), \"../dcm_examples\")\n path = os.path.abspath(path)\n return path\n\n @classmethod\n def get_example_dcm_definitions(\n cls,\n markup: str = \"json\",\n required_keys: Optional[Tuple] = None,\n as_text: bool = True,\n ) -> dict:\n \"\"\"\n Retrieve example Dynamic Competence Map (DCM) definitions from files in the specified markup format (either JSON or YAML).\n\n Args:\n markup (str): The markup format of the input files. Defaults to 'json'. Supported values are 'json' and 'yaml'.\n required_keys (Optional[Tuple]): A tuple of keys required to validate the data. If not provided, all keys will be considered valid.\n as_text (bool): If True, returns the file content as text; if False, returns parsed data. 
Defaults to True.\n\n Returns:\n dict: A dictionary where each key is the prefix of the file name and the value is the file content as text or parsed data, depending on the value of 'as_text'.\n\n Raises:\n Exception: If there's an error in reading or parsing the file, or if the file does not meet the required validation criteria.\n \"\"\"\n example_dcm_defs = {}\n file_ext = f\".{markup}\"\n examples_path = cls.examples_path()\n for dirpath, _dirnames, filenames in os.walk(examples_path):\n for filename in filenames:\n if filename.endswith(file_ext):\n filepath = os.path.join(dirpath, filename)\n with open(filepath, \"r\") as definition_file:\n file_prefix = filename.replace(file_ext, \"\")\n definition_text = definition_file.read()\n try:\n definition_data = cls.parse_markup(definition_text, markup)\n if cls.is_valid_definition(definition_data, required_keys):\n if as_text:\n example_dcm_defs[file_prefix] = definition_text\n else:\n example_dcm_defs[file_prefix] = definition_data\n except Exception as ex:\n cls.handle_markup_issue(\n filename, definition_text, ex, markup\n )\n return example_dcm_defs\n\n @classmethod\n def parse_markup(cls, text: str, markup: str) -> Union[dict, list]:\n \"\"\"\n Parse the given text as JSON or YAML based on the specified markup type.\n\n Args:\n text (str): The string content to be parsed.\n markup (str): The type of markup to use for parsing. Supported values are 'json' and 'yaml'.\n\n Returns:\n Union[dict, list]: The parsed data, which can be either a dictionary or a list, depending on the content.\n\n Raises:\n ValueError: If an unsupported markup format is specified.\n \"\"\"\n if markup == \"json\":\n data=json.loads(text)\n return data\n elif markup == \"yaml\":\n data=yaml.safe_load(text)\n return data\n else:\n raise ValueError(f\"Unsupported markup format: {markup}\")\n\n @classmethod\n def handle_markup_issue(cls, name: str, definition_string: str, ex, markup: str):\n if isinstance(ex, JSONDecodeError):\n lines = definition_string.splitlines() # Split the string into lines\n err_line = lines[ex.lineno - 1] # JSONDecodeError gives 1-based lineno\n pointer = (\n \" \" * (ex.colno - 1) + \"^\"\n ) # Create a pointer string to indicate the error position\n error_message = (\n f\"{name}:JSON parsing error on line {ex.lineno} column {ex.colno}:\\n\"\n f\"{err_line}\\n\"\n f\"{pointer}\\n\"\n f\"{ex.msg}\"\n )\n raise ValueError(error_message) # Raise a new exception with this message\n else:\n error_message = f\"error in {name}: {str(ex)}\"\n raise ValueError(error_message)\n\n @classmethod\n def is_valid_definition(cls, definition_data, required_keys: Tuple):\n return all(key in definition_data for key in required_keys)\n\n @classmethod\n def get_examples(cls, content_class=CompetenceTree, markup: str = \"json\") -> dict:\n examples = {}\n for name, definition_string in cls.get_example_dcm_definitions(\n required_keys=content_class.required_keys(), markup=markup\n ).items():\n example = cls.from_definition_string(\n name, definition_string, content_class, markup=markup\n )\n # check the type of the example\n example_id = example.main_id\n examples[example_id] = example\n return examples\n\n @classmethod\n def from_definition_string(\n cls, name: str, definition_string: str, content_class, markup: str = \"json\"\n ) -> Any:\n \"\"\"\n Load a DynamicCompetenceMap or Learner instance from a definition string (either JSON or YAML).\n\n Args:\n name (str): A name identifier for the data source.\n definition_string (str): The string content of the 
definition.\n content_class (dataclass_json): The class which will be instantiated with the parsed data.\n markup (str): The markup format of the data. Defaults to 'json'. Supported values are 'json' and 'yaml'.\n\n Returns:\n DynamicCompetenceMap: An instance of DynamicCompetenceMap loaded with the parsed data.\n\n Raises:\n ValueError: If there's an error in parsing the data.\n \"\"\"\n try:\n data = cls.parse_markup(definition_string, markup)\n content = content_class.from_dict(data)\n if isinstance(content, CompetenceTree):\n return DynamicCompetenceMap(content)\n else:\n return content\n except Exception as ex:\n cls.handle_markup_issue(name, definition_string, ex, markup)" }, { "identifier": "Learner", "path": "dcm/dcm_core.py", "snippet": "class Learner:\n \"\"\"\n A learner with achievements.\n Attributes:\n learner_id (str): Identifier for the learner.\n achievements (Dict[str, List[Achievement]]):\n A dictionary where each key is a competence element identifier\n and the value is a list of Achievement instances for that tree.\n \"\"\"\n\n learner_id: str\n achievements: Optional[List[Achievement]] = field(default=None)\n\n def __post_init__(self):\n self.achievements_by_path = {}\n if self.achievements:\n for achievement in self.achievements:\n self.achievements_by_path[achievement.path] = achievement\n\n @classmethod\n def required_keys(cls):\n keys = {\"achievements\"}\n return keys\n\n @property\n def main_id(self):\n main_id = self.learner_id\n return main_id\n\n def add_achievement(self, new_achievement):\n self.achievements.append(new_achievement)\n self.achievements_by_path[new_achievement.path] = new_achievement\n\n def get_competence_tree_ids(self) -> List[str]:\n \"\"\"\n Get all unique competence tree IDs of my achievements.\n\n Returns:\n List[str]: A list of unique competence tree IDs.\n \"\"\"\n # Assuming that the learner's achievements are stored in a list called self.achievements\n # You can modify this part according to your actual data structure.\n\n # Create a set to store unique competence tree IDs\n unique_tree_ids = set()\n\n # Iterate through the learner's achievements\n for achievement in self.achievements:\n # Assuming each achievement has a tree_id attribute\n tree_id = achievement.tree_id\n\n # Add the tree_id to the set\n unique_tree_ids.add(tree_id)\n\n # Convert the set to a list and return\n return list(unique_tree_ids)" }, { "identifier": "SVG", "path": "dcm/svg.py", "snippet": "class SVG:\n \"\"\"\n Class for creating SVG drawings.\n\n Attributes:\n config (SVGConfig): Configuration for the SVG drawing.\n \"\"\"\n\n def __init__(self, config: SVGConfig = None):\n \"\"\"\n Initialize SVG object with given configuration.\n\n Args:\n config (SVGConfig): Configuration for SVG generation.\n \"\"\"\n self.config = config if config else SVGConfig()\n self.width = self.config.width\n self.height = self.config.height\n self.elements = []\n self.indent = self.config.indent\n\n def get_svg_style(self) -> str:\n \"\"\"\n Define styles for SVG elements.\n\n Returns:\n str: String containing style definitions for SVG.\n \"\"\"\n return (\n f\"{self.indent}<style>\\n\"\n f\"{self.indent * 2}.hoverable {{ cursor: pointer; fill-opacity: 1; stroke: black; stroke-width: 0.5; }}\\n\"\n f\"{self.indent * 2}.hoverable:hover {{ fill-opacity: 0.7; }}\\n\"\n f\"{self.indent * 2}.selected {{ fill-opacity: 0.5; stroke: blue; stroke-width: 1.5;}}\\n\"\n f\"{self.indent * 2}.popup {{\\n\"\n f\"{self.indent * 3}border: 2px solid black;\\n\"\n f\"{self.indent * 
3}border-radius: 15px;\\n\"\n f\"{self.indent * 3}overflow: auto;\\n\" # changed to 'auto' to allow scrolling only if needed\n f\"{self.indent * 3}background: white;\\n\"\n f\"{self.indent * 3}box-sizing: border-box;\\n\" # ensures padding and border are included\n f\"{self.indent * 3}padding: 10px;\\n\" # optional padding inside the popup\n f\"{self.indent * 3}height: 100%;\\n\" # adjusts height relative to foreignObject\n f\"{self.indent * 3}width: 100%;\\n\" # adjusts width relative to foreignObject\n f\"{self.indent * 2}}}\\n\"\n f\"{self.indent * 2}.close-btn {{\\n\" # style for the close button\n f\"{self.indent * 3}cursor: pointer;\\n\"\n f\"{self.indent * 3}position: absolute;\\n\"\n f\"{self.indent * 3}top: 0;\\n\"\n f\"{self.indent * 3}right: 0;\\n\"\n f\"{self.indent * 3}padding: 5px;\\n\"\n f\"{self.indent * 3}font-size: 20px;\\n\"\n f\"{self.indent * 3}user-select: none;\\n\" # prevents text selection on click\n f\"{self.indent * 2}}}\\n\"\n f\"{self.indent}</style>\\n\"\n )\n\n def get_text_width(self, text: str) -> int:\n \"\"\"\n Estimate the width of a text string in the SVG based on the font size and font name.\n\n Args:\n text (str): The text content.\n\n Returns:\n int: The estimated width of the text in pixels.\n \"\"\"\n average_char_width_factor = 0.6\n average_char_width = average_char_width_factor * self.config.font_size\n return int(average_char_width * len(text))\n\n def add_element(self, element: str, level: int = 1, comment: str = None):\n \"\"\"\n Add an SVG element to the elements list with proper indentation.\n\n Args:\n element (str): SVG element to be added.\n level (int): Indentation level for the element.\n comment(str): optional comment to add\n \"\"\"\n base_indent = f\"{self.indent * level}\"\n if comment:\n indented_comment = f\"{base_indent}<!-- {comment} -->\\n\"\n self.elements.append(indented_comment)\n indented_element = f\"{base_indent}{element}\\n\"\n self.elements.append(indented_element)\n\n def add_circle(self, config: SVGNodeConfig):\n \"\"\"\n Add a circle element to the SVG, optionally making it clickable and with a hover effect.\n\n Args:\n config (SVGNodeConfig): Configuration for the circle element.\n \"\"\"\n color = config.fill if config.fill else self.config.default_color\n circle_element = f'<circle cx=\"{config.x}\" cy=\"{config.y}\" r=\"{config.width}\" fill=\"{color}\" class=\"{config.element_class}\" />'\n\n # If URL is provided, wrap the circle in an anchor tag to make it clickable\n if config.url:\n circle_indent = self.indent * (config.indent_level + 1)\n circle_element = f\"\"\"<a xlink:href=\"{config.url}\" target=\"_blank\">\n{circle_indent}{circle_element}\n</a>\"\"\"\n\n # Use add_group to add the circle element with proper indentation\n self.add_group(\n circle_element,\n group_id=config.id,\n group_class=config.element_class,\n level=config.indent_level,\n comment=config.comment,\n )\n\n def add_rectangle(\n self,\n x: int,\n y: int,\n width: int,\n height: int,\n fill: str = None,\n indent_level: int = 1,\n ):\n \"\"\"\n Add a rectangle element to the SVG.\n\n Args:\n x (int): X-coordinate of the rectangle's top-left corner.\n y (int): Y-coordinate of the rectangle's top-left corner.\n width (int): Width of the rectangle.\n height (int): Height of the rectangle.\n fill (str, optional): Fill color of the rectangle. 
Defaults to the default color.\n indent_level (int): Indentation level for the rectangle.\n \"\"\"\n color = fill if fill else self.config.default_color\n rect = f'{self.indent * 3}<rect x=\"{x}\" y=\"{y}\" width=\"{width}\" height=\"{height}\" fill=\"{color}\" />\\n'\n self.add_element(rect)\n\n def add_legend_column(\n self,\n items: List[Tuple[str, str]],\n title: str,\n x: int,\n y: int,\n width: int,\n height: int,\n ) -> None:\n \"\"\"\n Add a legend column to the SVG.\n\n Args:\n items (List[Tuple[str, str]]): List of tuples with color code and label.\n title (str): Title of the legend.\n x (int): X position of the legend.\n y (int): Y position of the legend.\n width (int): Width of the color box in the legend.\n height (int): Height of each legend item.\n \"\"\"\n self.add_text(x, y - height, title, font_weight=\"bold\")\n for index, (color, label) in enumerate(items):\n self.add_rectangle(x, y + index * (height + 5), width, height, color)\n self.add_text(x + width + 10, y + index * (height + 5) + height / 2, label)\n\n def add_text(\n self,\n x: int,\n y: int,\n text: str,\n fill: str = \"black\",\n font_weight: str = \"normal\",\n text_anchor: str = \"start\",\n ) -> None:\n \"\"\"\n Add text to the SVG.\n\n Args:\n x (int): X position of the text.\n y (int): Y position of the text.\n text (str): Text content.\n fill (str, optional): Fill color of the text. Defaults to \"black\".\n font_weight (str, optional): Font weight (normal, bold, etc.). Defaults to \"normal\".\n text_anchor (str, optional): Text alignment (start, middle, end). Defaults to \"start\".\n \"\"\"\n escaped_text = html.escape(text)\n text_element = (\n f'<text x=\"{x}\" y=\"{y}\" fill=\"{fill}\" '\n f'font-family=\"{self.config.font}\" '\n f'font-size=\"{self.config.font_size}\" '\n f'font-weight=\"{font_weight}\" '\n f'text-anchor=\"{text_anchor}\">'\n f\"{escaped_text}</text>\\n\"\n )\n self.add_element(text_element)\n\n def add_group(\n self,\n content: str,\n group_id: str = None,\n group_class: str = None,\n level: int = 1,\n comment: str = None,\n ):\n \"\"\"\n Add a group of elements to the SVG.\n\n Args:\n content (str): SVG content to be grouped.\n group_id (str, optional): ID for the group.\n group_class (str, optional): Class for the group.\n level (int): Indentation level for the group.\n \"\"\"\n group_attrs = []\n if group_id:\n group_attrs.append(f'id=\"{group_id}\"')\n if group_class:\n group_attrs.append(f'class=\"{group_class}\"')\n attrs_str = \" \".join(group_attrs)\n indented_content = \"\\n\".join(\n f\"{self.indent * (level + 1)}{line}\" for line in content.strip().split(\"\\n\")\n )\n group_str = f\"{self.indent * level}<g {attrs_str}>\\n{indented_content}\\n{self.indent * level}</g>\\n\"\n self.add_element(group_str, level=level, comment=comment)\n\n def add_pie_segment(\n self,\n cx: int,\n cy: int,\n radius: int,\n start_angle_deg: float,\n end_angle_deg: float,\n color: str,\n segment_name: str,\n segment_id: str = None,\n segment_class: str = None,\n segment_url: str = None,\n ) -> None:\n \"\"\"\n Add a pie segment to the SVG.\n\n Args:\n cx (int): X-coordinate of the center of the pie.\n cy (int): Y-coordinate of the center of the pie.\n radius (int): Radius of the pie.\n start_angle_deg (float): Start angle of the segment in degrees.\n end_angle_deg (float): End angle of the segment in degrees.\n color (str): Fill color of the segment.\n segment_name (str): Name of the segment, used for the tooltip.\n segment_id (str, optional): ID for the segment group. 
Defaults to None.\n segment_class (str, optional): Class for the segment group. Defaults to None.\n segment_url (str, optional): URL linked to the segment. Defaults to None.\n\n Returns:\n None\n \"\"\"\n if color is None:\n color = self.config.default_color\n # Convert angles from degrees to radians for calculations\n start_angle_rad = radians(start_angle_deg)\n end_angle_rad = radians(end_angle_deg)\n\n # Calculate the start and end points\n start_x = cx + radius * cos(start_angle_rad)\n start_y = cy + radius * sin(start_angle_rad)\n end_x = cx + radius * cos(end_angle_rad)\n end_y = cy + radius * sin(end_angle_rad)\n\n # Determine if the arc should be drawn as a large-arc (values >= 180 degrees)\n large_arc_flag = \"1\" if end_angle_deg - start_angle_deg >= 180 else \"0\"\n\n # Create the path for the pie segment without indentation\n path_str = (\n f\"M {cx} {cy} \"\n f\"L {start_x} {start_y} \"\n f\"A {radius} {radius} 0 {large_arc_flag} 1 {end_x} {end_y} \"\n \"Z\"\n )\n\n # Assemble the path and title elements\n path_element = f'<path d=\"{path_str}\" fill=\"{color}\" />\\n'\n escaped_title = html.escape(segment_name) # Escape special characters\n\n title_element = f\"<title>{escaped_title}</title>\"\n\n # Combine path and title into one string without adding indentation here\n group_content = f\"{path_element}{title_element}\"\n\n # If an URL is provided, wrap the content within an anchor\n if segment_url:\n group_content = (\n f'<a xlink:href=\"{segment_url}\" target=\"_blank\">\\n{group_content}</a>\\n'\n )\n\n # Use add_group to add the pie segment with proper indentation\n self.add_group(\n group_content, group_id=segment_id, group_class=segment_class, level=2\n )\n\n def add_donut_segment(\n self,\n config: SVGNodeConfig,\n segment: DonutSegment,\n ) -> None:\n \"\"\"\n Add a donut segment to the SVG.\n\n Args:\n config (SVGNodeConfig): Configuration for the donut segment.\n start_angle_deg (float): Start angle of the segment in degrees.\n end_angle_deg (float): End angle of the segment in degrees.\n \"\"\"\n cx, cy = config.x, config.y\n color = config.fill if config.fill else self.config.default_color\n\n if color is None:\n color = self.config.default_color\n # Convert angles from degrees to radians for calculations\n start_angle_rad = radians(segment.start_angle)\n end_angle_rad = radians(segment.end_angle)\n\n # Calculate the start and end points for the outer radius\n start_x_outer = cx + segment.outer_radius * cos(start_angle_rad)\n start_y_outer = cy + segment.outer_radius * sin(start_angle_rad)\n end_x_outer = cx + segment.outer_radius * cos(end_angle_rad)\n end_y_outer = cy + segment.outer_radius * sin(end_angle_rad)\n\n # Calculate the start and end points for the inner radius\n start_x_inner = cx + segment.inner_radius * cos(start_angle_rad)\n start_y_inner = cy + segment.inner_radius * sin(start_angle_rad)\n end_x_inner = cx + segment.inner_radius * cos(end_angle_rad)\n end_y_inner = cy + segment.inner_radius * sin(end_angle_rad)\n\n # Determine if the arc should be drawn as a large-arc (values >= 180 degrees)\n large_arc_flag = \"1\" if segment.end_angle - segment.start_angle >= 180 else \"0\"\n\n # Create the path for the pie segment without indentation\n path_str = (\n f\"M {start_x_inner} {start_y_inner} \" # Move to start of inner arc\n f\"L {start_x_outer} {start_y_outer} \" # Line to start of outer arc\n f\"A {segment.outer_radius} {segment.outer_radius} 0 {large_arc_flag} 1 {end_x_outer} {end_y_outer} \" # Outer arc\n f\"L {end_x_inner} {end_y_inner} \" # 
Line to end of inner arc\n f\"A {segment.inner_radius} {segment.inner_radius} 0 {large_arc_flag} 0 {start_x_inner} {start_y_inner} \" # Inner arc (reverse)\n \"Z\"\n )\n\n # Assemble the path and title elements\n path_element = f'<path d=\"{path_str}\" fill=\"{color}\" />\\n'\n escaped_title = html.escape(config.title) # Escape special characters\n\n title_element = f\"<title>{escaped_title}</title>\"\n\n # Combine path and title into one string without adding indentation here\n group_content = f\"{path_element}{title_element}\"\n\n # Check if the segment should be shown as a popup\n if config.show_as_popup:\n # Add JavaScript to handle popup logic\n onclick_action = f\"onclick=\\\"showPopup('{config.url}', evt,this)\\\"\"\n group_content = f\"<g {onclick_action}>{group_content}</g>\"\n elif config.url:\n # Regular link behavior\n group_content = (\n f'<a xlink:href=\"{config.url}\" target=\"_blank\">{group_content}</a>'\n )\n\n # Use add_group to add the pie segment with proper indentation\n self.add_group(\n group_content,\n group_id=config.id,\n group_class=config.element_class,\n level=2,\n comment=config.comment,\n )\n\n def get_java_script(self) -> str:\n \"\"\"\n get the java script code for interactive behavior\n \"\"\"\n popup_script = \"\"\"\n <script>\n function showPopup(url, evt,element) {\n // show a Popup fetching html content from the given url\n // for the given element\n // Handle the selection of the popup element\n selectPopupElement(element);\n var popup = document.getElementById('dcm-svg-popup');\n var iframe = document.getElementById('popup-iframe');\n var svgRect = evt.target.getBoundingClientRect();\n var svg = document.querySelector('svg');\n var svgPoint = svg.createSVGPoint();\n svgPoint.x = evt.clientX - svgRect.left;\n svgPoint.y = evt.clientY - svgRect.top;\n \n // Position the popup near the click event\n popup.setAttribute('x', svgPoint.x);\n popup.setAttribute('y', svgPoint.y);\n // Set the iframe src and make the popup visible\n iframe.setAttribute('src', url);\n popup.setAttribute('visibility', 'visible');\n }\n \n function selectPopupElement(element) {\n var popup = document.getElementById('dcm-svg-popup');\n \n // Deselect the current element if there is one\n if (popup.currentElement) {\n popup.currentElement.classList.remove('selected');\n }\n \n // Select the new element\n if (element) {\n element.classList.add('selected');\n popup.currentElement = element; // Update the reference to the currently selected element\n } else {\n popup.currentElement = null; // Clear the reference if no element is passed\n }\n }\n \n function closePopup() {\n var popup = document.getElementById('dcm-svg-popup');\n popup.setAttribute('visibility', 'hidden');\n // Deselect the element when the popup is closed\n selectPopupElement(null);\n }\n </script>\n \"\"\"\n return popup_script\n\n def get_svg_markup(self, with_java_script: bool = True) -> str:\n \"\"\"\n Generate the complete SVG markup.\n\n Args:\n with_java_script(bool): if True(default) the javascript code is included otherwise\n it's available via the get_java_script function\n\n Returns:\n str: String containing the complete SVG markup.\n \"\"\"\n # Get current date and time\n now = datetime.now()\n formatted_now = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n header = (\n f\"<!-- generated by dcm https://github.com/WolfgangFahl/dcm at {formatted_now} -->\\n\"\n f'<svg xmlns=\"http://www.w3.org/2000/svg\" '\n f'xmlns:xlink=\"http://www.w3.org/1999/xlink\" '\n f'width=\"{self.width}\" 
height=\"{self.config.total_height}\">\\n'\n )\n popup = \"\"\"\n <!-- Add a foreignObject for the popup -->\n<foreignObject id=\"dcm-svg-popup\" class=\"popup\" width=\"500\" height=\"354\" x=\"150\" y=\"260\" visibility=\"hidden\">\n <body xmlns=\"http://www.w3.org/1999/xhtml\">\n <!-- Content of your popup goes here -->\n <div class=\"popup\" style=\"background-color: white; border: 1px solid black; padding: 10px; box-sizing: border-box; width: 500px; height: 354px; position: relative;\">\n <span onclick=\"closePopup()\" class=\"close-btn\">ⓧ</span>\n <iframe id=\"popup-iframe\" width=\"100%\" height=\"100%\" frameborder=\"0\"></iframe>\n </div>\n </body>\n</foreignObject>\n\"\"\"\n\n styles = self.get_svg_style()\n body = \"\".join(self.elements)\n footer = \"</svg>\"\n java_script = self.get_java_script() if with_java_script else \"\"\n svg_markup = f\"{header}{java_script}{styles}{body}{popup}{footer}\"\n return svg_markup\n\n def save(self, filename: str):\n \"\"\"\n Save the SVG markup to a file.\n\n Args:\n filename (str): Filename to save the SVG markup.\n \"\"\"\n with open(filename, \"w\") as file:\n file.write(self.get_svg_markup())" }, { "identifier": "SVGConfig", "path": "dcm/svg.py", "snippet": "class SVGConfig:\n \"\"\"\n Configuration class for SVG generation.\n\n Attributes:\n width (int): Width of the SVG canvas in pixels.\n height (int): Height of the SVG canvas in pixels.\n legend_height (int): Height reserved for the legend in pixels.\n font (str): Font family for text elements.\n font_size (int): Font size in points for text elements.\n indent (str): Indentation string, default is two spaces.\n default_color (str): Default color code for SVG elements.\n \"\"\"\n\n width: int = 600\n height: int = 600\n legend_height: int = 150\n font: str = \"Arial\"\n font_size: int = 12\n indent: str = \" \"\n default_color: str = \"#C0C0C0\"\n\n @property\n def total_height(self) -> int:\n \"\"\"\n Calculate total height of the SVG canvas including the legend.\n\n Returns:\n int: Total height of the SVG canvas.\n \"\"\"\n return self.height + self.legend_height" }, { "identifier": "Version", "path": "dcm/version.py", "snippet": "class Version:\n \"\"\"\n Version handling for nicepdf\n \"\"\"\n\n name = \"dcm\"\n version = dcm.__version__\n date = \"2023-11-06\"\n updated = \"2024-01-15\"\n description = \"python based visualization of dynamic competence maps\"\n\n authors = \"Wolfgang Fahl\"\n\n doc_url = \"https://wiki.bitplan.com/index.php/dcm\"\n chat_url = \"https://github.com/WolfgangFahl/dcm/discussions\"\n cm_url = \"https://github.com/WolfgangFahl/dcm\"\n\n license = f\"\"\"Copyright 2023 contributors. All rights reserved.\n\n Licensed under the Apache License 2.0\n http://www.apache.org/licenses/LICENSE-2.0\n\n Distributed on an \"AS IS\" basis without warranties\n or conditions of any kind, either express or implied.\"\"\"\n\n longDescription = f\"\"\"{name} version {version}\n{description}\n\n Created by {authors} on {date} last updated {updated}\"\"\"" } ]
import os from typing import Optional from urllib.parse import urlparse from fastapi import HTTPException from fastapi.responses import HTMLResponse from ngwidgets.file_selector import FileSelector from ngwidgets.input_webserver import InputWebserver from ngwidgets.webserver import WebserverConfig from nicegui import Client, app, ui from pydantic import BaseModel from dcm.dcm_assessment import Assessment from dcm.dcm_chart import DcmChart from dcm.dcm_core import CompetenceTree, DynamicCompetenceMap, Learner from dcm.svg import SVG, SVGConfig from dcm.version import Version
13,264
""" Endpoints to get the description of a competence area Args: tree_id (str): ID of the tree area_id (str): ID of the area aspect_id (str, optional): ID of the aspect. Defaults to None. Returns: HTMLResponse: HTML content of the description. """ path=f"{tree_id}/{aspect_id}/{area_id}" return await self.show_description(path) @app.get("/description/{tree_id}/{aspect_id}") async def get_description_for_aspect( tree_id: str, aspect_id: str = None ) -> HTMLResponse: """ Endpoint to get the description of a competence aspect Args: tree_id (str): ID of the tree area_id (str): ID of the area Returns: HTMLResponse: HTML content of the description. """ path=f"{tree_id}/{aspect_id}" return await self.show_description(path) @app.get("/description/{tree_id}") async def get_description_for_tree( tree_id: str ) -> HTMLResponse: """ Endpoint to get the description of a competence tree Args: tree_id (str): ID of the tree Returns: HTMLResponse: HTML content of the description. """ path=f"{tree_id}" return await self.show_description(path) async def show_description( self, path:str=None ) -> HTMLResponse: """ Show the HTML description of a specific competence element given by the path Args: path(str): the path identifying the element Returns: HTMLResponse: The response object containing the HTML-formatted description. Raises: HTTPException: If the example name provided does not exist in the examples collection. """ path_parts=path.split("/") tree_id=path_parts[0] if tree_id in self.examples: example = self.examples[tree_id] element = example.competence_tree.lookup_by_path(path) if element: content = element.as_html() return HTMLResponse(content=content) else: content = ( f"No element found for {path} in {tree_id}" ) return HTMLResponse(content=content, status_code=404) else: msg = f"unknown competence tree {tree_id}" raise HTTPException(status_code=404, detail=msg) async def render_svg(self, svg_render_request: SVGRenderRequest) -> HTMLResponse: """ render the given request """ r = svg_render_request dcm = DynamicCompetenceMap.from_definition_string( r.name, r.definition, content_class=CompetenceTree, markup=r.markup ) dcm_chart = DcmChart(dcm) svg_markup = dcm_chart.generate_svg_markup( config=r.config, with_java_script=True ) response = HTMLResponse(content=svg_markup) return response def get_basename_without_extension(self, url) -> str: # Parse the URL to get the path component path = urlparse(url).path # Extract the base name (e.g., "example.html" from "/dir/example.html") basename = os.path.basename(path) # Split the base name and extension and return just the base name return os.path.splitext(basename)[0] async def render(self, _click_args=None): """ Renders the json content as an SVG visualization Args: click_args (object): The click event arguments. """ try: input_source = self.input if input_source: name = self.get_basename_without_extension(input_source) ui.notify(f"rendering {name}") definition = self.do_read_input(input_source) # Determine the format based on the file extension markup = "json" if input_source.endswith(".json") else "yaml" if "learner_id" in definition:
""" Created on 2023-11-06 @author: wf """ class SVGRenderRequest(BaseModel): """ A request for rendering an SVG. Attributes: name (str): The name of the render request. definition (str): The string representation of the data to be rendered, in either JSON or YAML format. markup (str): The format of the definition ('json' or 'yaml'). config (SVGConfig): Optional configuration for SVG rendering. Defaults to None, which uses default settings. """ name: str definition: str markup: str config: Optional[SVGConfig] = None class DynamicCompentenceMapWebServer(InputWebserver): """ server to supply Dynamic Competence Map Visualizations """ @classmethod def get_config(cls) -> WebserverConfig: """ get the configuration for this Webserver """ copy_right = "(c)2023-2024 Wolfgang Fahl" config = WebserverConfig( copy_right=copy_right, version=Version(), default_port=8885 ) return config def __init__(self): """Constructs all the necessary attributes for the WebServer object.""" InputWebserver.__init__( self, config=DynamicCompentenceMapWebServer.get_config() ) self.examples = DynamicCompetenceMap.get_examples(markup="yaml") self.dcm = None self.assessment = None @app.post("/svg/") async def render_svg(svg_render_request: SVGRenderRequest) -> HTMLResponse: """ render the given request """ return await self.render_svg(svg_render_request) @app.get("/description/{tree_id}/{aspect_id}/{area_id}/{facet_id}") async def get_description_for_facet( tree_id: str, aspect_id: str = None, area_id:str=None, facet_id: str = None ) -> HTMLResponse: """ Endpoints to get the description of a competence facet Args: tree_id (str): ID of the tree area_id (str): ID of the area aspect_id (str, optional): ID of the aspect. Defaults to None. facet_id (str, optional): ID of the facet. Defaults to None. Returns: HTMLResponse: HTML content of the description. """ path=f"{tree_id}/{aspect_id}/{area_id}/{facet_id}" return await self.show_description(path) @app.get("/description/{tree_id}/{aspect_id}/{area_id}") async def get_description_for_area( tree_id: str, aspect_id: str = None, area_id:str=None ) -> HTMLResponse: """ Endpoints to get the description of a competence area Args: tree_id (str): ID of the tree area_id (str): ID of the area aspect_id (str, optional): ID of the aspect. Defaults to None. Returns: HTMLResponse: HTML content of the description. """ path=f"{tree_id}/{aspect_id}/{area_id}" return await self.show_description(path) @app.get("/description/{tree_id}/{aspect_id}") async def get_description_for_aspect( tree_id: str, aspect_id: str = None ) -> HTMLResponse: """ Endpoint to get the description of a competence aspect Args: tree_id (str): ID of the tree area_id (str): ID of the area Returns: HTMLResponse: HTML content of the description. """ path=f"{tree_id}/{aspect_id}" return await self.show_description(path) @app.get("/description/{tree_id}") async def get_description_for_tree( tree_id: str ) -> HTMLResponse: """ Endpoint to get the description of a competence tree Args: tree_id (str): ID of the tree Returns: HTMLResponse: HTML content of the description. """ path=f"{tree_id}" return await self.show_description(path) async def show_description( self, path:str=None ) -> HTMLResponse: """ Show the HTML description of a specific competence element given by the path Args: path(str): the path identifying the element Returns: HTMLResponse: The response object containing the HTML-formatted description. Raises: HTTPException: If the example name provided does not exist in the examples collection. 
""" path_parts=path.split("/") tree_id=path_parts[0] if tree_id in self.examples: example = self.examples[tree_id] element = example.competence_tree.lookup_by_path(path) if element: content = element.as_html() return HTMLResponse(content=content) else: content = ( f"No element found for {path} in {tree_id}" ) return HTMLResponse(content=content, status_code=404) else: msg = f"unknown competence tree {tree_id}" raise HTTPException(status_code=404, detail=msg) async def render_svg(self, svg_render_request: SVGRenderRequest) -> HTMLResponse: """ render the given request """ r = svg_render_request dcm = DynamicCompetenceMap.from_definition_string( r.name, r.definition, content_class=CompetenceTree, markup=r.markup ) dcm_chart = DcmChart(dcm) svg_markup = dcm_chart.generate_svg_markup( config=r.config, with_java_script=True ) response = HTMLResponse(content=svg_markup) return response def get_basename_without_extension(self, url) -> str: # Parse the URL to get the path component path = urlparse(url).path # Extract the base name (e.g., "example.html" from "/dir/example.html") basename = os.path.basename(path) # Split the base name and extension and return just the base name return os.path.splitext(basename)[0] async def render(self, _click_args=None): """ Renders the json content as an SVG visualization Args: click_args (object): The click event arguments. """ try: input_source = self.input if input_source: name = self.get_basename_without_extension(input_source) ui.notify(f"rendering {name}") definition = self.do_read_input(input_source) # Determine the format based on the file extension markup = "json" if input_source.endswith(".json") else "yaml" if "learner_id" in definition:
content_class = Learner
4
2023-11-06 09:24:24+00:00
16k
shadowpa0327/FLORA
models/build.py
[ { "identifier": "VisionTransformer", "path": "models/deit.py", "snippet": "class VisionTransformer(nn.Module):\n \"\"\" Vision Transformer\n\n A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`\n - https://arxiv.org/abs/2010.11929\n\n Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`\n - https://arxiv.org/abs/2012.12877\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,\n num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,\n act_layer=None, weight_init=''):\n \"\"\"\n Args:\n img_size (int, tuple): input image size\n patch_size (int, tuple): patch size\n in_chans (int): number of input channels\n num_classes (int): number of classes for classification head\n embed_dim (int): embedding dimension\n depth (int): depth of transformer\n num_heads (int): number of attention heads\n mlp_ratio (int): ratio of mlp hidden dim to embedding dim\n qkv_bias (bool): enable bias for qkv if True\n representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\n drop_rate (float): dropout rate\n attn_drop_rate (float): attention dropout rate\n drop_path_rate (float): stochastic depth rate\n embed_layer (nn.Module): patch embedding layer\n norm_layer: (nn.Module): normalization layer\n weight_init: (str): weight init scheme\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models\n self.num_tokens = 1\n norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\n act_layer = act_layer or nn.GELU\n\n self.patch_embed = embed_layer(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)\n num_patches = self.patch_embed.num_patches\n\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n self.total_patches = num_patches + self.num_tokens\n \n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule\n self.blocks = nn.Sequential(*[\n Block(\n dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,\n attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)\n for i in range(depth)])\n self.norm = norm_layer(embed_dim)\n\n # Representation layer\n self.pre_logits = nn.Identity()\n\n # Classifier head(s)\n self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n\n trunc_normal_(self.pos_embed, std=.02)\n trunc_normal_(self.cls_token, std=.02)\n self.apply(self._init_weights)\n\n self.current_cfg = None\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'pos_embed', 'cls_token'}\n\n def get_classifier(self):\n return self.head\n\n\n def reset_classifier(self, num_classes, global_pool=''):\n self.num_classes = num_classes\n self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 
else nn.Identity()\n\n def forward_features(self, x, return_intermediate = False):\n intermediate_outputs = []\n x = self.patch_embed(x)\n cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks\n x = torch.cat((cls_token, x), dim=1)\n x = self.pos_drop(x + self.pos_embed)\n for b in self.blocks:\n if return_intermediate:\n intermediate_outputs.append(x)\n x = b(x)\n x = self.norm(x)\n if return_intermediate:\n return self.pre_logits(x[:, 0]), intermediate_outputs\n else:\n return self.pre_logits(x[:, 0])\n\n\n def forward(self, x, return_intermediate = False):\n if return_intermediate:\n x, intermedite_outputs = self.forward_features(x, return_intermediate)\n x = self.head(x)\n return x, intermedite_outputs\n else:\n x = self.forward_features(x)\n x = self.head(x)\n return x" }, { "identifier": "SwinTransformer", "path": "models/swin_transformer.py", "snippet": "class SwinTransformer(nn.Module):\n r\"\"\" Swin Transformer\n A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -\n https://arxiv.org/pdf/2103.14030\n\n Args:\n img_size (int | tuple(int)): Input image size. Default 224\n patch_size (int | tuple(int)): Patch size. Default: 4\n in_chans (int): Number of input image channels. Default: 3\n num_classes (int): Number of classes for classification head. Default: 1000\n embed_dim (int): Patch embedding dimension. Default: 96\n depths (tuple(int)): Depth of each Swin Transformer layer.\n num_heads (tuple(int)): Number of attention heads in different layers.\n window_size (int): Window size. Default: 7\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4\n qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None\n drop_rate (float): Dropout rate. Default: 0\n attn_drop_rate (float): Attention dropout rate. Default: 0\n drop_path_rate (float): Stochastic depth rate. Default: 0.1\n norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n ape (bool): If True, add absolute position embedding to the patch embedding. Default: False\n patch_norm (bool): If True, add normalization after patch embedding. Default: True\n use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False\n fused_window_process (bool, optional): If True, use one kernel to fused window shift & window partition for acceleration, similar for the reversed part. 
Default: False\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,\n embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],\n window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,\n norm_layer=nn.LayerNorm, ape=False, patch_norm=True,\n use_checkpoint=False, fused_window_process=False, **kwargs):\n super().__init__()\n\n self.num_classes = num_classes\n self.num_layers = len(depths)\n self.embed_dim = embed_dim\n self.ape = ape\n self.patch_norm = patch_norm\n self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))\n self.mlp_ratio = mlp_ratio\n\n # split image into non-overlapping patches\n self.patch_embed = PatchEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n num_patches = self.patch_embed.num_patches\n patches_resolution = self.patch_embed.patches_resolution\n self.patches_resolution = patches_resolution\n\n # absolute position embedding\n if self.ape:\n self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))\n trunc_normal_(self.absolute_pos_embed, std=.02)\n\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # build layers\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),\n input_resolution=(patches_resolution[0] // (2 ** i_layer),\n patches_resolution[1] // (2 ** i_layer)),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=window_size,\n mlp_ratio=self.mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate,\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n norm_layer=norm_layer,\n downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,\n use_checkpoint=use_checkpoint,\n fused_window_process=fused_window_process)\n self.layers.append(layer)\n\n self.norm = norm_layer(self.num_features)\n self.avgpool = nn.AdaptiveAvgPool1d(1)\n self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'absolute_pos_embed'}\n\n @torch.jit.ignore\n def no_weight_decay_keywords(self):\n return {'relative_position_bias_table'}\n\n def forward_features(self, x):\n x = self.patch_embed(x)\n if self.ape:\n x = x + self.absolute_pos_embed\n x = self.pos_drop(x)\n\n for layer in self.layers:\n x = layer(x)\n\n x = self.norm(x) # B L C\n x = self.avgpool(x.transpose(1, 2)) # B C 1\n x = torch.flatten(x, 1)\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.head(x)\n return x\n\n def flops(self):\n flops = 0\n flops += self.patch_embed.flops()\n for i, layer in enumerate(self.layers):\n flops += layer.flops()\n flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)\n flops += self.num_features * self.num_classes\n return flops" }, { "identifier": "LRSwinTransformer", "path": 
"models/lr_swin_transformer.py", "snippet": "class LRSwinTransformer(nn.Module):\n r\"\"\" Swin Transformer\n A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -\n https://arxiv.org/pdf/2103.14030\n\n Args:\n img_size (int | tuple(int)): Input image size. Default 224\n patch_size (int | tuple(int)): Patch size. Default: 4\n in_chans (int): Number of input image channels. Default: 3\n num_classes (int): Number of classes for classification head. Default: 1000\n embed_dim (int): Patch embedding dimension. Default: 96\n depths (tuple(int)): Depth of each Swin Transformer layer.\n num_heads (tuple(int)): Number of attention heads in different layers.\n window_size (int): Window size. Default: 7\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4\n qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None\n drop_rate (float): Dropout rate. Default: 0\n attn_drop_rate (float): Attention dropout rate. Default: 0\n drop_path_rate (float): Stochastic depth rate. Default: 0.1\n norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n ape (bool): If True, add absolute position embedding to the patch embedding. Default: False\n patch_norm (bool): If True, add normalization after patch embedding. Default: True\n use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False\n fused_window_process (bool, optional): If True, use one kernel to fused window shift & window partition for acceleration, similar for the reversed part. Default: False\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,\n embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],\n window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,\n norm_layer=nn.LayerNorm, ape=False, patch_norm=True,\n use_checkpoint=False, fused_window_process=False, **kwargs):\n super().__init__()\n\n self.num_classes = num_classes\n self.num_layers = len(depths)\n self.embed_dim = embed_dim\n self.ape = ape\n self.patch_norm = patch_norm\n self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))\n self.mlp_ratio = mlp_ratio\n\n # split image into non-overlapping patches\n self.patch_embed = PatchEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n num_patches = self.patch_embed.num_patches\n patches_resolution = self.patch_embed.patches_resolution\n self.patches_resolution = patches_resolution\n\n # absolute position embedding\n if self.ape:\n self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))\n trunc_normal_(self.absolute_pos_embed, std=.02)\n\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # build layers\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),\n input_resolution=(patches_resolution[0] // (2 ** i_layer),\n patches_resolution[1] // (2 ** i_layer)),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=window_size,\n mlp_ratio=self.mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate,\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n 
norm_layer=norm_layer,\n downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,\n use_checkpoint=use_checkpoint,\n fused_window_process=fused_window_process)\n self.layers.append(layer)\n\n self.norm = norm_layer(self.num_features)\n self.avgpool = nn.AdaptiveAvgPool1d(1)\n self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'absolute_pos_embed'}\n\n @torch.jit.ignore\n def no_weight_decay_keywords(self):\n return {'relative_position_bias_table'}\n\n def forward_features(self, x):\n x = self.patch_embed(x)\n if self.ape:\n x = x + self.absolute_pos_embed\n x = self.pos_drop(x)\n\n for layer in self.layers:\n x = layer(x)\n\n x = self.norm(x) # B L C\n x = self.avgpool(x.transpose(1, 2)) # B C 1\n x = torch.flatten(x, 1)\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.head(x)\n return x\n\n def flops(self):\n flops = 0\n flops += self.patch_embed.flops()\n for i, layer in enumerate(self.layers):\n flops += layer.flops()\n flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)\n flops += self.num_features * self.num_classes\n return flops\n\n def set_sample_config(self, lr_config):\n for ratio, layer in zip(lr_config, filter(lambda x: isinstance(x, LRLinearSuper), self.modules())):\n layer.set_sample_config(ratio)\n\n # def set_random_config_fn(self, fn):\n # self.random_config_fn = fn\n\n # def set_random_sample_config(self):\n # self.set_sample_config(self.random_config_fn())" }, { "identifier": "LRSwinTransformer", "path": "models/lr_swin_transformer_subnet.py", "snippet": "class LRSwinTransformer(nn.Module):\n r\"\"\" Swin Transformer\n A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -\n https://arxiv.org/pdf/2103.14030\n\n Args:\n img_size (int | tuple(int)): Input image size. Default 224\n patch_size (int | tuple(int)): Patch size. Default: 4\n in_chans (int): Number of input image channels. Default: 3\n num_classes (int): Number of classes for classification head. Default: 1000\n embed_dim (int): Patch embedding dimension. Default: 96\n depths (tuple(int)): Depth of each Swin Transformer layer.\n num_heads (tuple(int)): Number of attention heads in different layers.\n window_size (int): Window size. Default: 7\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4\n qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None\n drop_rate (float): Dropout rate. Default: 0\n attn_drop_rate (float): Attention dropout rate. Default: 0\n drop_path_rate (float): Stochastic depth rate. Default: 0.1\n norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n ape (bool): If True, add absolute position embedding to the patch embedding. Default: False\n patch_norm (bool): If True, add normalization after patch embedding. Default: True\n use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False\n fused_window_process (bool, optional): If True, use one kernel to fused window shift & window partition for acceleration, similar for the reversed part. Default: False\n \"\"\"\n\n def __init__(self, svd_config, img_size=224, patch_size=4, in_chans=3, num_classes=1000,\n embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],\n window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,\n norm_layer=nn.LayerNorm, ape=False, patch_norm=True,\n use_checkpoint=False, fused_window_process=False, **kwargs):\n super().__init__()\n\n self.num_classes = num_classes\n self.num_layers = len(depths)\n self.embed_dim = embed_dim\n self.ape = ape\n self.patch_norm = patch_norm\n self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))\n self.mlp_ratio = mlp_ratio\n\n # split image into non-overlapping patches\n self.patch_embed = PatchEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n num_patches = self.patch_embed.num_patches\n patches_resolution = self.patch_embed.patches_resolution\n self.patches_resolution = patches_resolution\n\n # absolute position embedding\n if self.ape:\n self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))\n trunc_normal_(self.absolute_pos_embed, std=.02)\n\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # build layers\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n layer = BasicLayer(svd_config=svd_config[i_layer],\n dim=int(embed_dim * 2 ** i_layer),\n input_resolution=(patches_resolution[0] // (2 ** i_layer),\n patches_resolution[1] // (2 ** i_layer)),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=window_size,\n mlp_ratio=self.mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate,\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n norm_layer=norm_layer,\n downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,\n use_checkpoint=use_checkpoint,\n fused_window_process=fused_window_process)\n self.layers.append(layer)\n\n self.norm = norm_layer(self.num_features)\n self.avgpool = nn.AdaptiveAvgPool1d(1)\n self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'absolute_pos_embed'}\n\n @torch.jit.ignore\n def no_weight_decay_keywords(self):\n return {'relative_position_bias_table'}\n\n def forward_features(self, x):\n x = self.patch_embed(x)\n if self.ape:\n x = x + self.absolute_pos_embed\n x = self.pos_drop(x)\n\n for layer in self.layers:\n x = layer(x)\n\n x = self.norm(x) # B L C\n x = self.avgpool(x.transpose(1, 2)) # B C 1\n x = torch.flatten(x, 1)\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.head(x)\n return x\n\n def flops(self):\n flops = 0\n flops += self.patch_embed.flops()\n for i, layer in enumerate(self.layers):\n flops += layer.flops()\n flops += self.num_features * 
self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)\n flops += self.num_features * self.num_classes\n return flops" }, { "identifier": "LRVisionTransformer", "path": "models/lr_deit.py", "snippet": "class LRVisionTransformer(nn.Module):\n \"\"\" Vision Transformer\n\n A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`\n - https://arxiv.org/abs/2010.11929\n\n Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`\n - https://arxiv.org/abs/2012.12877\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,\n num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,\n act_layer=None, weight_init='', fused_lr = False):\n \"\"\"\n Args:\n img_size (int, tuple): input image size\n patch_size (int, tuple): patch size\n in_chans (int): number of input channels\n num_classes (int): number of classes for classification head\n embed_dim (int): embedding dimension\n depth (int): depth of transformer\n num_heads (int): number of attention heads\n mlp_ratio (int): ratio of mlp hidden dim to embedding dim\n qkv_bias (bool): enable bias for qkv if True\n representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\n distilled (bool): model includes a distillation token and head as in DeiT models\n drop_rate (float): dropout rate\n attn_drop_rate (float): attention dropout rate\n drop_path_rate (float): stochastic depth rate\n embed_layer (nn.Module): patch embedding layer\n norm_layer: (nn.Module): normalization layer\n weight_init: (str): weight init scheme\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models\n self.num_tokens = 2 if distilled else 1\n norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\n act_layer = act_layer or nn.GELU\n\n self.patch_embed = embed_layer(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)\n num_patches = self.patch_embed.num_patches\n\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None\n self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))\n self.pos_drop = nn.Dropout(p=drop_rate)\n self.total_patches = num_patches + self.num_tokens\n \n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule\n self.blocks = nn.Sequential(*[\n Block(\n dim=embed_dim, num_heads=num_heads, num_tokens=self.total_patches, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,\n attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, fused_lr=fused_lr)\n for i in range(depth)])\n self.norm = norm_layer(embed_dim)\n\n # Representation layer\n if representation_size and not distilled:\n self.num_features = representation_size\n self.pre_logits = nn.Sequential(OrderedDict([\n ('fc', nn.Linear(embed_dim, representation_size)),\n ('act', nn.Tanh())\n ]))\n else:\n self.pre_logits = nn.Identity()\n\n # Classifier head(s)\n self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n self.head_dist = None\n if distilled:\n self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if 
num_classes > 0 else nn.Identity()\n\n trunc_normal_(self.pos_embed, std=.02)\n trunc_normal_(self.cls_token, std=.02)\n self.apply(self._init_weights)\n\n self.current_cfg = None\n \n \n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'pos_embed', 'cls_token', 'dist_token'}\n\n def get_classifier(self):\n if self.dist_token is None:\n return self.head\n else:\n return self.head, self.head_dist\n\n def reset_classifier(self, num_classes, global_pool=''):\n self.num_classes = num_classes\n self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n if self.num_tokens == 2:\n self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()\n\n def forward_features(self, x):\n x = self.patch_embed(x)\n cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks\n if self.dist_token is None:\n x = torch.cat((cls_token, x), dim=1)\n else:\n x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)\n x = self.pos_drop(x + self.pos_embed)\n x = self.blocks(x)\n x = self.norm(x)\n if self.dist_token is None:\n return self.pre_logits(x[:, 0])\n else:\n return x[:, 0], x[:, 1]\n\n def forward(self, x):\n x = self.forward_features(x)\n if self.head_dist is not None:\n x, x_dist = self.head(x[0]), self.head_dist(x[1]) # x must be a tuple\n if self.training and not torch.jit.is_scripting():\n # during inference, return the average of both classifier predictions\n return x, x_dist\n else:\n return (x + x_dist) / 2\n else:\n x = self.head(x)\n return x\n\n '''\n normalized: set it to be true is current config are already normalized\n '''\n def set_sample_config(self, lr_config, already_normalized=True):\n self.current_cfg = lr_config\n for rank_choice, layer in zip(lr_config, filter(lambda x: isinstance(x, LRLinearSuper), self.modules())):\n #print(ratio, layer)\n layer.set_sample_config(rank_choice, already_normalized)\n\n # def set_random_config_fn(self, fn, already_normalized = True):\n # self.random_config_fn = fn\n # self.rank_already_normalized = already_normalized\n\n # def set_random_sample_config(self):\n # self.set_sample_config(self.random_config_fn(), self.rank_already_normalized)\n\n def flops(self, cfg = None):\n flops = 0\n flops += self.patch_embed.flops()\n \n L = self.total_patches\n for block in self.blocks:\n # norm1\n flops += L * block.dim\n # attn\n # qkv embedding (skip)\n # calculate attn matrix\n flops += block.attn.num_heads * L * (block.dim // block.attn.num_heads) * L\n # attn @ v\n flops += block.attn.num_heads * L * L * (block.dim // block.attn.num_heads)\n # proj \n flops += L * block.dim * block.dim\n # mlp (skip)\n # norm2\n flops += block.dim * L\n \n # add the flops of linear embedding\n matrics_aspect_ratio = [3, 4, 4]\n config = cfg if cfg is not None else self.current_cfg\n for i, rank_ratio in enumerate(config):\n aspect_ratio = matrics_aspect_ratio[i%3]\n flops += L * min(self.num_features * self.num_features * aspect_ratio,\n int(round(rank_ratio * self.num_features)) * self.num_features * (1 + aspect_ratio))\n \n \n \n \n flops += self.num_features * self.num_tokens\n flops += self.num_features * self.num_classes\n return flops" }, { 
"identifier": "LRVisionTransformerSubnet", "path": "models/lr_deit_subnet.py", "snippet": "class LRVisionTransformerSubnet(nn.Module):\n \"\"\" Vision Transformer\n\n A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`\n - https://arxiv.org/abs/2010.11929\n\n Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`\n - https://arxiv.org/abs/2012.12877\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,\n num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,\n act_layer=None, weight_init='', svd_config = None):\n \"\"\"\n Args:\n img_size (int, tuple): input image size\n patch_size (int, tuple): patch size\n in_chans (int): number of input channels\n num_classes (int): number of classes for classification head\n embed_dim (int): embedding dimension\n depth (int): depth of transformer\n num_heads (int): number of attention heads\n mlp_ratio (int): ratio of mlp hidden dim to embedding dim\n qkv_bias (bool): enable bias for qkv if True\n representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\n distilled (bool): model includes a distillation token and head as in DeiT models\n drop_rate (float): dropout rate\n attn_drop_rate (float): attention dropout rate\n drop_path_rate (float): stochastic depth rate\n embed_layer (nn.Module): patch embedding layer\n norm_layer: (nn.Module): normalization layer\n weight_init: (str): weight init scheme\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models\n self.num_tokens = 2 if distilled else 1\n norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\n act_layer = act_layer or nn.GELU\n\n self.patch_embed = embed_layer(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)\n num_patches = self.patch_embed.num_patches\n\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None\n self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule\n self.blocks = nn.Sequential(*[\n Block(\n dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,\n attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer,\n svd_config=svd_config[i]\n )\n for i in range(depth)])\n self.norm = norm_layer(embed_dim)\n\n # Representation layer\n if representation_size and not distilled:\n self.num_features = representation_size\n self.pre_logits = nn.Sequential(OrderedDict([\n ('fc', nn.Linear(embed_dim, representation_size)),\n ('act', nn.Tanh())\n ]))\n else:\n self.pre_logits = nn.Identity()\n\n # Classifier head(s)\n self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n self.head_dist = None\n if distilled:\n self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()\n\n trunc_normal_(self.pos_embed, std=.02)\n trunc_normal_(self.cls_token, std=.02)\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, 
nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'pos_embed', 'cls_token', 'dist_token'}\n\n def get_classifier(self):\n if self.dist_token is None:\n return self.head\n else:\n return self.head, self.head_dist\n\n def reset_classifier(self, num_classes, global_pool=''):\n self.num_classes = num_classes\n self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n if self.num_tokens == 2:\n self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()\n\n def forward_features(self, x):\n x = self.patch_embed(x)\n cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks\n if self.dist_token is None:\n x = torch.cat((cls_token, x), dim=1)\n else:\n x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)\n x = self.pos_drop(x + self.pos_embed)\n x = self.blocks(x)\n x = self.norm(x)\n if self.dist_token is None:\n return self.pre_logits(x[:, 0])\n else:\n return x[:, 0], x[:, 1]\n\n def forward(self, x):\n x = self.forward_features(x)\n if self.head_dist is not None:\n x, x_dist = self.head(x[0]), self.head_dist(x[1]) # x must be a tuple\n if self.training and not torch.jit.is_scripting():\n # during inference, return the average of both classifier predictions\n return x, x_dist\n else:\n return (x + x_dist) / 2\n else:\n x = self.head(x)\n return x" } ]
from .deit import VisionTransformer from .swin_transformer import SwinTransformer from .lr_swin_transformer import LRSwinTransformer from .lr_swin_transformer_subnet import LRSwinTransformer as LRSwinTransformerSubnet from .lr_deit import LRVisionTransformer from .lr_deit_subnet import LRVisionTransformerSubnet
10,828
# -------------------------------------------------------- # TinyViT Model Builder # Copyright (c) 2022 Microsoft # -------------------------------------------------------- def build_model(config): model_type = config.MODEL.TYPE if model_type == 'swin': model = SwinTransformer( img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.SWIN.PATCH_SIZE, in_chans=config.MODEL.SWIN.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.SWIN.EMBED_DIM, depths=config.MODEL.SWIN.DEPTHS, num_heads=config.MODEL.SWIN.NUM_HEADS, window_size=config.MODEL.SWIN.WINDOW_SIZE, mlp_ratio=config.MODEL.SWIN.MLP_RATIO, qkv_bias=config.MODEL.SWIN.QKV_BIAS, qk_scale=config.MODEL.SWIN.QK_SCALE, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, ape=config.MODEL.SWIN.APE, patch_norm=config.MODEL.SWIN.PATCH_NORM, use_checkpoint=config.TRAIN.USE_CHECKPOINT, fused_window_process=config.FUSED_WINDOW_PROCESS ) elif model_type == 'deit': model = VisionTransformer( img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.DEIT.PATCH_SIZE, in_chans=config.MODEL.DEIT.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.DEIT.EMBED_DIM, depth=config.MODEL.DEIT.DEPTH, num_heads = config.MODEL.DEIT.NUM_HEADS, mlp_ratio = config.MODEL.DEIT.MLP_RATIO, qkv_bias = config.MODEL.DEIT.QKV_BIAS, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, ) elif model_type == 'lr_swin': model = LRSwinTransformer( img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.SWIN.PATCH_SIZE, in_chans=config.MODEL.SWIN.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.SWIN.EMBED_DIM, depths=config.MODEL.SWIN.DEPTHS, num_heads=config.MODEL.SWIN.NUM_HEADS, window_size=config.MODEL.SWIN.WINDOW_SIZE, mlp_ratio=config.MODEL.SWIN.MLP_RATIO, qkv_bias=config.MODEL.SWIN.QKV_BIAS, qk_scale=config.MODEL.SWIN.QK_SCALE, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, ape=config.MODEL.SWIN.APE, patch_norm=config.MODEL.SWIN.PATCH_NORM, use_checkpoint=config.TRAIN.USE_CHECKPOINT, fused_window_process=config.FUSED_WINDOW_PROCESS ) elif model_type == 'lr_swin_subnet': model = LRSwinTransformerSubnet( svd_config=config.MODEL.SWIN.SVD_CONFIG, img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.SWIN.PATCH_SIZE, in_chans=config.MODEL.SWIN.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.SWIN.EMBED_DIM, depths=config.MODEL.SWIN.DEPTHS, num_heads=config.MODEL.SWIN.NUM_HEADS, window_size=config.MODEL.SWIN.WINDOW_SIZE, mlp_ratio=config.MODEL.SWIN.MLP_RATIO, qkv_bias=config.MODEL.SWIN.QKV_BIAS, qk_scale=config.MODEL.SWIN.QK_SCALE, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, ape=config.MODEL.SWIN.APE, patch_norm=config.MODEL.SWIN.PATCH_NORM, use_checkpoint=config.TRAIN.USE_CHECKPOINT, fused_window_process=config.FUSED_WINDOW_PROCESS ) elif model_type == 'lr_deit': model = LRVisionTransformer( img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.DEIT.PATCH_SIZE, in_chans=config.MODEL.DEIT.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.DEIT.EMBED_DIM, depth=config.MODEL.DEIT.DEPTH, num_heads = config.MODEL.DEIT.NUM_HEADS, mlp_ratio = config.MODEL.DEIT.MLP_RATIO, qkv_bias = config.MODEL.DEIT.QKV_BIAS, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, fused_lr=config.MODEL.DEIT.FUSE_LR, ) elif model_type == 'lr_deit_subnet':
# -------------------------------------------------------- # TinyViT Model Builder # Copyright (c) 2022 Microsoft # -------------------------------------------------------- def build_model(config): model_type = config.MODEL.TYPE if model_type == 'swin': model = SwinTransformer( img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.SWIN.PATCH_SIZE, in_chans=config.MODEL.SWIN.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.SWIN.EMBED_DIM, depths=config.MODEL.SWIN.DEPTHS, num_heads=config.MODEL.SWIN.NUM_HEADS, window_size=config.MODEL.SWIN.WINDOW_SIZE, mlp_ratio=config.MODEL.SWIN.MLP_RATIO, qkv_bias=config.MODEL.SWIN.QKV_BIAS, qk_scale=config.MODEL.SWIN.QK_SCALE, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, ape=config.MODEL.SWIN.APE, patch_norm=config.MODEL.SWIN.PATCH_NORM, use_checkpoint=config.TRAIN.USE_CHECKPOINT, fused_window_process=config.FUSED_WINDOW_PROCESS ) elif model_type == 'deit': model = VisionTransformer( img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.DEIT.PATCH_SIZE, in_chans=config.MODEL.DEIT.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.DEIT.EMBED_DIM, depth=config.MODEL.DEIT.DEPTH, num_heads = config.MODEL.DEIT.NUM_HEADS, mlp_ratio = config.MODEL.DEIT.MLP_RATIO, qkv_bias = config.MODEL.DEIT.QKV_BIAS, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, ) elif model_type == 'lr_swin': model = LRSwinTransformer( img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.SWIN.PATCH_SIZE, in_chans=config.MODEL.SWIN.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.SWIN.EMBED_DIM, depths=config.MODEL.SWIN.DEPTHS, num_heads=config.MODEL.SWIN.NUM_HEADS, window_size=config.MODEL.SWIN.WINDOW_SIZE, mlp_ratio=config.MODEL.SWIN.MLP_RATIO, qkv_bias=config.MODEL.SWIN.QKV_BIAS, qk_scale=config.MODEL.SWIN.QK_SCALE, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, ape=config.MODEL.SWIN.APE, patch_norm=config.MODEL.SWIN.PATCH_NORM, use_checkpoint=config.TRAIN.USE_CHECKPOINT, fused_window_process=config.FUSED_WINDOW_PROCESS ) elif model_type == 'lr_swin_subnet': model = LRSwinTransformerSubnet( svd_config=config.MODEL.SWIN.SVD_CONFIG, img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.SWIN.PATCH_SIZE, in_chans=config.MODEL.SWIN.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.SWIN.EMBED_DIM, depths=config.MODEL.SWIN.DEPTHS, num_heads=config.MODEL.SWIN.NUM_HEADS, window_size=config.MODEL.SWIN.WINDOW_SIZE, mlp_ratio=config.MODEL.SWIN.MLP_RATIO, qkv_bias=config.MODEL.SWIN.QKV_BIAS, qk_scale=config.MODEL.SWIN.QK_SCALE, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, ape=config.MODEL.SWIN.APE, patch_norm=config.MODEL.SWIN.PATCH_NORM, use_checkpoint=config.TRAIN.USE_CHECKPOINT, fused_window_process=config.FUSED_WINDOW_PROCESS ) elif model_type == 'lr_deit': model = LRVisionTransformer( img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.DEIT.PATCH_SIZE, in_chans=config.MODEL.DEIT.IN_CHANS, num_classes=config.MODEL.NUM_CLASSES, embed_dim=config.MODEL.DEIT.EMBED_DIM, depth=config.MODEL.DEIT.DEPTH, num_heads = config.MODEL.DEIT.NUM_HEADS, mlp_ratio = config.MODEL.DEIT.MLP_RATIO, qkv_bias = config.MODEL.DEIT.QKV_BIAS, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, fused_lr=config.MODEL.DEIT.FUSE_LR, ) elif model_type == 'lr_deit_subnet':
model = LRVisionTransformerSubnet(
5
2023-11-03 09:54:45+00:00
16k
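For readers skimming the record above: build_model is a config-driven factory that dispatches on config.MODEL.TYPE ('swin', 'deit', 'lr_swin', 'lr_swin_subnet', 'lr_deit', 'lr_deit_subnet') and pulls every hyperparameter from a nested config object. The following is a minimal, self-contained sketch of that pattern, not the repository's code: the DummySwin/DummyDeiT classes and the make_config helper are hypothetical stand-ins, and only the field names quoted from the record (MODEL.TYPE, MODEL.NUM_CLASSES, MODEL.DROP_RATE, DATA.IMG_SIZE) are taken from it.

# Minimal sketch of a config-driven model factory in the spirit of build_model
# above. DummySwin/DummyDeiT and make_config are hypothetical stand-ins for the
# real Swin/DeiT classes and the repo's yacs-style config.
from types import SimpleNamespace


class DummySwin:
    def __init__(self, img_size, num_classes, drop_rate):
        self.img_size, self.num_classes, self.drop_rate = img_size, num_classes, drop_rate


class DummyDeiT(DummySwin):
    pass


# Registry mirroring the if/elif chain on config.MODEL.TYPE in the record above.
MODEL_REGISTRY = {
    "swin": DummySwin,
    "deit": DummyDeiT,
}


def build_model(config):
    model_type = config.MODEL.TYPE
    if model_type not in MODEL_REGISTRY:
        raise NotImplementedError(f"Unknown model type: {model_type}")
    return MODEL_REGISTRY[model_type](
        img_size=config.DATA.IMG_SIZE,
        num_classes=config.MODEL.NUM_CLASSES,
        drop_rate=config.MODEL.DROP_RATE,
    )


def make_config(model_type="swin"):
    # Nested attribute access mimics the yacs-style CfgNode the real builder reads.
    return SimpleNamespace(
        DATA=SimpleNamespace(IMG_SIZE=224),
        MODEL=SimpleNamespace(TYPE=model_type, NUM_CLASSES=1000, DROP_RATE=0.0),
    )


if __name__ == "__main__":
    model = build_model(make_config("deit"))
    print(type(model).__name__, model.img_size)  # -> DummyDeiT 224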
Harvard-Ophthalmology-AI-Lab/FairSeg
SAMed/segment_anything/automatic_mask_generator.py
[ { "identifier": "Sam", "path": "SAMed/segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n def forward(self, batched_input, multimask_output, image_size):\n if isinstance(batched_input, list):\n outputs = self.forward_test(batched_input, multimask_output)\n else:\n outputs = self.forward_train(batched_input, multimask_output, image_size)\n return outputs\n\n def forward_train(self, batched_input, multimask_output, image_size):\n input_images = self.preprocess(batched_input)\n image_embeddings = self.image_encoder(input_images)\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=None, boxes=None, masks=None\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=image_embeddings,\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=(image_size, image_size),\n original_size=(image_size, image_size)\n )\n outputs = {\n 'masks': masks,\n 'iou_predictions': iou_predictions,\n 'low_res_logits': low_res_masks\n }\n return outputs\n\n @torch.no_grad()\n def forward_test(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. 
Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input promts,\n C is determiend by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. 
Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x" }, { "identifier": "SamPredictor", "path": "SAMed/segment_anything/predictor.py", "snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. 
Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form 1xHxW, where\n for SAM, H=W=256.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) 
before mask prediction.\")\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)\n labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n multimask_output,\n return_logits=return_logits,\n )\n\n masks = masks[0].detach().cpu().numpy()\n iou_predictions = iou_predictions[0].detach().cpu().numpy()\n low_res_masks = low_res_masks[0].detach().cpu().numpy()\n return masks, iou_predictions, low_res_masks\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. 
These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) before mask prediction.\")\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) to generate an embedding.\"\n )\n assert self.features is not None, \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None" }, { "identifier": "MaskData", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n 
elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()" }, { "identifier": "area_from_rle", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])" }, { "identifier": "batch_iterator", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]" }, { "identifier": "batched_mask_to_box", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out" }, { "identifier": "box_xyxy_to_xywh", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh" }, { "identifier": "build_all_layer_point_grids", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n 
return points_by_layer" }, { "identifier": "calculate_stability_score", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecesary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions" }, { "identifier": "coco_encode_rle", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle" }, { "identifier": "generate_crop_boxes", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs" }, { "identifier": "is_box_near_crop_edge", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)" }, { "identifier": "mask_to_rle_pytorch", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: 
torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out" }, { "identifier": "remove_small_regions", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True" }, { "identifier": "rle_to_mask", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order" }, { "identifier": "uncrop_boxes_xyxy", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset" }, { "identifier": "uncrop_masks", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)" }, { "identifier": "uncrop_points", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], 
device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset" } ]
import numpy as np
import torch
import cv2  # type: ignore # noqa: F401
from torchvision.ops.boxes import batched_nms, box_area  # type: ignore
from typing import Any, Dict, List, Optional, Tuple

from .modeling import Sam
from .predictor import SamPredictor
from .utils.amg import (
    MaskData,
    area_from_rle,
    batch_iterator,
    batched_mask_to_box,
    box_xyxy_to_xywh,
    build_all_layer_point_grids,
    calculate_stability_score,
    coco_encode_rle,
    generate_crop_boxes,
    is_box_near_crop_edge,
    mask_to_rle_pytorch,
    remove_small_regions,
    rle_to_mask,
    uncrop_boxes_xyxy,
    uncrop_masks,
    uncrop_points,
)
from pycocotools import mask as mask_utils  # type: ignore # noqa: F401
11,326
"point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros(len(data["boxes"])), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros(len(data["boxes"])), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crops_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crops_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros(len(data["boxes"])), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros(len(data["boxes"])), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold
data["boxes"] = batched_mask_to_box(data["masks"])
5
2023-11-03 17:05:40+00:00
16k
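One detail worth calling out from the record above: calculate_stability_score defines a mask's stability as the IoU between two binarizations of the same predicted logits, thresholded at mask_threshold + threshold_offset and mask_threshold - threshold_offset, and SamAutomaticMaskGenerator drops masks whose score falls below stability_score_thresh. Below is a minimal NumPy restatement of that formula, an illustrative sketch rather than the repository's torch implementation.

# Stability score of predicted mask logits, following the formula in
# calculate_stability_score quoted above: IoU between the binary masks obtained
# by thresholding the logits at (t + offset) and (t - offset).
import numpy as np


def stability_score(mask_logits: np.ndarray, mask_threshold: float = 0.0,
                    threshold_offset: float = 1.0) -> float:
    high = mask_logits > (mask_threshold + threshold_offset)  # stricter cutoff
    low = mask_logits > (mask_threshold - threshold_offset)   # looser cutoff
    # The strict mask is always contained in the loose one, so the
    # intersection area is simply the strict mask's area.
    intersection = high.sum()
    union = low.sum()
    return float(intersection) / float(union) if union > 0 else 0.0


if __name__ == "__main__":
    logits = np.array([[3.0, 0.5], [-0.5, -3.0]])
    # high cutoff (>1.0) keeps 1 pixel, low cutoff (>-1.0) keeps 3 -> 1/3
    print(round(stability_score(logits), 3))  # 0.333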
microsoft/PLEX
PLEX/finetuning.py
[ { "identifier": "TrajectoryDataset", "path": "PLEX/util/data.py", "snippet": "class TrajectoryDataset:\n def __init__(self, trajectories, camera_names, contextual):\n self.trajectories = list(trajectories)\n if not globals.full_state_mode:\n self.camera_names = camera_names\n self.traj_lens = np.array([traj['len'] for traj in self.trajectories])\n else:\n self.camera_names = None\n self.traj_lens = np.array([len(traj['full_state']) for traj in self.trajectories])\n self.contextual = contextual\n\n if len(self.trajectories) == 0:\n return\n\n self.p_sample = self.traj_lens / np.sum(self.traj_lens)\n\n proto_traj = self.trajectories[0]\n proto_traj = proto_traj['load_images'](proto_traj)\n if not globals.full_state_mode:\n self.image_dims = None\n\n for cam in self.camera_names:\n image_dims = proto_traj['image'][cam].shape[1:]\n if self.image_dims is None:\n self.image_dims = image_dims\n else:\n assert np.all(self.image_dims == image_dims) or np.all(image_dims == ()), f'Images from a given cam should all be None or have the same size as from other cams. Other cams\\' image size: {self.image_dims}, this cam\\'s image size is {image_dims}.'\n else:\n self.full_state_dim = proto_traj['full_state'].shape[1]\n\n # Check for existence of optional keys\n self.has_proprios = 'proprio' in proto_traj\n self.has_actions = 'action' in proto_traj\n self.has_rewards = 'reward' in proto_traj\n\n if self.has_proprios:\n assert not globals.full_state_mode, 'We shouldn\\'t be using proprios in full-state mode.'\n self.proprio_dim = proto_traj['proprio'].shape[1]\n for traj in trajectories:\n assert traj['proprio'].shape[1] == self.proprio_dim\n\n if self.has_actions:\n self.action_dim = proto_traj['action'].shape[1]\n for traj in trajectories:\n assert traj['action'].shape[1] == self.action_dim\n\n def __len__(self):\n return len(self.trajectories)\n\n @property\n def video_only(self):\n return not self.has_actions and not self.has_proprios and not self.has_rewards and not globals.full_state_mode\n\n def copy_frames(self, src, actual_trg_len, rate_ratio, raise_frame_rate, pad_frame_gaps):\n # Copies data from a source array, adjusting frame rates as necessary.\n\n # Allocate the destination array to be same shape as the source array,\n # except for the first dimension (time), which must be actual_trg_len.\n trg_data = np.zeros((actual_trg_len, *src.shape[1:]), dtype=src.dtype)\n actual_src_len = len(src)\n\n if rate_ratio == 1:\n # The frame rates match. Do a direct copy.\n trg_data[:] = src[:actual_src_len]\n elif raise_frame_rate:\n # The source frame rate is too low. Copy source items as needed.\n for i in range(rate_ratio):\n new_src_len = len(trg_data[i::rate_ratio])\n trg_data[i::rate_ratio] = src[:new_src_len]\n if pad_frame_gaps:\n break # Leave zeros in the intervening frames.\n else:\n # The source frame rate is too high. 
Skip the unneeded items.\n trg_data[:] = src[0:rate_ratio * actual_src_len:rate_ratio]\n return trg_data\n\n def sample_batch(self, batch_size, target_frame_rate, pad_frame_gaps, max_len, get_context, discount, device=globals.DEFAULT_DEVICE, context_from_same_traj=False):\n assert len(self.trajectories) > 0\n # We should probably factor out the code that maps trajectories to tasks so that this computation is done only once, not every time a batch is sampled.\n task_name2traj_idx_dict = {}\n\n for i in range(len(self.trajectories)):\n if self.trajectories[i]['task_info'].name in task_name2traj_idx_dict.keys():\n task_name2traj_idx_dict[self.trajectories[i]['task_info'].name].append(i)\n else:\n task_name2traj_idx_dict[self.trajectories[i]['task_info'].name] = [i]\n\n batch_inds = np.random.choice(\n np.arange(len(self.trajectories)),\n size=batch_size,\n replace=True,\n p=self.p_sample # reweights so we sample according to timesteps\n )\n\n if not globals.full_state_mode:\n images = {cam: [] for cam in self.camera_names}\n contexts = {cam: [] for cam in self.camera_names} if self.contextual else None\n proprios = [] if self.has_proprios else None\n else:\n full_states = []\n contexts = []\n proprios = None\n\n masks = []\n actions = [] if self.has_actions else None\n rewards, returns = ([], []) if self.has_rewards else (None, None)\n timesteps = []\n\n for batch_index in range(batch_size):\n traj = None\n traj_len = -1\n is_valid = False\n\n while not is_valid:\n traj = self.trajectories[batch_inds[batch_index]]\n traj_len = traj['len'] if not globals.full_state_mode else len(traj['full_state'])\n\n if self.contextual:\n MAX_RETRIES = 3\n retry_ctr = 0\n while not is_valid and retry_ctr < MAX_RETRIES:\n retry_ctr += 1\n if self.video_only:\n # Choose a context from the same trajectory\n ctx, is_valid = get_context(traj, 0, traj_len)\n else:\n # Choose a context from another random trajectory **of the same task**.\n if context_from_same_traj:\n ctx_traj = traj\n else:\n ctx_traj_idx = task_name2traj_idx_dict[traj['task_info'].name][randrange(len(task_name2traj_idx_dict[traj['task_info'].name]))]\n ctx_traj = self.trajectories[ctx_traj_idx]\n ctx_traj_len = ctx_traj['len'] if not globals.full_state_mode else len(ctx_traj['full_state'])\n ctx, is_valid = get_context(ctx_traj, 0, ctx_traj_len)\n\n if is_valid and retry_ctr > 1:\n print(f'Found a valid context only on the {retry_ctr}th attempt...')\n\n if not is_valid:\n # Sample a different trajectory\n batch_inds[batch_index] = np.random.choice(\n np.arange(len(self.trajectories)),\n size=1,\n replace=True,\n p=self.p_sample # reweights so we sample according to timesteps\n )[0]\n continue\n\n if not globals.full_state_mode:\n for cam in self.camera_names:\n contexts[cam].append(ctx[cam][np.newaxis])\n else:\n contexts.append(ctx[np.newaxis])\n else:\n # Non-contexttual trajectories don't need a context, by definition, so we'll just oveeride the context validity check.\n is_valid = True\n\n src_end = random.randint(1, traj_len)\n data_frame_rate = traj['task_info'].frame_rate # Source fps.\n max_trg_len = max_len # trg refers to target arrays that will be returned.\n\n assert (data_frame_rate is None) or (target_frame_rate is None) or (\n data_frame_rate == target_frame_rate) or self.video_only, \\\n \"For now, the target and data frame rates can be different only for video-only data.\"\n\n if (data_frame_rate is None) or (target_frame_rate is None) or (data_frame_rate == target_frame_rate):\n # The frame rates match. 
Do a direct copy.\n rate_ratio = 1\n raise_frame_rate = False\n max_src_len = max_trg_len\n src_start = max(0, src_end - max_src_len)\n actual_src_len = src_end - src_start\n trg_start = src_start\n actual_trg_len = actual_src_len\n elif data_frame_rate < target_frame_rate:\n # The source frame rate is too low. Copy each source item (or pad with zeros) as many times as needed.\n rate_ratio = target_frame_rate // data_frame_rate\n raise_frame_rate = True\n max_src_len = math.ceil(max_trg_len / rate_ratio) # Fewer source frames will be needed.\n src_start = max(0, src_end - max_src_len)\n actual_src_len = src_end - src_start\n trg_start = src_start * rate_ratio\n actual_trg_len = min(max_trg_len, actual_src_len * rate_ratio)\n else: # data_frame_rate > target_frame_rate\n # The source frame rate is too high. Skip the unneeded items.\n rate_ratio = data_frame_rate // target_frame_rate\n raise_frame_rate = False\n max_src_len = max_trg_len * rate_ratio # Some source frames will be dropped.\n src_start = max(0, src_end - max_src_len)\n actual_src_len = src_end - src_start\n trg_start = src_start // rate_ratio\n actual_trg_len = min(max_trg_len, (actual_src_len + rate_ratio - 1) // rate_ratio)\n\n trg_end = trg_start + actual_trg_len\n\n if not globals.full_state_mode:\n for cam in self.camera_names:\n traj = traj['load_images'](traj, start_idx=src_start, end_idx=src_end)\n subseq = traj['image'][cam][src_start:src_end]\n trg_data = self.copy_frames(subseq, actual_trg_len, rate_ratio, raise_frame_rate, pad_frame_gaps)\n images[cam].append(cat1(\n np.zeros((1, max_trg_len - actual_trg_len, *self.image_dims)),\n trg_data.reshape(1, actual_trg_len, *self.image_dims)\n ))\n if self.has_proprios:\n proprios.append(cat1(\n np.zeros((1, max_trg_len - actual_trg_len, self.proprio_dim)),\n traj['proprio'][src_start:src_end].reshape(1, actual_trg_len, self.proprio_dim)\n ))\n else:\n full_states.append(cat1(\n np.zeros((1, max_trg_len - actual_trg_len, self.full_state_dim)),\n traj['full_state'][src_start:src_end].reshape(1, actual_trg_len, self.full_state_dim)\n ))\n\n if self.has_actions:\n # Why the * -10?\n actions.append(cat1(\n np.ones((1, max_trg_len - actual_trg_len, self.action_dim)) * -10.,\n traj['action'][src_start:src_end].reshape(1, actual_trg_len, self.action_dim)\n ))\n\n if self.has_rewards:\n rewards.append(cat1(\n np.zeros((1, max_trg_len - actual_trg_len, 1)),\n traj['reward'][src_start:src_end].reshape(1, actual_trg_len, 1)\n ))\n if 'rtg' in traj:\n returns.append(cat1(\n np.zeros((1, max_trg_len - actual_trg_len, 1)),\n traj['rtg'][src_start:src_end].reshape(1, actual_trg_len, 1)\n ))\n else:\n rtgs = discount_cumsum(traj['reward'][src_start:], traj['success'][-1], gamma=discount)\n returns.append(cat1(\n np.zeros((1, max_trg_len - actual_trg_len, 1)),\n rtgs[:actual_trg_len].reshape(1, actual_trg_len, 1)\n ))\n\n timesteps.append(cat1(\n np.zeros((1, max_trg_len - actual_trg_len), dtype=np.long),\n np.arange(trg_start, trg_end, dtype=np.long)[np.newaxis],\n dtype=np.long\n ))\n\n masks.append(cat1(\n np.zeros((1, max_trg_len - actual_trg_len)),\n np.ones((1, actual_trg_len))\n ))\n\n return [\n torchify(x, device)\n for x in (contexts, (images if not globals.full_state_mode else full_states), proprios, actions, rewards, returns, timesteps, masks)\n ]" }, { "identifier": "load_data", "path": "PLEX/util/data.py", "snippet": "def load_data(log, data_dir, tasks, max_trajectories, **kwargs):\n all_trajectories = {}\n for task, max_traj in zip(tasks, max_trajectories):\n if 
task.dataset_type == 'robomimic' or task.dataset_type == 'robosuite' or task.dataset_type == 'libero':\n trajectories = load_robomimic_data(log, data_dir, task, max_trajectories=max_traj, **kwargs)\n elif task.dataset_type == 'metaworld':\n trajectories = load_metaworld_data(log, data_dir, task, max_trajectories=max_traj, **kwargs)\n elif task.dataset_type == 'bridge' or task.dataset_type == 'bridge-v2':\n trajectories = load_bridge_data(log, data_dir, task, max_trajectories=max_traj, **kwargs)\n elif task.dataset_type == 'd4rl':\n trajectories = load_d4rl_data(log, data_dir, task, max_trajectories=max_traj, **kwargs)\n else:\n assert False, 'Unknown dataset type {} for task {}'.format(task.dataset_type, task)\n\n for task_name in trajectories:\n if task_name in all_trajectories:\n # This may happen if we are loading several sub-datasets for the same task, e.g., \"ph\" and \"mh\" subdatasets in robomimic\n # NOTE: Max trajectories limit should probably apply to *all* trajectories of this task but currently applies on a per-directory basis.\n all_trajectories[task_name].extend(trajectories[task_name])\n else:\n all_trajectories[task_name] = trajectories[task_name]\n return all_trajectories" }, { "identifier": "setup_batch_sampler", "path": "PLEX/util/data.py", "snippet": "def setup_batch_sampler(dataset, context_style, cmdline_args, device):\n context_fn = setup_context_sampler(context_style) if dataset.contextual else lambda *args, **kwargs: None\n return lambda batch_size, target_frame_rate, pad_frame_gaps: dataset.sample_batch(batch_size,\n target_frame_rate,\n pad_frame_gaps,\n max_len=((cmdline_args['obs_pred.K'] + cmdline_args['future_step']) if cmdline_args['model'] == 'PLEX' else cmdline_args['K']),\n get_context=context_fn,\n discount=cmdline_args['discount'],\n device=device,\n context_from_same_traj=cmdline_args['context_from_same_traj'])" }, { "identifier": "train_val_split", "path": "PLEX/util/data.py", "snippet": "def train_val_split(items, val_frac):\n items = list(items)\n n_total = len(items)\n train_val_split_rng.shuffle(items)\n n_val = round(val_frac * n_total)\n return items[n_val:], items[:n_val]" }, { "identifier": "parse_tasks", "path": "PLEX/util/misc.py", "snippet": "def parse_tasks(task_spec_str, robot=None, global_max_traj=None):\n if task_spec_str is None or task_spec_str == 'None':\n return [], []\n\n task_specs = parse_comma_sep_param_value(task_spec_str)\n descriptors = []\n max_trajs = []\n for task_spec in task_specs:\n if task_spec.startswith('(') and task_spec.endswith(')'):\n task_spec, max_traj = [part.strip('(): ') for part in task_spec.split(':')]\n max_trajs.append(int(max_traj))\n else:\n max_trajs.append(global_max_traj)\n\n if robot is None:\n task = task_spec\n else:\n # --TARGET_ROBOT-- is a reserved token that can't be used to name an actual robot.\n task = task_spec.replace('--TARGET_ROBOT--', robot)\n assert task != task_spec, 'Invalid task directory string: {}. 
Needs to contain the \\\"--TARGET_ROBOT--\\\" token'.format(task)\n\n descriptors.append(TaskDescriptor(task))\n return descriptors, max_trajs" }, { "identifier": "setup_essentials", "path": "PLEX/util/misc.py", "snippet": "def setup_essentials(cmdline_args):\n set_seed(cmdline_args['seed'])\n data_shuffling_rng = np.random.RandomState(cmdline_args['seed'])\n log = setup_logging(cmdline_args)\n device = cmdline_args.get('device', 'cuda')\n log_to_wandb = cmdline_args.get('log_to_wandb', False)\n timer = Timer(log)\n\n camera_names = parse_comma_sep_param_value(cmdline_args['camera_names'])\n\n # Very important! This sets up observation preprocessing (such as resizing images to a desired size and swapping their format from HWC to CWH)\n # that will be done by the robomimic library to specified observation types when these observations are loaded from robomimic's h5py files or\n # generated by robosuite.\n if 'FULL_STATE' in camera_names:\n assert len(camera_names) == 1, \"If FULL_STATE is present among camera names, it must be the only camera name.\"\n globals.full_state_mode = True\n else:\n globals.full_state_mode = False\n\n if not globals.full_state_mode:\n init_obs_preprocessing(camera_names, cmdline_args['image_size'])\n\n modalities_to_mask = parse_comma_sep_param_value(cmdline_args['modalities_to_mask'])\n data_dir = construct_data_dir_path(cmdline_args)\n common_env_metadata_dict = {'robosuite': None, 'metaworld': None, 'bridge': None}\n\n for modality in modalities_to_mask:\n assert modality in globals.MODALITIES\n\n return log, log_to_wandb, timer, data_shuffling_rng, device, camera_names, modalities_to_mask, data_dir, common_env_metadata_dict" }, { "identifier": "setup_model", "path": "PLEX/util/misc.py", "snippet": "def setup_model(cmdline_args, example_task, log, device, camera_names, modalities_to_mask, data_dir, bc_mode):\n obs_dims, proprio_dim, action_dim = get_robot_dims(example_task, camera_names, cmdline_args['image_size'])\n pretrained_state_dict = {}\n\n # Load pretrained weights, if applicable\n load_path = cmdline_args['load_path']\n if load_path is not None:\n load_path = load_path.replace('--TARGET_ROBOT--', cmdline_args['robot'])\n log(f'Loading pretrained weights from {load_path}')\n pretrained_state_dict = torch.load(load_path)\n\n std_bounds = (cmdline_args['std_min'], cmdline_args['std_max'])\n\n tune_style_kwargs = {}\n tune_style_kwargs['image_encoder_tune_style'] = cmdline_args['image_encoder_tune_style']\n\n if cmdline_args['model'] == 'PLEX':\n assert cmdline_args['obs_pred.K'] is not None\n assert cmdline_args['inv_d_pred.K'] is not None\n assert cmdline_args['obs_pred.K'] >= cmdline_args['inv_d_pred.K']\n assert cmdline_args['obs_pred.K'] % cmdline_args['inv_d_pred.K'] == 0\n obs_pred_gpt2_kwargs = dict(\n n_layer=cmdline_args['obs_pred.n_layer'],\n n_head=cmdline_args['obs_pred.n_head'],\n K=cmdline_args['obs_pred.K'],\n activation_function=cmdline_args['activation_function'],\n resid_pdrop=cmdline_args['dropout'],\n attn_pdrop=cmdline_args['dropout']\n )\n inv_d_pred_gpt2_kwargs = dict(\n n_layer=cmdline_args['inv_d_pred.n_layer'],\n n_head=cmdline_args['inv_d_pred.n_head'],\n K=cmdline_args['inv_d_pred.K'],\n activation_function=cmdline_args['activation_function'],\n resid_pdrop=cmdline_args['dropout'],\n attn_pdrop=cmdline_args['dropout']\n )\n\n model = PLEX(\n camera_names=camera_names,\n obs_dims=obs_dims,\n proprio_dim=proprio_dim,\n act_dim=action_dim,\n hidden_dim=cmdline_args['embed_dim'],\n # The history length for this model is always the 
observation prediction model's history length:\n history_len=cmdline_args['obs_pred.K'],\n image_encoder_arch=cmdline_args['image_encoder_arch'],\n image_encoder_load=cmdline_args['image_encoder_load'],\n use_random_crops=True,\n pool_type=cmdline_args['pool_type'],\n action_output_type=cmdline_args['action_output_type'],\n impute_style=cmdline_args['impute_style'],\n data_dir=data_dir,\n relative_position_encodings=cmdline_args['relative_position_encodings'],\n future_step=cmdline_args['future_step'],\n std_bounds=std_bounds,\n obs_pred_gpt2_kwargs=obs_pred_gpt2_kwargs,\n inv_d_pred_gpt2_kwargs=inv_d_pred_gpt2_kwargs,\n modalities_to_mask=modalities_to_mask,\n bc_mode=bc_mode\n ).to(device=device)\n\n # Record the tune style parameters\n tune_style_kwargs['obs_pred_transformer_tune_style'] = cmdline_args['obs_pred.transformer_tune_style']\n tune_style_kwargs['inv_d_pred_transformer_tune_style'] = cmdline_args['inv_d_pred.transformer_tune_style']\n\n elif cmdline_args['model'] == 'DT':\n # Configure the model\n gpt2_kwargs = dict(\n n_layer=cmdline_args['n_layer'],\n n_head=cmdline_args['n_head'],\n activation_function=cmdline_args['activation_function'],\n resid_pdrop=cmdline_args['dropout'],\n attn_pdrop=cmdline_args['dropout'],\n relative_position_encodings=cmdline_args['relative_position_encodings']\n )\n\n model = DecisionTransformer(\n camera_names=camera_names,\n obs_dims=obs_dims,\n proprio_dim=proprio_dim,\n act_dim=action_dim,\n hidden_dim=cmdline_args['embed_dim'],\n history_len=cmdline_args['K'],\n image_encoder_arch=cmdline_args['image_encoder_arch'],\n image_encoder_load=cmdline_args['image_encoder_load'],\n use_random_crops=True,\n pool_type=cmdline_args['pool_type'],\n action_output_type=cmdline_args['action_output_type'],\n impute_style=cmdline_args['impute_style'],\n data_dir=data_dir,\n gpt2_kwargs=gpt2_kwargs,\n std_bounds=std_bounds,\n modalities_to_mask=modalities_to_mask,\n bc_mode=bc_mode\n ).to(device=device)\n\n # Record the tune style parameters\n tune_style_kwargs['transformer_tune_style'] = cmdline_args['transformer_tune_style']\n\n elif cmdline_args['model'] == 'MLP':\n model = MLPBCModel(\n camera_names=camera_names,\n obs_dims=obs_dims,\n proprio_dim=proprio_dim,\n act_dim=action_dim,\n hidden_dim=cmdline_args['embed_dim'],\n history_len=cmdline_args['K'],\n image_encoder_arch=cmdline_args['image_encoder_arch'],\n image_encoder_load=cmdline_args['image_encoder_load'],\n use_random_crops=True,\n impute_style=cmdline_args['impute_style'],\n n_layer=cmdline_args['n_layer'],\n activation_function=cmdline_args['activation_function'],\n dropout=cmdline_args['dropout'],\n modalities_to_mask=modalities_to_mask,\n bc_mode=bc_mode,\n std_bounds=std_bounds,\n ).to(device=device)\n\n # Record the tune style parameters\n # TODO\n\n else:\n raise NotImplementedError(f'Unknown model type: {cmdline_args.model}')\n log('Model architecture:')\n log(str(model))\n\n if len(pretrained_state_dict) > 0:\n model.load_state_dict(pretrained_state_dict)\n log('Loaded successfully!')\n else:\n log('Training/finetuning the model from scratch!')\n\n return model, tune_style_kwargs" }, { "identifier": "set_trainable_params", "path": "PLEX/util/misc.py", "snippet": "def set_trainable_params(model, trainable_param_spec, log):\n model.set_requires_grad(**trainable_param_spec)\n trainable_params = [p for p in model.parameters() if p.requires_grad]\n num_trainable_params = sum([p.numel() for p in trainable_params])\n num_params = sum([p.numel() for p in model.parameters()])\n log(f'Training 
{num_trainable_params} out of {num_params} total parameters')\n return trainable_params" }, { "identifier": "setup_trainer", "path": "PLEX/util/misc.py", "snippet": "def setup_trainer(batch_sampler, lr, eval_fns, model, trainable_params, cmdline_args):\n optimizer = torch.optim.AdamW(\n trainable_params,\n lr=lr,\n weight_decay=cmdline_args['weight_decay'],\n )\n scheduler = torch.optim.lr_scheduler.LambdaLR(\n optimizer,\n lambda steps: min((steps+1)/cmdline_args['warmup_steps'], 1)\n )\n\n # Model-specific loss weights\n if cmdline_args['model'] == 'DT' or cmdline_args['model'] == 'MLP':\n loss_weights = {\n 'action': 1.0\n }\n elif cmdline_args['model'] == 'PLEX':\n loss_weights = {\n # This is the task-conditioned latent state prediction loss weight.\n # It should be 1.0 for PL pretraining and 0.0 for EX pretraining (since EX pretraining uses\n # task-agnostic data that makes task-conditioned latent state prediction impossible).\n # It should be 1.0 for target-task finetuning as well.\n 'future_prediction': cmdline_args['future_prediction_loss_weight']\n }\n # The EX part of PLEX (i.e., inversed dynamics -- action prediction based on the current and a future latent state)\n # can be trained using the future latent state of the training trajectory *or* the future latent state\n # predicted by the PL part of PLEX (the latent state predictor).\n # If we care about the former, we set grounded_inverse_dynamics_loss_weight = 1 and predicted_inverse_dynamics_loss_weight = 0.\n # If we care about the latter, then vice versa. In either case,\n # predicted_inverse_dynamics_loss_weight = 1 - grounded_inverse_dynamics_loss_weight.\n #\n # Namely, for EX pretraining we set grounded_inverse_dynamics_loss_weight = 1, because\n # the latent state predictor (PL) is unavailable at the time when EX is being pretrained.\n #\n # For PL pretraining, grounded_inverse_dynamics_loss_weight doesn't matter, because during PL pretraining\n # the inverse dynamics precictor (EX) is frozen and isn't affected by training, and the inverse dynamics\n # losses, in turn, don't affect the PL component of PLEX.\n #\n # For target-task finetuning of PLEX, we set predicted_inverse_dynamics_loss_weight = 1, because we want to adapt the\n # PL and EX components of PLEX to work together.\n for which in ['predicted', 'grounded']:\n key = f'{which}_inverse_dynamics'\n loss_weights[key] = cmdline_args[f'{key}_loss_weight']\n else:\n raise NotImplementedError\n\n return Trainer(\n model=model,\n optimizer=optimizer,\n get_batch=batch_sampler,\n batch_size=cmdline_args['batch_size'],\n target_frame_rate=cmdline_args['target_frame_rate'],\n pad_frame_gaps=cmdline_args['pad_frame_gaps'],\n scheduler=scheduler,\n loss_weights=loss_weights,\n eval_fns=eval_fns,\n )" }, { "identifier": "run_training", "path": "PLEX/util/misc.py", "snippet": "def run_training(trainer, model, num_steps, model_filename_prefix, cmdline_args, log, log_to_wandb, timer):\n log(f'Commencing training...')\n metrics = defaultdict(list)\n best = float('-inf')\n\n if cmdline_args['model'] == 'PLEX':\n model_info = f'plK{cmdline_args[\"obs_pred.K\"]}_plL{cmdline_args[\"obs_pred.n_layer\"]}_plH{cmdline_args[\"obs_pred.n_head\"]}_exK{cmdline_args[\"inv_d_pred.K\"]}_exL{cmdline_args[\"inv_d_pred.n_layer\"]}_exH{cmdline_args[\"inv_d_pred.n_head\"]}_res{cmdline_args[\"image_size\"]}_bc{cmdline_args[\"bc_learning_mode\"]}_la{cmdline_args[\"future_step\"]}_relpos{cmdline_args[\"relative_position_encodings\"]}__'\n elif cmdline_args['model'] == 'DT':\n model_info = 
f'K{cmdline_args[\"K\"]}_L{cmdline_args[\"n_layer\"]}_H{cmdline_args[\"n_head\"]}_res{cmdline_args[\"image_size\"]}_bc{cmdline_args[\"bc_learning_mode\"]}_relpos{cmdline_args[\"relative_position_encodings\"]}__'\n elif cmdline_args['model'] == 'MLP':\n model_info = f'K{cmdline_args[\"K\"]}_L{cmdline_args[\"n_layer\"]}_res{cmdline_args[\"image_size\"]}_bc{cmdline_args[\"bc_learning_mode\"]}__'\n else:\n raise NotImplementedError\n\n for iter in range(cmdline_args['max_iters']):\n with timer.time('iteration'):\n outputs = trainer.train_iteration(\n num_steps=num_steps,\n iter_num=iter+1,\n print_fn=log\n )\n\n for k, v in outputs.items():\n metrics[k].append(v)\n\n with open(log.dir/'metrics.pkl', 'wb') as f:\n pickle.dump(dict(metrics), f)\n\n if log_to_wandb:\n wandb.log(outputs)\n\n torch.save(model.state_dict(), log.dir/(model_filename_prefix + model_info + 'latest.pt'))\n torch.save(model.state_dict(), log.dir/(model_filename_prefix + model_info + f'iter_{iter+1}.pt'))\n metric_of_interest = outputs[cmdline_args['best_metric']]\n if metric_of_interest > best:\n best = metric_of_interest\n log(f'New best: {best}')\n torch.save(model.state_dict(), log.dir/(model_filename_prefix + model_info + 'best.pt'))\n\n print(f\"\\n\\nTHE BEST VALUE OF THE {cmdline_args['best_metric']} METRIC ACROSS ALL TRAINING ITERATIONS IS {best}.\")\n return dict(metrics)" }, { "identifier": "get_success_rate_evaluator", "path": "PLEX/util/evaluators.py", "snippet": "def get_success_rate_evaluator(task, traj_data, env_metadata, cmdline_args, log_dir):\n # Taking the average return of all trajectories as the target is dangerous: we may have many trajectories with low return.\n # target_return = sum(traj['reward'].sum() for traj in val_data.trajectories) / len(val_data.trajectories)\n get_context = setup_context_sampler(cmdline_args['context_style'])\n\n def eval_episodes(model, step):\n conditions = []\n\n if cmdline_args['record_video']:\n record_traj_dir = (log_dir/f'videos_from_epoch_{step}')\n record_traj_dir.mkdir(parents=True)\n else:\n record_traj_dir = None\n\n returns = []\n\n # ASSUMPTIONS:\n # -- In goal-directed tasks, each successful (goal-reaching) trajectory has a higher score than every non-goal-reaching one.\n # -- Every goal-reaching trajectory stays at a goal state once it reaches one.\n for traj in traj_data.trajectories:\n returns.append(discount_cumsum(traj['reward'], traj['success'][-1], cmdline_args['discount'])[0])\n\n returns.sort(reverse=True)\n # [top_return_LO, top_return_HI] is the range of returns corresponding to the top cmdline_args['top_return_fraction'] fraction of\n # the demonstration trajectories\n top_return_LO = returns[math.ceil(cmdline_args['top_return_fraction'] * len(returns)) - 1]\n top_return_HI = returns[0]\n\n if not cmdline_args['bc_learning_mode']:\n print(f\"Top return range: {top_return_LO} -- {top_return_HI}\")\n\n for e in range(cmdline_args['num_eval_episodes']):\n while True:\n # During evaluation with success_rate (and simultaneously success rate) as the metric,\n # validation_frac is just the fraction of training trajectories whose goal images will serve as contexts during evaluation.\n # Note that each episode will generally start with a scene where even objects other than the goal objects will\n # generally be positioned differently than in any goal image the agent has seen in training, so sampling evaluation-time\n # contexts from among the training trajectory goals is fine.\n val_traj = random.choice(traj_data.trajectories)\n context, is_valid = 
get_context(val_traj, 0, len(val_traj['reward']))\n if is_valid:\n break\n\n target_return = (top_return_LO + random.random() * (top_return_HI - top_return_LO))\n # If the learning mode *is* BC (as opposed to offline RL), then we will ignore\n # target return during conditioning, so its value won't matter.\n if not cmdline_args['bc_learning_mode']:\n print(f\"Target return for episode {e}: {target_return}\")\n\n conditions.append((context, target_return))\n\n if not cmdline_args['bc_learning_mode']:\n # Make sure that either:\n # (a) these settings are the same as at training time or\n # (b) the model was trained and is being evaluated in BC mode (i.e., rewards/returns weren't used\n # at training time and are ignored at evaluation time).\n print(f'Is the reward normalized **at evaluation time**: {cmdline_args[\"normalize_reward\"]}')\n print(f'Type of reward to be used for conditioning at evaluation time: {cmdline_args[\"reward_type\"]}')\n\n returns, succ_episodes, lengths = evaluate_parallel(\n conditions, task, model,\n device=cmdline_args.get('device', 'cuda'),\n use_normalized_reward=cmdline_args['normalize_reward'],\n reward_type=cmdline_args['reward_type'],\n env_meta=env_metadata,\n full_state_mode = globals.full_state_mode,\n min_time_at_goal_for_success=cmdline_args['min_time_at_goal_for_success'],\n camera_names=parse_comma_sep_param_value(cmdline_args['camera_names']),\n image_size=cmdline_args['image_size'],\n num_workers=cmdline_args['num_eval_workers'],\n max_ep_len=cmdline_args['max_eval_episode_len'],\n discount=cmdline_args['discount'],\n record_camera=(DEFAULT_CAM[task.dataset_type] if cmdline_args['record_camera'] is None else cmdline_args['record_camera']),\n record_traj_dir=record_traj_dir\n )\n\n num_succ = len([s for s in succ_episodes if s is True])\n success_rate = num_succ/len(succ_episodes)*100\n\n print(f'Iteration {step} SUCCESS RATE: {success_rate}%')\n print(f'Iteration {step} MEAN NATIVE RETURN: {np.mean(returns)}')\n print(f'Iteration {step} MEAN EPISODE LENGTH: {np.mean(lengths)}')\n\n return {\n 'success_rate': success_rate,\n 'return_mean': np.mean(returns),\n 'return_std': np.std(returns),\n 'length_mean': np.mean(lengths),\n 'length_std': np.std(lengths)\n }\n\n return eval_episodes" }, { "identifier": "get_validation_error_evaluator", "path": "PLEX/util/evaluators.py", "snippet": "def get_validation_error_evaluator(dataset, cmdline_args, device):\n get_val_batch = setup_batch_sampler(dataset, cmdline_args['context_style'], cmdline_args, device)\n\n def validation_error(model, iter):\n errors = []\n for _ in range(cmdline_args['validation_samples']):\n contexts, images, states, actions, rewards, rtg, timesteps, attention_mask = get_val_batch(cmdline_args['batch_size'],\n cmdline_args['target_frame_rate'],\n cmdline_args['pad_frame_gaps'])\n with torch.no_grad():\n action_preds = model.forward(\n contexts, images, states,\n actions if isinstance(model.module, PLEX) else actions[:,:-1],\n rewards,\n rtg,\n timesteps,\n mask=attention_mask,\n )[0]\n\n if isinstance(model.module, PLEX):\n act_dim = action_preds.shape[2]\n attention_mask_shortened = attention_mask[:,:-cmdline_args['future_step']]\n action_preds = action_preds.reshape(-1, act_dim)[attention_mask_shortened.reshape(-1) > 0]\n action_target = torch.clone(actions[:,:-cmdline_args['future_step']]).reshape(-1, act_dim)[attention_mask_shortened.reshape(-1) > 0]\n else:\n action_target = actions[:,-1]\n\n # We are negating the error here for consistency with other metrics, which are maximization 
metrics.\n error = -torch.mean((action_preds - action_target) ** 2).item()\n errors.append(error)\n return {\n f'neg_val_error': np.mean(errors)\n }\n return validation_error" }, { "identifier": "add_common_args", "path": "PLEX/util/cmdline.py", "snippet": "def add_common_args(parser):\n # Logging\n parser.add_argument('--log_dir', type=str, default='~/logs')\n parser.add_argument('--log_id', type=str, default=None)\n parser.add_argument('--log_to_wandb', '-w', action='store_true')\n\n # General setup\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--device', type=str, default='cuda')\n\n # Core model\n parser.add_argument('--model', type=str, default='DT')\n # This is the load path for the starting model. If None, the starting model is initialized randomly.\n parser.add_argument('--load_path', type=str, default=None)\n parser.add_argument('--modalities_to_mask', type=str, default='action')\n parser.add_argument('--impute_style', type=str, default='trainable')\n\n # Parameters for the Gaussian action head, if used\n parser.add_argument('--std_min', type=float, default=0.001)\n parser.add_argument('--std_max', type=float, default=1.0)\n\n ### Decision transformer parameters\n parser.add_argument('--K', type=int, default=10)\n parser.add_argument('--n_layer', type=int, default=None) # The default is None to easily detect when this pipeline is running the DT model unintentionally.\n parser.add_argument('--n_head', type=int, default=None)\n parser.add_argument('--activation_function', type=str, default='relu')\n parser.add_argument('--dropout', type=float, default=0.1)\n parser.add_argument('--embed_dim', type=int, default=128) # NOTE: embed_dim must be a multiple of n_head!\n parser.add_argument('--transformer_tune_style', type=str, default=None,\n choices=['all', 'last_block', 'linear_probe', 'none'])\n ### PLEX parameters\n parser.add_argument('--future_step', type=int, default=1)\n parser.add_argument('--obs_pred.n_layer', type=int, default=None)\n parser.add_argument('--obs_pred.n_head', type=int, default=None)\n parser.add_argument('--obs_pred.K', type=int, default=None)\n parser.add_argument('--obs_pred.transformer_tune_style', type=str, default=None,\n choices=['all', 'last_block', 'linear_probe', 'none'])\n\n parser.add_argument('--inv_d_pred.n_layer', type=int, default=None)\n parser.add_argument('--inv_d_pred.n_head', type=int, default=None)\n parser.add_argument('--inv_d_pred.K', type=int, default=None)\n parser.add_argument('--inv_d_pred.transformer_tune_style', type=str, default=None,\n choices=['all', 'last_block', 'linear_probe', 'none'])\n ### This applies only to transformer-based models\n add_boolean_arg(parser, 'relative_position_encodings', true='--relative_position_encodings', false='--absolute_position_encodings', default=True)\n parser.add_argument('--action_output_type', type=str, default='deterministic',\n choices=['deterministic', 'gaussian', 'gaussian_mixture'])\n\n # Image encoder\n parser.add_argument('--image_encoder_arch', type=str, default='resnet18')\n parser.add_argument('--image_encoder_load', type=str, default=None)\n parser.add_argument('--pool_type', type=str, default='SpatialSoftmax')\n parser.add_argument('--image_encoder_tune_style', type=str, default='all') # none, fc, lastN (N an integer), or all\n\n # Data\n parser.add_argument('--data_dir', type=str, default='~/data')\n # --camera_names can have a special value FULL_STATE.\n # FULL_STATE means that the agent should use the full_state field returned by the data/env, and 
should *not* use proprio states.\n # In this case, the encoder is automatically set to be a linear layer mapping the full state dimentsion to the model's hidden dimnesion.\n # The image_size should then have the size M,1 or 1,N, where M or N are the length of the full state vectors.\n parser.add_argument('--camera_names', type=str, default='agentview') # E.g., --camera_names=agentview,robot0_eye_in_hand\n # If --image_size is a single number N, the image is interpreted to be of dimensions N x N.\n # If it is two numbers -- M,N -- the image is interpreted to have height M and width N.\n # NOTE: If --image_size is two numbers -- M,N as above -- and either M or N is 1, the image contents are interpreted as\n # an image embedding vector. The --image_encoder_arch is then ignored, and the encoder is automatically set to be a linear layer mapping\n # the embedding dimension to the model's hidden dimnesion.\n parser.add_argument('--image_size', type=int, default=84)\n # Frames-per-second for the desired frame rate (usually, a target task's). The default is to ignore frame rates.\n parser.add_argument('--target_frame_rate', type=int, default=None)\n add_boolean_arg(parser, 'pad_frame_gaps', default=True,\n true='--pad_frame_gaps', false='--copy_into_frame_gaps')\n # Dynamics and action spaces are generally problem-specific, so we use robot-specifc data for them, as well as for validation tasks.\n parser.add_argument('--robot', type=str, default=None)\n\n # Training\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--max_iters', type=int, default=10)\n parser.add_argument('--warmup_steps', type=int, default=100)\n parser.add_argument('--weight_decay', type=float, default=1e-4)\n\n # Evaluation\n # What fraction of the top demo trajectory returns will be used during evaluation?\n # NOTE: this parameter is relevant only if we are in the offline RL mode, not BC mode.\n parser.add_argument('--top_return_fraction', type=float, default=1.0)\n parser.add_argument('--best_metric', type=str, default='evaluation/neg_val_error', choices=['evaluation/neg_val_error', 'evaluation/success_rate'])\n # NOTE: during pretraining, --validation_frac applies *only* to tasks specified by --validation_tasks,\n # and specify the fraction of these tasks' trajectrories that will be used for validation.\n #\n # NOTE: The remaining validation trajectories of these tasks will be used for pretraining.\n # I.e., if you want all data of the --validation_tasks to be used only for validation and none for pretraining,\n # set --validation_frac=1.0 (or just don't specify --validation_frac at all, since 1.0 is the default).\n #\n # During finetuning, --validation_frac applies only to --target_task, and --validation_tasks must be None.\n #\n # NOTE: If during finetuning --best_metric is evaluation/success_rate (i.e., success rate),\n # --validation_frac is ignored and all of --target_task's trajectories are used for training. 
In this case,\n # validation loss isn't computed.\n #\n # NOTE: the following parameters are relevant only if best_metric is negative validation error.\n parser.add_argument('--validation_frac', type=float, default=1.0)\n parser.add_argument('--validation_samples', type=int, default=100) # how many sample batches on which to measure error\n # NOTE: the following parameters are relevant only if best_metric is success rate.\n parser.add_argument('--max_eval_episode_len', type=int, default=500)\n parser.add_argument('--num_eval_episodes', type=int, default=10)\n parser.add_argument('--num_eval_workers', type=int, default=5)\n parser.add_argument('--min_time_at_goal_for_success', type=int, default=5) # Minimum number of consecutive time steps an agent should spend at a goal state during an evaluation episode for the episode to terminate with a success.\n parser.add_argument('--record_camera', type=str, default=None)\n add_boolean_arg(parser, 'record_video', true='--record_video', false='--no_video', default=False)" }, { "identifier": "add_conditioning_args", "path": "PLEX/util/cmdline.py", "snippet": "def add_conditioning_args(parser):\n # Chooses between behavior cloning mode (the default, involves conditioning only on a goal, if available)\n # and offline RL mode (involves conditioning on a goal, if available, and on a return).\n add_boolean_arg(parser, 'bc_learning_mode', true='--bc_learning_mode', false='--orl_learning_mode', default=True)\n parser.add_argument('--context_style', type=str, default='first-success')\n add_boolean_arg(parser, 'context_from_same_traj', true='--context_from_same_traj', false='--context_from_diff_traj', default=False)\n # reward_type can be 'native', 'negative', 'random', 'zero', or 'sparse'.\n parser.add_argument('--reward_type', type=str, default='native')\n add_boolean_arg(parser, 'normalize_reward', true='--normalize_reward', false='--use_raw_reward', default=False)\n parser.add_argument('--discount', type=float, default=0.99)" }, { "identifier": "setup_wandb_logging", "path": "PLEX/util/log.py", "snippet": "def setup_wandb_logging(group_name, cmdline_args):\n exp_prefix = f'{group_name}_{random.randint(int(1e5), int(1e6) - 1)}'\n wandb.init(\n name=exp_prefix,\n group=group_name,\n project='PLEX',\n config=cmdline_args\n )\n # wandb.watch(model) # wandb has some bug" } ]
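The frame-rate bookkeeping in the `sample_batch` snippet at the top of this context list (rate_ratio, max_src_len, trg_start, actual_trg_len) is easiest to follow with concrete numbers. The sketch below only reproduces that arithmetic for one hypothetical case (a 10 fps trajectory resampled for a 30 fps target); the values are illustrative and not taken from this record.

import math

# Worked example of the low-frame-rate branch (data_frame_rate < target_frame_rate).
data_frame_rate, target_frame_rate = 10, 30
max_trg_len, src_end = 30, 25                                    # src_end: exclusive end index into the source trajectory

rate_ratio = target_frame_rate // data_frame_rate                # 3: each source frame is copied 3x
max_src_len = math.ceil(max_trg_len / rate_ratio)                # 10 source frames fill 30 target slots
src_start = max(0, src_end - max_src_len)                        # 15
actual_src_len = src_end - src_start                             # 10
trg_start = src_start * rate_ratio                               # 45: target-time index of the first kept frame
actual_trg_len = min(max_trg_len, actual_src_len * rate_ratio)   # 30
print(src_start, src_end, trg_start, trg_start + actual_trg_len) # 15 25 45 75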
import os
import torch
import argparse
import sys

from PLEX.util.data import TrajectoryDataset, load_data, setup_batch_sampler, train_val_split
from PLEX.util.misc import parse_tasks, setup_essentials, setup_model, set_trainable_params, setup_trainer, run_training
from PLEX.util.evaluators import get_success_rate_evaluator, get_validation_error_evaluator
from PLEX.util.cmdline import add_common_args, add_conditioning_args
from PLEX.util.log import setup_wandb_logging
11,382
def finetune(cmdline_args):
    os.environ["NCCL_DEBUG"] = "INFO"
    print("=== Finetuning ===")
    parser = argparse.ArgumentParser()

    # Add all relevant command-line arguments
    add_common_args(parser)
    add_conditioning_args(parser)
    parser.add_argument('--finetune_learning_rate', type=float, default=1e-5)
    parser.add_argument('--finetune_steps_per_iter', type=int, default=100)
    parser.add_argument('--target_task', type=str, default=None)
    parser.add_argument('--max_target_trajectories', type=int, default=None)

    # Parse them and validate them
    args = parser.parse_args(cmdline_args)
    args = vars(args)
    if not args['bc_learning_mode']:
        assert 'reward' not in args['modalities_to_mask'], "If the model is expected to condition on returns, then they should not be masked out."

    # NOTE: The arguments below aren't actual command-line arguments. We are just adding them to args[] out of convenience.
    # Note also that during finetuning we set predicted_inverse_dynamics_loss_weight=1, i.e., **in case the
    # finetuning trajectories contain actions**, we adapt PLEX based on the predicted observation latents
    # from its planner PL rather than based on the actual ("grounded") observation latents contained
    # in the finetuning trajectories.
    if args['model'] == 'PLEX':
        args['grounded_inverse_dynamics_loss_weight'] = 0
        args['predicted_inverse_dynamics_loss_weight'] = 1
        args['future_prediction_loss_weight'] = 1
def finetune(cmdline_args):
    os.environ["NCCL_DEBUG"] = "INFO"
    print("=== Finetuning ===")
    parser = argparse.ArgumentParser()

    # Add all relevant command-line arguments
    add_common_args(parser)
    add_conditioning_args(parser)
    parser.add_argument('--finetune_learning_rate', type=float, default=1e-5)
    parser.add_argument('--finetune_steps_per_iter', type=int, default=100)
    parser.add_argument('--target_task', type=str, default=None)
    parser.add_argument('--max_target_trajectories', type=int, default=None)

    # Parse them and validate them
    args = parser.parse_args(cmdline_args)
    args = vars(args)
    if not args['bc_learning_mode']:
        assert 'reward' not in args['modalities_to_mask'], "If the model is expected to condition on returns, then they should not be masked out."

    # NOTE: The arguments below aren't actual command-line arguments. We are just adding them to args[] out of convenience.
    # Note also that during finetuning we set predicted_inverse_dynamics_loss_weight=1, i.e., **in case the
    # finetuning trajectories contain actions**, we adapt PLEX based on the predicted observation latents
    # from its planner PL rather than based on the actual ("grounded") observation latents contained
    # in the finetuning trajectories.
    if args['model'] == 'PLEX':
        args['grounded_inverse_dynamics_loss_weight'] = 0
        args['predicted_inverse_dynamics_loss_weight'] = 1
        args['future_prediction_loss_weight'] = 1
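Because `finetune()` passes its argument list straight to `argparse`, it can be driven programmatically. The call below is purely illustrative: the task spec and flag values are hypothetical placeholders, but every flag shown is defined by `add_common_args`, `add_conditioning_args`, or the finetune-specific arguments above.

# Hypothetical invocation; the task spec and values are placeholders, not taken from this record.
finetune([
    '--model=DT', '--n_layer=3', '--n_head=4', '--transformer_tune_style=all',
    '--target_task=(robosuite/pick_place:100)',   # "(task_dir:max_trajectories)" form accepted by parse_tasks
    '--finetune_learning_rate=1e-5', '--finetune_steps_per_iter=100',
    '--max_iters=5', '--batch_size=32',
])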
log, log_to_wandb, timer, data_shuffling_rng, device, camera_names, modalities_to_mask, data_dir, common_env_metadata_dict = setup_essentials(args)
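For orientation, the helpers retrieved in this record compose into a straightforward finetuning flow after the `setup_essentials` call above. The sketch below is not the repository's actual continuation of `finetune()`; it only chains the functions whose signatures appear in the context snippets, assumes the variables bound by the `setup_essentials(...)` line (log, device, camera_names, modalities_to_mask, data_dir), and treats the trainable-parameter spec and the omitted dataset/trainer wiring as assumptions.

# Illustrative composition only -- not the repository's actual finetune() body.
target_tasks, max_trajs = parse_tasks(args['target_task'],
                                      robot=args['robot'],
                                      global_max_traj=args['max_target_trajectories'])

# load_data returns a {task_name: [trajectories]} mapping.
all_trajectories = load_data(log, data_dir, target_tasks, max_trajs)
trajs = next(iter(all_trajectories.values()))
train_trajs, val_trajs = train_val_split(trajs, args['validation_frac'])

model, tune_style_kwargs = setup_model(args, target_tasks[0], log, device, camera_names,
                                       modalities_to_mask, data_dir,
                                       bc_mode=args['bc_learning_mode'])
# Passing tune_style_kwargs as the trainable-parameter spec is an assumption.
trainable_params = set_trainable_params(model, tune_style_kwargs, log)

# Batch sampling, evaluators, the trainer, and the training loop would then go through
# setup_batch_sampler(...), get_validation_error_evaluator(...), setup_trainer(...), and
# run_training(...); the TrajectoryDataset construction they need is omitted here.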
5
2023-11-06 09:38:09+00:00
16k
Giftify-Bot/Giftify-Bot
cogs/giveaways/start.py
[ { "identifier": "Giftify", "path": "bot.py", "snippet": "class Giftify(GiftifyHelper, commands.AutoShardedBot):\r\n user: discord.ClientUser\r\n\r\n colour: int = 0xCB3045\r\n __version_info__ = \"1.1.4\"\r\n\r\n def __init__(\r\n self,\r\n *,\r\n log_handler: LogHandler,\r\n pool: asyncpg.Pool,\r\n session: aiohttp.ClientSession,\r\n amari_client: AmariClient,\r\n ) -> None:\r\n self._log_handler = log_handler\r\n self._pool = pool\r\n self._session = session\r\n self._amari_client = amari_client\r\n\r\n intents = discord.Intents(messages=True, emojis=True, guilds=True)\r\n allowed_mentions = discord.AllowedMentions(everyone=False, roles=False, users=True, replied_user=False)\r\n member_cache_flags = discord.MemberCacheFlags.from_intents(intents=intents)\r\n\r\n sentry_sdk.init(\r\n dsn=os.environ[\"SENTRY_DSN\"],\r\n integrations=[\r\n LoggingIntegration(\r\n level=logging.INFO,\r\n event_level=logging.ERROR,\r\n )\r\n ],\r\n traces_sample_rate=1.0,\r\n )\r\n\r\n super().__init__(\r\n command_prefix=commands.when_mentioned,\r\n tree_cls=CommandTree,\r\n help_command=None,\r\n description=\"A giveaway bot for hosting giveaways.\",\r\n intents=intents,\r\n allowed_mentions=allowed_mentions,\r\n chunk_guilds_at_startup=False,\r\n max_messages=None,\r\n activity=discord.CustomActivity(name=\"\\N{LINK SYMBOL} https://giftifybot.vercel.app\"),\r\n member_cache_flags=member_cache_flags,\r\n owner_ids=OWNER_IDS,\r\n )\r\n\r\n @property\r\n def log_handler(self) -> LogHandler:\r\n return self._log_handler\r\n\r\n @property\r\n def pool(self) -> asyncpg.Pool:\r\n return self._pool\r\n\r\n @property\r\n def session(self) -> aiohttp.ClientSession:\r\n return self._session\r\n\r\n @property\r\n def amari_client(self) -> AmariClient:\r\n return self._amari_client\r\n\r\n @property\r\n def timer_cog(self) -> TimerManager:\r\n return self.get_cog(\"TimerManager\") # type: ignore\r\n\r\n def run(self) -> None:\r\n raise NotImplementedError(\"Please use `.start()` instead.\")\r\n\r\n async def on_ready(self) -> None:\r\n self.log_handler.log.info(\"%s got a ready event at %s\", self.user.name, datetime.datetime.now())\r\n\r\n async def on_resume(self) -> None:\r\n self.log_handler.log.info(\"%s got a resume event at %s\", self.user.name, datetime.datetime.now())\r\n\r\n async def on_command_error(self, ctx: commands.Context, error: commands.CommandError) -> None:\r\n if isinstance(error, commands.CommandInvokeError):\r\n origin_ = error.original\r\n assert ctx.command is not None\r\n if not isinstance(origin_, discord.HTTPException):\r\n print(f\"In {ctx.command.qualified_name}:\", file=sys.stderr)\r\n traceback.print_tb(origin_.__traceback__)\r\n print(f\"{origin_.__class__.__name__}: {origin_}\", file=sys.stderr)\r\n sentry_sdk.capture_exception(error)\r\n\r\n async def start(self) -> None:\r\n await super().start(token=os.environ[\"TOKEN\"], reconnect=True)\r\n\r\n async def setup_hook(self) -> None:\r\n self.start_time: datetime.datetime = datetime.datetime.now(datetime.timezone.utc)\r\n\r\n self.bot_app_info = await self.application_info()\r\n self.owner_ids = OWNER_IDS\r\n\r\n async def get_or_fetch_user(self, user_id: int) -> Optional[discord.User]:\r\n \"\"\"Looks up a user in cache or fetches if not found.\r\n\r\n Parameters\r\n -----------\r\n user_id: int\r\n The user ID to search for.\r\n\r\n Returns\r\n ---------\r\n Optional[User]\r\n The user or None if not found.\r\n \"\"\"\r\n\r\n user = self.get_user(user_id)\r\n if user is not None:\r\n return user\r\n\r\n try:\r\n user = await 
self.fetch_user(user_id)\r\n except discord.HTTPException:\r\n return None\r\n else:\r\n return user\r\n\r\n async def get_or_fetch_member(self, guild: discord.Guild, member_id: int) -> Optional[discord.Member]:\r\n \"\"\"Looks up a member in cache or fetches if not found.\r\n\r\n Parameters\r\n -----------\r\n guild: Guild\r\n The guild to look in.\r\n member_id: int\r\n The member ID to search for.\r\n\r\n Returns\r\n ---------\r\n Optional[Member]\r\n The member or None if not found.\r\n \"\"\"\r\n\r\n member = guild.get_member(member_id)\r\n if member is not None:\r\n return member\r\n\r\n shard: discord.ShardInfo = self.get_shard(guild.shard_id) # type: ignore # will never be None\r\n if shard.is_ws_ratelimited():\r\n try:\r\n member = await guild.fetch_member(member_id)\r\n except discord.HTTPException:\r\n return None\r\n else:\r\n return member\r\n\r\n members = await guild.query_members(limit=1, user_ids=[member_id], cache=True)\r\n if not members:\r\n return None\r\n return members[0]\r" }, { "identifier": "ChannelConfig", "path": "models/giveaway_settings.py", "snippet": "class ChannelConfig:\n \"\"\"Represents the configuration settings for a channel.\n\n Attributes\n ----------\n channel: Union[discord.TextChannel, discord.CategoryChannel]\n The channel associated with the config.\n guild: discord.Guild\n The guild to which the channel belongs.\n required_roles: List[discord.Role]\n The list of default required roles.\n blacklisted_roles: List[discord.Role]\n The list of default blacklisted roles.\n bypass_roles: List[discord.Role]\n The list of default bypass_roles.\n multiplier_roles: Dict[discord.Role, int]\n The role and number of multiplier_roles entries mapping.\n ping: Optional[discord.Role]\n The default ping role for some channel.\n \"\"\"\n\n __slots__: Tuple[str, ...] 
= (\n \"channel\",\n \"guild\",\n \"required_roles\",\n \"blacklisted_roles\",\n \"bypass_roles\",\n \"multiplier_roles\",\n \"ping\",\n )\n\n def __init__(\n self,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n guild: discord.Guild,\n *,\n required_roles: List[discord.Role],\n blacklisted_roles: List[discord.Role],\n bypass_roles: List[discord.Role],\n multiplier_roles: Dict[discord.Role, int],\n ping: Optional[discord.Role] = None,\n ):\n self.channel = channel\n self.guild = guild\n self.required_roles = required_roles\n self.blacklisted_roles = blacklisted_roles\n self.bypass_roles = bypass_roles\n self.multiplier_roles = multiplier_roles\n self.ping = ping\n\n def __repr__(self):\n return f\"<ChannelConfig channel={self.channel!r}>\"\n\n @classmethod\n def from_data(\n cls,\n guild: discord.Guild,\n data: asyncpg.Record,\n ) -> Optional[\"ChannelConfig\"]:\n \"\"\"Create a ChannelConfig object from given data.\n\n Parameters\n ----------\n guild: discord.Guild\n The guild to which the channel belongs.\n value: Any\n The new value for the column.\n\n Returns\n -------\n ChannelConfig\n The updated `ChannelConfig` instance.\n \"\"\"\n\n data = dict(data)\n\n # We do not need these\n channel_id = data.pop(\"channel\")\n channel = guild.get_channel(channel_id)\n if channel is None:\n return\n\n assert isinstance(channel, (discord.TextChannel, discord.CategoryChannel))\n\n data[\"ping\"] = guild.get_role(data[\"ping\"])\n data[\"required_roles\"] = [\n guild.get_role(role) for role in data[\"required_roles\"] if role is not None\n ]\n data[\"blacklisted_roles\"] = [\n guild.get_role(role)\n for role in data[\"blacklisted_roles\"]\n if role is not None\n ]\n data[\"bypass_roles\"] = [\n guild.get_role(role) for role in data[\"bypass_roles\"] if role is not None\n ]\n data[\"multiplier_roles\"] = {\n guild.get_role(role): multiplier_roles\n for role, multiplier_roles in data[\"multiplier_roles\"].items()\n if role is not None\n }\n\n data.pop(\"guild\")\n\n return cls(channel, guild, **data)\n\n async def update(\n self, column: str, value: Any, pool: asyncpg.Pool\n ) -> \"ChannelConfig\":\n \"\"\"Update the specified column with the provided value in the database.\n\n Parameters\n ----------\n column: str\n The column to be updated.\n value: Any\n The new value for the column.\n pool: asyncpg.Pool\n The database connection pool.\n\n Raises\n ------\n ValueError\n If the provided column is not a valid column name in `self.__slots__`.\n\n Returns\n -------\n ChannelConfig\n The updated `ChannelConfig` instance.\n \"\"\"\n if column not in self.__slots__:\n raise ValueError(f\"Invalid column: {column}\")\n\n setattr(self, column, value)\n\n if isinstance(value, list):\n value = [role.id for role in value if role is not None]\n elif isinstance(value, dict):\n value = {\n role.id: multiplier_roles\n for role, multiplier_roles in value.items()\n if role is not None\n }\n elif isinstance(value, discord.Role):\n value = value.id\n else:\n raise ValueError(\"Unknown type given.\")\n\n query = f\"\"\"INSERT INTO channel_configs (guild, channel, {column}) VALUES ($1, $2, $3)\n ON CONFLICT (guild, channel) DO\n UPDATE SET {column} = excluded.{column}\"\"\"\n\n await pool.execute(\n query,\n self.guild.id,\n self.channel.id,\n value,\n )\n\n return self\n\n @classmethod\n async def create(\n cls,\n guild: discord.Guild,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n pool: asyncpg.Pool,\n ) -> \"ChannelConfig\":\n query = \"\"\"INSERT INTO channel_configs (guild, 
channel) VALUES ($1, $2) RETURNING *\"\"\"\n\n record = await pool.fetchrow(\n query,\n guild.id,\n channel.id,\n )\n\n instance = cls.from_data(guild, record)\n assert instance is not None # Since we just created it.\n return instance\n\n @staticmethod\n async def delete(channel_id: int, guild_id: int, pool: asyncpg.Pool):\n \"\"\"Delete the current ChannelConfig object.\n\n Parameters\n ----------\n channel_id: int\n The ID of the channel.\n guild_id: int\n The ID of the guild.\n pool: asyncpg.Pool\n The database connection pool.\n \"\"\"\n\n query = \"\"\"DELETE FROM channel_configs\n WHERE guild = $ AND channel = $2\"\"\"\n\n await pool.execute(query, guild_id, channel_id)" }, { "identifier": "Giveaway", "path": "models/giveaways.py", "snippet": "class Giveaway:\n \"\"\"\n Represents a giveaway object.\n\n Attributes\n ----------\n bot: Giftify\n The bot instance to handle the giveaway.\n guild_id: int\n The ID of the guild (server) where the giveaway is hosted.\n channel_id: int\n The ID of the channel where the giveaway is hosted.\n message_id: int\n The ID of the giveaway message.\n extra_message_id: int\n The ID of the extra message with giveaway.\n host_id: int\n The ID of the user hosting the giveaway.\n donor_id: int\n The ID of the user donating for the giveaway.\n prize: int\n The prize of the giveaway.\n winner_count: int\n The number of winners for the giveaway.\n winners: List[int]\n The winners of the giveaway.\n participants: List[int]\n The IDs participants for the giveaway.\n ended: bool\n Indicates whether the giveaway has ended.\n ends: datetime.datetime\n The timestamp when the giveaway will be ended.\n required_roles: List[int]\n The list of role IDs required to participate in the giveaway.\n blacklisted_roles: List[int]\n The list of role IDs excluded from participating in the giveaway.\n bypass_roles: List[int]\n The list of user IDs exempted from giveaway restrictions.\n multiplier_roles: Optional[dict]\n A dictionary containing multiplier_roles criteria for the giveaway.\n messages: Optional[dict]\n A dictionary containing message-based criteria for the giveaway.\n messages_required: Optional[int]\n The number of messages required to participate in the giveaway.\n allowed_message_channels: Optional[List[int]]\n The ID of the channels where the message count is tracked.\n amari: Optional[int]\n The required Amari XP to participate in the giveaway.\n weekly_amari: Optional[int]\n The required weekly Amari XP to participate in the giveaway.\n \"\"\"\n\n __slots__ = (\n \"bot\",\n \"guild_id\",\n \"channel_id\",\n \"message_id\",\n \"extra_message_id\",\n \"prize\",\n \"host_id\",\n \"donor_id\",\n \"winner_count\",\n \"winners\",\n \"participants\",\n \"ended\",\n \"ends\",\n \"required_roles\",\n \"blacklisted_roles\",\n \"bypass_roles\",\n \"multiplier_roles\",\n \"messages\",\n \"messages_required\",\n \"allowed_message_channels\",\n \"amari\",\n \"weekly_amari\",\n )\n\n def __init__(self, *, bot: Giftify, record: asyncpg.Record):\n self.bot = bot\n self.guild_id: int = record[\"guild\"]\n self.channel_id: int = record[\"channel\"]\n self.message_id: int = record[\"message\"]\n self.extra_message_id: int = record[\"extra_message\"]\n self.prize: str = record[\"prize\"]\n self.host_id: int = record[\"host\"]\n self.donor_id: Optional[int] = record[\"donor\"]\n self.winner_count: int = record[\"winner_count\"]\n self.winners: List[int] = record[\"winners\"]\n self.participants: List[int] = record[\"participants\"]\n self.ended: bool = record[\"ended\"]\n 
self.ends: datetime.datetime = record[\"ends\"]\n self.required_roles: List[int] = record[\"required_roles\"] or []\n self.blacklisted_roles: List[int] = record[\"blacklisted_roles\"] or []\n self.bypass_roles: List[int] = record[\"bypass_roles\"] or []\n self.multiplier_roles: Dict[int, int] = {\n int(role): entries\n for role, entries in record[\"multiplier_roles\"].items()\n if entries > 1\n }\n self.messages: Dict[int, int] = {\n int(member): messages for member, messages in record[\"messages\"].items()\n }\n self.messages_required: Optional[int] = record[\"messages_required\"]\n self.allowed_message_channels: Optional[List[int]] = record[\"messages_channel\"]\n self.amari: Optional[int] = record[\"amari\"]\n self.weekly_amari: Optional[int] = record[\"weekly_amari\"]\n\n def __eq__(self, other: \"Giveaway\") -> bool:\n try:\n return (\n self.guild_id == other.guild_id\n and self.channel_id == other.channel_id\n and self.message_id == other.message_id\n )\n except AttributeError:\n return False\n\n def __hash__(self) -> int:\n return hash((self.guild_id, self.channel_id, self.message_id))\n\n def __repr__(self) -> str:\n return f\"<Giveaway guild_id={self.guild_id} channel_id={self.channel_id} message_id={self.message_id}>\"\n\n @property\n def jump_to_giveaway(self) -> discord.ui.View:\n url = f\"https://discord.com/channels/{self.guild_id}/{self.channel_id}/{self.message_id}\"\n view = BaseView(timeout=None)\n button = discord.ui.Button(label=\"Jump To Giveaway\", url=url)\n view.add_item(button)\n return view\n\n @staticmethod\n def create_embed(\n interaction: Interaction,\n config: GuildConfig,\n duration: datetime.datetime,\n winners: int,\n prize: str,\n required_roles: Optional[List[discord.Role]] = None,\n blacklisted_roles: Optional[List[discord.Role]] = None,\n bypass_roles: Optional[List[discord.Role]] = None,\n multiplier_roles: Optional[Dict[discord.Role, int]] = None,\n messages_required: Optional[int] = None,\n allowed_message_channels: Optional[List[discord.TextChannel]] = None,\n amari: Optional[int] = None,\n weekly_amari: Optional[int] = None,\n donor: Optional[discord.Member] = None,\n ) -> discord.Embed:\n assert interaction.guild is not None\n\n description = f\"Click the {config.reaction} button to join the giveaway!\\n\"\n description += f\"Hosted By: {interaction.user.mention}\\n\"\n\n if donor:\n description += f\"Donor: {donor.mention}\\n\"\n\n description += f\"Ends: {discord.utils.format_dt(duration, style='R')} ({discord.utils.format_dt(duration, style='f')})\\n\"\n\n embed = discord.Embed(\n title=prize,\n description=description,\n colour=config.color,\n timestamp=duration,\n )\n embed.set_footer(\n text=f\"{winners} winner(s) • Ends\",\n icon_url=interaction.guild.icon or interaction.client.user.display_avatar,\n )\n requirements = \"\"\n if required_roles:\n requirements += f\"Required Roles: {', '.join(role.mention for role in required_roles if role is not None)}\\n\"\n if bypass_roles:\n requirements += f\"Bypass Roles: {', '.join(role.mention for role in bypass_roles if role is not None)}\\n\"\n\n if blacklisted_roles:\n requirements += f\"Blacklisted Roles: {', '.join(role.mention for role in blacklisted_roles if role is not None)}\\n\"\n if messages_required:\n requirements += (\n f\"Messages Required: **{messages_required}** message(s) (5s cooldown)\\n\"\n )\n if allowed_message_channels:\n requirements += f\"Allowed Channels: {', '.join(f'<#{c.id}>' for c in allowed_message_channels)}\\n\"\n\n if amari:\n requirements += f\"Amari Level: 
{amari}\\n\"\n if weekly_amari:\n requirements += f\"Weekly Amari: {weekly_amari} XP Points\\n\"\n\n if requirements:\n embed.add_field(name=\"Requirements\", value=requirements, inline=False)\n\n if multiplier_roles:\n multiplier_roles_mention = \"\\n\".join(\n [\n f\"- {entry}x ・ {role.mention}\"\n for role, entry in multiplier_roles.items()\n if role is not None\n ]\n )\n embed.add_field(\n name=\"Bonus Entries\", value=multiplier_roles_mention, inline=False\n )\n\n return embed\n\n @classmethod\n async def start(\n cls,\n interaction: Interaction,\n duration: datetime.datetime,\n winners: int,\n prize: str,\n config: GuildConfig,\n channel_config: Optional[ChannelConfig],\n required_roles: Optional[List[discord.Role]] = None,\n blacklisted_roles: Optional[List[discord.Role]] = None,\n bypass_roles: Optional[List[discord.Role]] = None,\n multiplier_roles: Optional[Dict[discord.Role, int]] = None,\n messages_required: Optional[int] = None,\n allowed_message_channels: Optional[List[discord.TextChannel]] = None,\n amari: Optional[int] = None,\n weekly_amari: Optional[int] = None,\n image: Optional[discord.Attachment] = None,\n donor: Optional[discord.Member] = None,\n ping: bool = False,\n message: Optional[str] = None,\n ):\n assert isinstance(interaction.channel, discord.TextChannel)\n assert interaction.guild is not None\n\n embed = cls.create_embed(\n interaction=interaction,\n config=config,\n duration=duration,\n winners=winners,\n prize=prize,\n required_roles=required_roles,\n blacklisted_roles=blacklisted_roles,\n bypass_roles=bypass_roles,\n multiplier_roles=multiplier_roles,\n messages_required=messages_required,\n allowed_message_channels=allowed_message_channels,\n amari=amari,\n weekly_amari=weekly_amari,\n donor=donor,\n )\n view = GiveawayView(\n config.reaction, config.participants_reaction, config.button_style\n )\n giveaway_message = await interaction.channel.send(\n config.gw_header, embed=embed, view=view\n )\n\n message_embed = discord.Embed(\n title=f\"{GIFT_EMOJI} Giveaway\",\n description=f\"**Message・** {message}\" if message else None,\n color=config.color,\n )\n\n if image:\n message_embed.set_image(url=image)\n\n extra_message = None\n\n if ping or image:\n ping_role = (\n channel_config.ping\n if channel_config and channel_config.ping\n else config.ping\n )\n extra_message = await interaction.channel.send(\n ping_role.mention if ping_role else \"\",\n embed=message_embed if message or image else None, # type: ignore\n allowed_mentions=discord.AllowedMentions(roles=True),\n )\n\n if extra_message is None and message is not None:\n extra_message = await interaction.channel.send(embed=message_embed)\n\n await interaction.client.timer_cog.create_timer(\n message_id=giveaway_message.id,\n channel_id=interaction.channel.id,\n guild_id=interaction.guild.id,\n author_id=interaction.user.id,\n title=\"Giveaway\",\n event=\"giveaway\",\n expires=duration,\n pool=interaction.client.pool,\n )\n\n return await cls.create_entry(\n bot=interaction.client,\n guild_id=interaction.guild.id,\n channel_id=interaction.channel.id,\n message_id=giveaway_message.id,\n prize=prize,\n host_id=interaction.user.id,\n donor_id=donor.id if donor else None,\n winner_count=winners,\n ends=duration,\n required_roles=[role.id for role in required_roles if role is not None]\n if required_roles\n else [],\n blacklisted_roles=[\n role.id for role in blacklisted_roles if role is not None\n ]\n if blacklisted_roles\n else [],\n bypass_roles=[role.id for role in bypass_roles if role is not None]\n 
if bypass_roles\n else [],\n multiplier_roles={\n role.id: entries\n for role, entries in multiplier_roles.items()\n if role is not None\n }\n if multiplier_roles\n else {},\n messages={},\n messages_required=messages_required,\n allowed_message_channels=[c.id for c in allowed_message_channels]\n if allowed_message_channels\n else [],\n extra_message_id=extra_message.id if extra_message else None,\n amari=amari,\n weekly_amari=weekly_amari,\n )\n\n @classmethod\n async def create_entry(\n cls,\n bot: Giftify,\n guild_id: int,\n channel_id: int,\n message_id: int,\n prize: str,\n host_id: int,\n winner_count: int,\n ends: datetime.datetime,\n required_roles: List[int],\n blacklisted_roles: List[int],\n bypass_roles: List[int],\n donor_id: Optional[int],\n multiplier_roles: Optional[dict],\n messages: Optional[dict],\n messages_required: Optional[int],\n allowed_message_channels: Optional[List[int]],\n extra_message_id: Optional[int],\n amari: Optional[int],\n weekly_amari: Optional[int],\n ) -> \"Giveaway\":\n \"\"\"\n Create a new Giveaway object and insert it into the database.\n\n Parameters\n ----------\n bot: Giftify\n The bot instance.\n guild_id: int\n The ID of the guild (server) where the giveaway is hosted.\n channel_id: int\n The ID of the channel where the giveaway is hosted.\n message_id: int\n The ID of the message having the giveaway view.\n prize: str\n The prize of the giveaway.\n host_id: int\n The ID of the user hosting the giveaway.\n donor_id: int\n The ID of the donor of the giveaway.\n winner_count: int\n The number of winners for the giveaway.\n ends: datetime.datetime\n The time when the giveaway ends.\n required_roles: List[int]\n The list of role IDs required to participate in the giveaway.\n blacklisted_roles: List[int]\n The list of role IDs excluded from participating in the giveaway.\n bypass_roles: List[int]\n The list of user IDs exempted from giveaway restrictions.\n multiplier_roles: Optional[dict]\n A dictionary containing multiplier_roles criteria for the giveaway.\n messages: Optional[dict]\n A dictionary containing message-based criteria for the giveaway.\n messages_required: Optional[int]\n The number of messages required to participate in the giveaway.\n allowed_message_channels: Optional[int]\n The ID of the channel where the message count is tracked.\n amari: Optional[int]\n The required Amari XP to participate in the giveaway.\n weekly_amari: Optional[int]\n The required weekly Amari XP to participate in the giveaway.\n\n Returns\n -------\n Giveaway\n The created Giveaway object.\n \"\"\"\n record = await bot.pool.fetchrow(\n \"INSERT INTO giveaways (guild, channel, message, extra_message, host, donor, prize, winner_count, ends, required_roles, blacklisted_roles, bypass_roles, multiplier_roles, messages, messages_required, messages_channel, amari, weekly_amari) \"\n \"VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18) \"\n \"RETURNING *\",\n guild_id,\n channel_id,\n message_id,\n extra_message_id,\n host_id,\n donor_id,\n prize,\n winner_count,\n ends,\n required_roles,\n blacklisted_roles,\n bypass_roles,\n multiplier_roles,\n messages,\n messages_required,\n allowed_message_channels,\n amari,\n weekly_amari,\n )\n return cls(bot=bot, record=record)\n\n async def check_requirements(self, member: discord.Member) -> None:\n missing_roles = [\n role.mention\n for role_id in self.required_roles\n if (role := member.guild.get_role(role_id)) and role not in member.roles\n ]\n if missing_roles:\n raise 
GiveawayError(\n f\"You cannot join this giveaway as you are missing the following required roles: {', '.join(missing_roles)}\"\n )\n\n blacklisted_roles = [\n role.mention\n for role_id in self.blacklisted_roles\n if (role := member.guild.get_role(role_id)) and role in member.roles\n ]\n if blacklisted_roles:\n raise GiveawayError(\n f\"You cannot join this giveaway as you have the following blacklisted roles: {', '.join(blacklisted_roles)}\"\n )\n\n if self.amari:\n if (user_level := await self.bot.fetch_level(member)) < self.amari:\n raise GiveawayError(\n f\"Your amari level is less than the required level, you need `{self.amari - user_level}` more level(s) to join the giveaway.\"\n )\n\n if self.weekly_amari:\n if (\n weekly_exp := await self.bot.fetch_weekly_experience(member)\n ) < self.weekly_amari:\n raise GiveawayError(\n f\"Your weekly amari experience is less than the required weekly amari experience, you need `{self.weekly_amari - weekly_exp}` more experience point(s) to join the giveaway.\"\n )\n\n if self.messages_required and self.messages_required > 0:\n if (\n user_messages := self.messages.get(member.id, 0)\n ) < self.messages_required:\n raise GiveawayError(\n f\"You have sent less messages than the required messages, you need to send `{self.messages_required - user_messages}` more messages to join the giveaway.\"\n )\n\n def can_bypass(self, member: discord.Member) -> bool:\n return any(\n member.guild.get_role(role_id) in member.roles\n for role_id in self.bypass_roles\n )\n\n def get_multiplier_entries(self, member: discord.Member) -> int:\n entries = 0\n for role_id, multiplier_roles_entries in self.multiplier_roles.items():\n if member.get_role(int(role_id)):\n entries += multiplier_roles_entries\n\n return entries or 1\n\n async def join(self, member: discord.Member) -> int:\n try:\n await self.check_requirements(member)\n except GiveawayError as error:\n if not self.can_bypass(member):\n raise error\n\n if member.id in self.participants:\n raise GiveawayError(\"You have already joined the giveaway.\")\n\n number_of_entries = self.get_multiplier_entries(member)\n entries = [member.id] * number_of_entries\n\n self.participants += entries\n\n query = \"\"\"UPDATE giveaways SET participants = $1 \n WHERE guild = $2 AND channel = $3 AND message = $4\"\"\"\n\n await self.bot.pool.execute(\n query, self.participants, self.guild_id, self.channel_id, self.message_id\n )\n\n return len(set(self.participants))\n\n async def leave(self, member: discord.Member) -> int:\n if member.id not in self.participants:\n raise GiveawayError(\"You are not a participant of this giveaway.\")\n\n self.participants = [\n participant for participant in self.participants if participant != member.id\n ]\n\n query = \"\"\"UPDATE giveaways SET participants = $1 \n WHERE guild = $2 AND channel = $3 AND message = $4\"\"\"\n\n await self.bot.pool.execute(\n query, self.participants, self.guild_id, self.channel_id, self.message_id\n )\n\n return len(set(self.participants))\n\n async def _end(self):\n await self.bot.pool.execute(\n \"UPDATE giveaways SET ended = $1, winners = $2 WHERE guild = $3 AND channel = $4 AND message = $5\",\n True,\n self.winners,\n self.guild_id,\n self.channel_id,\n self.message_id,\n )\n\n async def end(self):\n guild = self.bot.get_guild(self.guild_id)\n if not guild:\n return await self._end()\n\n config = await self.bot.fetch_config(guild)\n winners = await self.pick_winners(self.winner_count, guild)\n self.winners = [winner.id for winner in winners]\n\n await 
self._end()\n\n if config.dm_host:\n await self.dm_host(guild, winners, config.dm_host_message)\n\n if config.dm_winner:\n await self.dm_winners(config.dm_message, winners)\n\n channel = guild.get_channel(self.channel_id)\n if not channel or not isinstance(channel, discord.TextChannel):\n return\n\n gw_message = channel.get_partial_message(self.message_id)\n message = (\n safe_format(\n config.end_message,\n winners=\", \".join(winner.mention for winner in winners),\n prize=bold(self.prize),\n )\n if winners\n else f\"Could not pick any winners for the giveaway of {bold(self.prize)}!\"\n )\n embed = self.get_end_embed(guild, config)\n\n view = GiveawayView(\n config.reaction,\n config.participants_reaction,\n config.button_style,\n participant_count=len(set(self.participants)),\n disabled=True,\n )\n\n with contextlib.suppress(discord.HTTPException):\n await gw_message.edit(content=config.gw_end_header, embed=embed, view=view)\n await gw_message.reply(message, view=self.jump_to_giveaway)\n\n async def reroll(self, winner_count: int):\n guild = self.bot.get_guild(self.guild_id)\n if not guild:\n return\n\n config = await self.bot.fetch_config(guild)\n winners = await self.pick_winners(winner_count, guild)\n self.winners = [winner.id for winner in winners]\n\n await self._end()\n\n if config.dm_winner:\n await self.dm_winners(config.dm_message, winners)\n\n channel = guild.get_channel(self.channel_id)\n if not channel or not isinstance(channel, discord.TextChannel):\n return\n\n gw_message = channel.get_partial_message(self.message_id)\n message = (\n safe_format(\n config.reroll_message,\n winners=\", \".join(winner.mention for winner in winners),\n prize=bold(self.prize),\n )\n if winners\n else f\"Could not pick any winners for the giveaway of {bold(self.prize)}!\"\n )\n embed = self.get_end_embed(guild, config)\n\n view = GiveawayView(\n config.reaction,\n config.participants_reaction,\n config.button_style,\n participant_count=len(set(self.participants)),\n disabled=True,\n )\n\n with contextlib.suppress(discord.HTTPException):\n await gw_message.edit(content=config.gw_end_header, embed=embed, view=view)\n await gw_message.reply(message, view=self.jump_to_giveaway)\n\n async def cancel(self):\n await self.bot.pool.execute(\n \"\"\"DELETE FROM giveaways WHERE guild = $1 AND channel = $2 AND message = $3\"\"\",\n self.guild_id,\n self.channel_id,\n self.message_id,\n )\n if self.extra_message_id is not None:\n channel = self.bot.get_channel(self.channel_id)\n if channel is not None:\n await channel.get_partial_message(self.extra_message_id).delete() # type: ignore\n\n async def dm_host(\n self, guild: discord.Guild, winners: List[discord.Member], message: str\n ) -> None:\n host = await self.bot.get_or_fetch_member(guild, self.host_id)\n if not host:\n return\n\n description = safe_format(\n message,\n winners=\", \".join(winner.mention for winner in winners)\n if winners\n else \"No Winners\",\n prize=bold(self.prize),\n )\n\n embed = discord.Embed(\n title=f\"Your giveaway for {self.prize} has ended!\"[:256],\n description=description,\n colour=self.bot.colour,\n )\n view = self.jump_to_giveaway\n\n with contextlib.suppress(discord.HTTPException):\n await host.send(embed=embed, view=view)\n\n async def dm_winners(self, message: str, winners: List[discord.Member]) -> None:\n for winner in winners:\n description = safe_format(\n message, winner=winner.mention, prize=bold(self.prize)\n )\n\n embed = discord.Embed(\n title=\"You won!\",\n description=description,\n colour=self.bot.colour,\n 
)\n view = self.jump_to_giveaway\n\n with contextlib.suppress(discord.HTTPException):\n await winner.send(embed=embed, view=view)\n\n async def pick_winners(\n self, count: int, guild: discord.Guild\n ) -> List[discord.Member]:\n winners = []\n\n participants = self.participants.copy()\n\n while count > 0 and participants:\n member_id = random.choice(participants)\n member = await self.bot.get_or_fetch_member(guild, member_id)\n if member is not None and member not in winners:\n try:\n await self.check_requirements(member)\n except GiveawayError:\n pass\n else:\n winners.append(member)\n count -= 1\n\n participants.remove(member_id)\n\n return winners\n\n def get_end_embed(self, guild: discord.Guild, config: GuildConfig) -> discord.Embed:\n description = (\n f\"This giveaway has ended!\\n\"\n f\"Hosted By: <@!{self.host_id}>\\n\"\n f\"Winners: {', '.join(f'<@!{winner_id}>' for winner_id in self.winners) if self.winners else 'No Winners'}\\n\"\n f\"Ended: {discord.utils.format_dt(datetime.datetime.now(datetime.timezone.utc), style='R')} ({discord.utils.format_dt(datetime.datetime.now(datetime.timezone.utc), style='f')})\\n\"\n )\n if self.donor_id:\n description += f\"Donor: <@!{self.donor_id}>\\n\"\n embed = discord.Embed(\n title=self.prize,\n description=description,\n colour=config.color,\n timestamp=self.ends,\n )\n embed.set_footer(\n text=f\"{self.winner_count} winner(s) • Ended\",\n icon_url=guild.icon or self.bot.user.display_avatar,\n )\n\n requirements = \"\"\n if self.required_roles:\n requirements += f\"Required Roles: {', '.join(f'<@&{role_id}>' for role_id in self.required_roles)}\\n\"\n if self.bypass_roles:\n requirements += f\"Bypass Roles: {', '.join(f'<@&{role_id}>' for role_id in self.bypass_roles)}\\n\"\n if self.blacklisted_roles:\n requirements += f\"Blacklisted Roles: {', '.join(f'<@&{role_id}>' for role_id in self.blacklisted_roles)}\\n\"\n if self.messages_required:\n requirements += f\"Messages Required: **{self.messages_required}** message(s) (5s cooldown)\\n\"\n if self.allowed_message_channels:\n requirements += f\"Allowed Channels: {', '.join(f'<#{cid}>' for cid in self.allowed_message_channels)}\\n\"\n if self.amari:\n requirements += f\"Amari Level: {self.amari}\\n\"\n if self.weekly_amari:\n requirements += f\"Weekly Amari: {self.weekly_amari} XP Points\\n\"\n\n if requirements:\n embed.add_field(name=\"Requirements\", value=requirements, inline=False)\n\n if self.multiplier_roles:\n multiplier_roles = \"\\n\".join(\n [\n f\"- {multiplier_entries}x ・ <@&{multiplier_role}>\"\n for multiplier_role, multiplier_entries in self.multiplier_roles.items()\n ]\n )\n embed.add_field(name=\"Bonus Entries\", value=multiplier_roles, inline=False)\n\n return embed" }, { "identifier": "GiveawayAction", "path": "models/giveaways.py", "snippet": "class GiveawayAction(Enum):\n START = 0\n END = 1\n REROLL = 2\n CANCEL = 3\n\n def __str__(self) -> str:\n if self == GiveawayAction.START:\n return \"Started\"\n elif self == GiveawayAction.END:\n return \"Ended\"\n elif self == GiveawayAction.REROLL:\n return \"Rerolled\"\n else:\n return \"Cancelled\"" }, { "identifier": "GiveawayError", "path": "utils/exceptions.py", "snippet": "class GiveawayError(Exception):\r\n \"\"\"Error raised in a giveaway.\"\"\"\r" }, { "identifier": "BonusRolesTransformer", "path": "utils/transformers.py", "snippet": "class BonusRolesTransformer(app_commands.Transformer):\r\n async def transform(\r\n self, interaction: Interaction, value: str\r\n ) -> Dict[discord.Role, int]:\r\n roles_string = 
value.split()\r\n roles: Dict[discord.Role, int] = {}\r\n\r\n ctx = await commands.Context.from_interaction(interaction)\r\n\r\n for multiplier_roles_role_string in roles_string:\r\n if \":\" not in multiplier_roles_role_string:\r\n raise InvalidRolesPassed(\r\n \"You must use `:` to split the role and bonus entries.\"\r\n )\r\n try:\r\n (\r\n role_string,\r\n multiplier_roles_entries,\r\n ) = multiplier_roles_role_string.split(\":\")\r\n except ValueError:\r\n raise InvalidRolesPassed(\"Too many `:` found, expected only 1.\")\r\n try:\r\n role = await commands.RoleConverter().convert(ctx, role_string.strip())\r\n except commands.RoleNotFound:\r\n raise InvalidRolesPassed(f\"{role_string!r} is not a valid role.\")\r\n try:\r\n multiplier_roles_entries = int(multiplier_roles_entries)\r\n except ValueError:\r\n raise InvalidRolesPassed(\r\n f\"{multiplier_roles_entries!r} is not a valid number of bonus entries for {role_string}\"\r\n )\r\n\r\n if multiplier_roles_entries > 5:\r\n raise InvalidRolesPassed(\r\n \"A role cannot have more than 5 bonus entries.\"\r\n )\r\n else:\r\n if role_string == \"@everyone\":\r\n raise InvalidRolesPassed(f\"{role_string!r} is not a valid role.\")\r\n roles[role] = multiplier_roles_entries\r\n\r\n return roles\r" }, { "identifier": "RolesTransformer", "path": "utils/transformers.py", "snippet": "class RolesTransformer(app_commands.Transformer):\r\n async def transform(\r\n self, interaction: Interaction, value: str\r\n ) -> List[discord.Role]:\r\n roles_string = value.split()\r\n roles: List[discord.Role] = []\r\n\r\n ctx = await commands.Context.from_interaction(interaction)\r\n\r\n for role_string in roles_string:\r\n try:\r\n role = await commands.RoleConverter().convert(ctx, role_string.strip())\r\n except commands.RoleNotFound:\r\n raise InvalidRolesPassed(f\"{role_string!r} is not a valid role.\")\r\n\r\n else:\r\n if role_string == \"@everyone\":\r\n raise InvalidRolesPassed(f\"{role_string!r} is not a valid role.\")\r\n roles.append(role)\r\n\r\n return roles[:5]\r" }, { "identifier": "TextChannelsTransformer", "path": "utils/transformers.py", "snippet": "class TextChannelsTransformer(app_commands.Transformer):\r\n async def transform(\r\n self, interaction: Interaction, value: str\r\n ) -> List[discord.TextChannel]:\r\n channels_string = value.split()\r\n channels: List[discord.TextChannel] = []\r\n\r\n ctx = await commands.Context.from_interaction(interaction)\r\n\r\n for channel_string in channels_string:\r\n try:\r\n role = await commands.TextChannelConverter().convert(\r\n ctx, channel_string.strip()\r\n )\r\n except commands.RoleNotFound:\r\n raise InvalidChannelPassed(\r\n f\"{channel_string!r} is not a valid channel.\"\r\n )\r\n\r\n else:\r\n channels.append(role)\r\n\r\n return channels[:5]\r" }, { "identifier": "TimeTransformer", "path": "utils/transformers.py", "snippet": "class TimeTransformer(app_commands.Transformer):\r\n async def transform(\r\n self, interaction: Interaction, argument: str\r\n ) -> datetime.datetime:\r\n matches = TIME_REGEX.findall(argument.lower())\r\n delta = datetime.timedelta()\r\n\r\n for value, unit in matches:\r\n try:\r\n seconds = TIME_DICT[unit] * float(value)\r\n delta += datetime.timedelta(seconds=seconds)\r\n except KeyError:\r\n raise InvalidTime(\r\n (\r\n f\"Invalid time unit {unit!r}. \"\r\n f\"Please provide a valid time unit such as 'h' for hours, 'm' for minutes, 's' for seconds, or 'd' for days. 
\"\r\n f\"Examples of valid input include: 12h, 15h2m, 1d, etc.\"\r\n ),\r\n )\r\n except ValueError:\r\n raise InvalidTime(\r\n f\"Invalid value {value!r} provided. Please provide a valid number.\",\r\n )\r\n\r\n if (\r\n delta.total_seconds() < 10 or delta.total_seconds() > 1209600\r\n ): # 10 seconds and 2 weeks in seconds\r\n raise InvalidTime(\r\n \"The time duration must be greater than 10 seconds and less than 2 weeks.\",\r\n )\r\n\r\n current_time = datetime.datetime.now(datetime.timezone.utc)\r\n\r\n return current_time + delta\r" }, { "identifier": "Interaction", "path": "utils/tree.py", "snippet": "class CommandTree(app_commands.CommandTree):\r\n async def on_error(\r\n self,\r\n interaction: Interaction,\r\n error: app_commands.AppCommandError,\r\n ) -> None:\r" } ]
import datetime import discord from collections import ChainMap from typing import Dict, List, Optional, Tuple from discord import app_commands from discord.app_commands import Transform from discord.ext import commands from bot import Giftify from models.giveaway_settings import ChannelConfig from models.giveaways import Giveaway, GiveawayAction from utils.exceptions import GiveawayError from utils.transformers import ( BonusRolesTransformer, RolesTransformer, TextChannelsTransformer, TimeTransformer, ) from utils.tree import Interaction
11,054
class GiveawayStart(commands.GroupCog): """A cog for starting giveaways.""" bot: Giftify @app_commands.command(name="start") @app_commands.describe( prize="The prize of the giveaway.", duration="The duration of the giveaway.", winners="The number of winners for the giveaway.", required_roles="The roles required to participate in the giveaway.", blacklisted_roles="The roles not allowed to participate in the giveaway.", bypass_roles="The roles that can bypass participation restrictions", multiplier_roles="Use the format <role:number_of_multiplier_roles_entries> split using a ':' (colon).", messages_required="The number of messages required to join the giveaway.", allowed_message_channels="The channels where users are allowed to send messages.", amari="The amari level required for the giveaway.", weekly_amari="The weekly amari xp required for the giveaway.", no_defaults="Flag to exclude default settings for the giveaway.", image="The attached image for the giveaway.", ping="Wheter to ping the server pingrole when the giveaway starts.", donor="The donating member for the giveaway.", message="The message to accompany the giveaway.", ) @app_commands.checks.bot_has_permissions( embed_links=True, send_messages=True, view_channel=True, add_reactions=True ) @app_commands.checks.cooldown(1, 5, key=lambda i: (i.guild, i.user.id)) async def giveaway_start( self, interaction: Interaction,
class GiveawayStart(commands.GroupCog): """A cog for starting giveaways.""" bot: Giftify @app_commands.command(name="start") @app_commands.describe( prize="The prize of the giveaway.", duration="The duration of the giveaway.", winners="The number of winners for the giveaway.", required_roles="The roles required to participate in the giveaway.", blacklisted_roles="The roles not allowed to participate in the giveaway.", bypass_roles="The roles that can bypass participation restrictions", multiplier_roles="Use the format <role:number_of_multiplier_roles_entries> split using a ':' (colon).", messages_required="The number of messages required to join the giveaway.", allowed_message_channels="The channels where users are allowed to send messages.", amari="The amari level required for the giveaway.", weekly_amari="The weekly amari xp required for the giveaway.", no_defaults="Flag to exclude default settings for the giveaway.", image="The attached image for the giveaway.", ping="Wheter to ping the server pingrole when the giveaway starts.", donor="The donating member for the giveaway.", message="The message to accompany the giveaway.", ) @app_commands.checks.bot_has_permissions( embed_links=True, send_messages=True, view_channel=True, add_reactions=True ) @app_commands.checks.cooldown(1, 5, key=lambda i: (i.guild, i.user.id)) async def giveaway_start( self, interaction: Interaction,
duration: Transform[datetime.datetime, TimeTransformer],
8
2023-11-09 15:00:15+00:00
16k
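The TimeTransformer snippet in this record's context list turns duration strings such as `12h`, `15h2m`, or `1d` into an absolute end time: it sums unit multiples collected by a regex scan, rejects totals under 10 seconds or over 2 weeks, and adds the delta to the current UTC time. Below is a minimal standalone sketch of that parsing logic; the record does not show `TIME_REGEX` or `TIME_DICT`, so the definitions here are plausible stand-ins rather than the repository's actual constants.

```python
import datetime
import re

# Hypothetical stand-ins for the repository's TIME_REGEX / TIME_DICT constants.
TIME_REGEX = re.compile(r"(\d+\.?\d*)\s*([smhd])")
TIME_DICT = {"s": 1, "m": 60, "h": 3600, "d": 86400}


def parse_duration(argument: str) -> datetime.datetime:
    """Turn '15h2m'-style input into an absolute UTC end time."""
    delta = datetime.timedelta()
    for value, unit in TIME_REGEX.findall(argument.lower()):
        delta += datetime.timedelta(seconds=TIME_DICT[unit] * float(value))
    total = delta.total_seconds()
    if total < 10 or total > 1209600:  # same 10-second / 2-week window as the snippet
        raise ValueError("duration must be between 10 seconds and 2 weeks")
    return datetime.datetime.now(datetime.timezone.utc) + delta


# Example: a giveaway lasting one day and two hours.
print(parse_duration("1d2h"))
```

Checking the bound on the summed delta rather than on each unit matches the snippet's behaviour, where several short units can still add up to a valid duration.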
Zjy0401/CoCoFormer
train.py
[ { "identifier": "create_jsf_datasets", "path": "dataset/jsf.py", "snippet": "def create_jsf_datasets(dataset_root, max_seq, random_seq=True):\n\n train_root = os.path.join(dataset_root, \"train\")\n # val_root = os.path.join(dataset_root, \"val\")\n test_root = os.path.join(dataset_root, \"test\")\n\n train_dataset = MultiJSFDataset(train_root, max_seq, random_seq)\n # val_dataset = JSFDataset(val_root, max_seq, random_seq)\n test_dataset = MultiJSFDataset(test_root, max_seq, random_seq)\n\n return train_dataset, test_dataset" }, { "identifier": "CoCoformer", "path": "model/CoCoFormer.py", "snippet": "class CoCoformer(nn.Module):\n\n def __init__(self, word2event, event2word, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,\n dropout=0.1, max_sequence=2048, c_max_seq=256, b_max_seq=1024, rpr=False):\n super(CoCoformer, self).__init__()\n\n self.dummy = DummyDecoder()\n self.nlayers = n_layers\n self.nhead = num_heads\n self.d_model = d_model\n self.d_ff = dim_feedforward\n self.dropout = dropout\n self.max_seq = max_sequence\n self.c_max_seq = c_max_seq\n self.b_max_seq = b_max_seq\n self.rpr = rpr\n # word2event and event2word:\n self.word2event = word2event\n self.event2word = event2word\n\n # past layer of chord\n self.cpast_layer_dmodel = d_model\n self.cpast_layer_nhead = 8\n self.cpast_dim_forward = 256\n self.cpast_layer_max_seq = 256\n self.cpast_layer_nlayers = 1\n\n # past layer of beats\n self.bpast_layer_dmodel = d_model\n self.bpast_layer_nhead = 8\n self.bpast_dim_forward = 256\n self.bpast_layer_max_seq = 1024\n self.bpast_layer_nlayers = 1\n\n # Input embedding\n self.n_embedding = nn.Embedding(VOCAB_SIZE, self.d_model)\n self.c_embedding = nn.Embedding(VOCAB_SIZE, self.cpast_layer_dmodel)\n self.b_embedding = nn.Embedding(VOCAB_SIZE, self.bpast_layer_dmodel)\n # Positional encoding\n self.n_positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq)\n self.c_positional_encoding = PositionalEncoding(self.cpast_layer_dmodel, self.dropout, self.cpast_layer_max_seq)\n self.b_positional_encoding = PositionalEncoding(self.bpast_layer_dmodel, self.dropout, self.bpast_layer_max_seq)\n\n # Base transformer\n if not self.rpr:\n # To make a decoder-only transformer we need to use masked encoder layers\n # Dummy decoder to essentially just return the encoder output\n encoder_norm = LayerNorm(self.d_model)\n encoder_past_layer = TransformerEncoderPastLayer(self.cpast_layer_dmodel, self.cpast_layer_nhead,\n self.cpast_dim_forward, self.bpast_layer_dmodel,\n self.bpast_layer_nhead, self.bpast_dim_forward,\n self.d_model, self.nhead,\n self.d_ff, self.dropout)\n encoder_layer = TransformerEncoderLayer(self.d_model, self.nhead, self.d_ff, self.dropout)\n encoder = TransformerEncoder(encoder_layer, self.nlayers, encoder_past_layer, self.max_seq, self.c_max_seq,\n self.b_max_seq, encoder_norm)\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_encoder=encoder, custom_decoder=self.dummy\n )\n # RPR Transformer\n elif self.rpr:\n encoder_norm = LayerNorm(self.d_model)\n encoder_layer = TransformerEncoderLayerRPR(self.d_model, self.nhead, self.d_ff, self.dropout,\n er_len=self.max_seq)\n encoder_past_layer = TransformerEncoderLayerRPR_(self.cpast_layer_dmodel, self.cpast_layer_nhead,\n self.cpast_dim_forward, self.bpast_layer_dmodel,\n self.bpast_layer_nhead, self.bpast_dim_forward,\n 
self.d_model, self.nhead,\n self.d_ff, self.dropout, er_len=self.max_seq)\n encoder = TransformerEncoderRPR(encoder_layer, self.nlayers, encoder_past_layer, self.max_seq,\n self.c_max_seq, self.b_max_seq, encoder_norm)\n\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=self.dummy, custom_encoder=encoder\n )\n\n # Final output is a softmaxed linear layer\n # TODO: verify the size of linear\n self.Norm1 = nn.LayerNorm(1024)\n self.ReLU = nn.ReLU()\n self.Norm2 = nn.LayerNorm(181)\n self.Dropout = nn.Dropout(dropout)\n self.transLinear = nn.Linear(256, 256)\n self.Wout1 = nn.Linear(self.d_model, 1024)\n self.Wout2 = nn.Linear(1024, 1024)\n self.Wout3 = nn.Linear(1024, VOCAB_SIZE)\n self.softmax = nn.Softmax(dim=-1)\n\n def _reset_parameters(self):\n r\"\"\"Initiate parameters in the transformer model.\"\"\"\n\n for p in self.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n\n # forward\n def forward(self, x1, x2, x3, mask=True):\n\n args = parse_train_args()\n # for pure-Transformer:\n # Transformer module:\n if mask is True:\n if args.gpu[0] != -1:\n mask = self.transformer.generate_square_subsequent_mask(x1.shape[1]).cuda(device=args.gpu[0])\n else:\n mask = self.transformer.generate_square_subsequent_mask(x1.shape[1]).cpu()\n else:\n mask = None\n # Input shape is (max_seq, batch_size, d_model)\n x_n = self.n_embedding(x1)\n x_n = x_n.permute(1, 0, 2)\n x_n = self.n_positional_encoding(x_n)\n\n x_c = self.c_embedding(x2)\n x_c = x_c.permute(1, 0, 2)\n x_c = self.c_positional_encoding(x_c)\n\n x_b = self.b_embedding(x3)\n x_b = x_b.permute(1, 0, 2)\n x_b = self.b_positional_encoding(x_b)\n\n # Since there are no true decoder layers, the tgt is unused\n # Pytorch wants src and tgt to have some equal dims however\n x_out = self.transformer(src=torch.cat((x_n, x_c, x_b), dim=0), tgt=x_n,\n src_mask=mask)\n # x_out = self.transformer(src=x_transformer, tgt=x_transformer, src_mask=mask)\n # Back to (batch_size, max_seq, d_model)\n x_out = x_out.permute(1, 0, 2)\n\n # concat\n # x_concat = torch.cat([x_out, x_out2], dim=1)\n y = self.Dropout(self.Norm1(self.ReLU(self.Wout1(x_out))))\n y = self.Dropout(self.Norm1(self.ReLU(self.Wout2(y))))\n y = self.Wout3(y)\n # y = self.Wout2(y)\n # y = self.softmax(y)\n\n del mask\n\n # They are trained to predict the next note in sequence (we don't need the last one)\n return y\n\n # unconditional generate\n def generate(self, primer=None, target_seq_length=1024, beam=0, beam_chance=1.0):\n\n assert (not self.training), \"Cannot generate while in training mode\"\n\n print(\"Generating sequence of max length:\", target_seq_length)\n\n gen_seq = torch.full((1, target_seq_length), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n\n num_primer = len(primer)\n gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())\n\n # print(\"primer:\",primer)\n # print(gen_seq)\n cur_i = num_primer\n while cur_i < target_seq_length:\n # gen_seq_batch = gen_seq.clone()\n y = self.softmax(self.forward(gen_seq[..., :cur_i]))[..., :len(self.word2event)]\n token_probs = y[:, cur_i - 1, :]\n\n if beam == 0:\n beam_ran = 2.0\n else:\n beam_ran = random.uniform(0, 1)\n\n if beam_ran <= beam_chance:\n token_probs = token_probs.flatten()\n top_res, top_i = torch.topk(token_probs, beam)\n\n beam_rows = top_i // VOCAB_SIZE\n beam_cols = top_i % VOCAB_SIZE\n\n gen_seq = 
gen_seq[beam_rows, :]\n gen_seq[..., cur_i] = beam_cols\n\n else:\n distrib = torch.distributions.categorical.Categorical(probs=token_probs)\n next_token = distrib.sample()\n # print(\"next token:\",next_token)\n gen_seq[:, cur_i] = next_token\n\n # Let the transformer decide to end if it wants to\n # if next_token == TOKEN_END:\n # print(\"Model called end of sequence at:\", cur_i, \"/\", target_seq_length)\n # break\n\n cur_i += 1\n if cur_i % 50 == 0:\n print(cur_i, \"/\", target_seq_length)\n\n return gen_seq[:, :cur_i]\n\n # conditional generate\n def conditional_generate(self, beats, chord, seq, c, bs, ba, bt, bb, target_seq_length=1024, beam=0, beam_chance=1.0):\n\n assert (not self.training), \"Cannot generate while in training mode\"\n print(\"Generating sequence of max length:\", target_seq_length)\n chord = torch.tensor(chord, device=get_device()).unsqueeze(0)\n beats = torch.tensor(beats, device=get_device()).unsqueeze(0)\n\n gen_seq = torch.full((1, target_seq_length), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n primer = torch.tensor([c[0], bs[0], seq[0], ba[0]])\n primer_num = 1 # decide key to add\n num_primer = len(primer)\n gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())\n\n # print(\"primer:\",primer)\n # print(gen_seq)\n cur_i = num_primer\n # first input: C B N B\n cur_i_n = 1\n cur_i_b = 2\n cur_i_c = 1\n check_error = 0\n pbar = tqdm(total=len(seq)*9)\n while cur_i < target_seq_length:\n a = gen_seq[..., :cur_i].cpu().numpy()\n # gen_seq_batch = gen_seq.clone()\n # print(\"input:\", gen_seq[..., :cur_i], chord[..., :cur_i_c], beats[..., :cur_i_b])\n y = self.softmax(self.forward(gen_seq[..., :cur_i], chord[..., :cur_i_c],\n beats[..., :cur_i_b]))[..., :len(self.word2event)]\n token_probs = y[:, cur_i - 1, :]\n # check for y\n distrib = torch.distributions.categorical.Categorical(probs=token_probs)\n next_token = distrib.sample()\n if check_error > 256:\n print(\"error! 
regenerate!\")\n return False\n # next token is the next token\n if cur_i % 9 == 1: # token is chord, next token must be beats\n if not 178 < next_token < 191: # if it is not beat\n check_error += 1\n continue\n if cur_i % 9 in [2, 4, 6, 8]: # this token must be beat, next token must be note\n if not next_token < 129: # if it is not note\n check_error += 1\n continue\n else: # this token must be note, next token must be chord or beat\n if not 128 < next_token < 191: # if it is chord or beat\n check_error += 1\n continue\n\n if beam == 0:\n beam_ran = 2.0\n else:\n beam_ran = random.uniform(0, 1)\n\n if beam_ran <= beam_chance:\n token_probs = token_probs.flatten()\n top_res, top_i = torch.topk(token_probs, beam)\n\n beam_rows = top_i // VOCAB_SIZE\n beam_cols = top_i % VOCAB_SIZE\n\n gen_seq = gen_seq[beam_rows, :]\n gen_seq[..., cur_i] = beam_cols\n\n else:\n # print(\"next token:\",next_token)\n gen_seq[:, cur_i] = next_token\n cur_i += 1\n pbar.update(1)\n cur_i_n += 1\n if cur_i % 9 == 0 and primer_num < len(seq):\n # add C B_S N_S B_A\n gen_seq[:, cur_i] = chord.squeeze()[primer_num]\n gen_seq[:, cur_i+1] = torch.tensor(bs[primer_num], device=get_device())\n gen_seq[:, cur_i+2] = torch.tensor(seq[primer_num], device=get_device())\n gen_seq[:, cur_i+3] = torch.tensor(ba[primer_num], device=get_device())\n primer_num += 1\n cur_i += 4\n pbar.update(4)\n cur_i_n += 1\n cur_i_b += 2\n cur_i_c += 1\n # a = gen_seq[..., :cur_i].cpu().numpy()\n if cur_i % 9 != 0 and cur_i % 9 != 4 and primer_num < len(seq) + 1:\n # add B\n gen_seq[:, cur_i] = beats.squeeze()[cur_i_b]\n cur_i_b += 1\n cur_i_n += 1\n cur_i += 1\n pbar.update(1)\n # a = gen_seq[..., :cur_i].cpu().numpy()\n if primer_num == len(seq) and cur_i == len(seq) * 9:\n print(\"Model called end of sequence at:\", cur_i, \"/\", target_seq_length)\n break\n # print(cur_i, \"/\", target_seq_length)\n\n print(\"all errors:%d\" % check_error)\n return gen_seq[:, :cur_i]" }, { "identifier": "Discriminator", "path": "model/CoCoFormer.py", "snippet": "class Discriminator(nn.Module):\n \"\"\"\n to judge the true sample or fake\n return fake or true\n \"\"\"\n def __init__(self, input_emb=1, d_model=256, nhead=4, d_ff=512, dropout=0.5, out_emb=1024):\n super(Discriminator, self).__init__()\n self.linear1 = nn.Linear(input_emb, d_model)\n self.transformer = TransformerEncoderLayer(d_model, nhead, d_ff, dropout)\n self.linear2 = nn.Linear(d_model, out_emb)\n self.relu = nn.LeakyReLU(negative_slope=0.01, inplace=False)\n self.maxpool = nn.AdaptiveMaxPool1d(1)\n self.Norm1 = nn.LayerNorm(d_model)\n self.Norm2 = nn.LayerNorm(out_emb)\n self.dropout = nn.Dropout(dropout)\n self.sigmoid = nn.Sigmoid()\n self.loss = nn.BCELoss()\n\n def forward(self, x, labels):\n x = x.float().unsqueeze(2)\n x = self.dropout(self.Norm1(self.linear1(x)))\n x = self.transformer(x)\n logits = self.dropout(self.Norm2(self.linear2(x)))\n logits = self.sigmoid(self.relu(self.maxpool(logits)))\n logits = logits.reshape(logits.shape[0] * logits.shape[1], -1)\n labels = labels.reshape(logits.shape[0] * logits.shape[1], -1)\n loss = self.loss(logits, labels)\n\n # import numpy as np\n # logits = logits.cpu().detach().numpy()\n # labels = labels.cpu().detach().numpy()\n # loss = []\n # for i in logits:\n # loss.append(np.log(1-1/(1+np.exp(i[0]))))\n output = (loss, logits)\n\n return output\n\n def _reset_parameters(self):\n\n for p in self.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)" }, { "identifier": "PureTransformer", "path": "model/CoCoFormer.py", "snippet": "class 
PureTransformer(nn.Module):\n\n def __init__(self, word2event, event2word, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,\n dropout=0.1, max_sequence=2048, c_max_seq=256, b_max_seq=1024, rpr=False):\n super(PureTransformer, self).__init__()\n self.dummy = DummyDecoder()\n self.nlayers = n_layers\n self.nhead = num_heads\n self.d_model = d_model\n self.d_ff = dim_feedforward\n self.dropout = dropout\n self.max_seq = max_sequence\n self.rpr = rpr\n # word2event and event2word:\n self.word2event = word2event\n self.event2word = event2word\n # Input embedding\n self.embedding = nn.Embedding(VOCAB_SIZE, self.d_model)\n\n # Positional encoding\n self.positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq)\n\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=self.dummy\n )\n\n # Final output is a softmaxed linear layer\n self.Wout = nn.Linear(self.d_model, VOCAB_SIZE)\n self.softmax = nn.Softmax(dim=-1)\n\n # forward\n def forward(self, x, mask=True):\n\n if mask is True:\n mask = self.transformer.generate_square_subsequent_mask(x[0].shape[1]).to(get_device())\n else:\n mask = None\n\n x = self.embedding(x)\n\n # Input shape is (max_seq, batch_size, d_model)\n x = x.permute(1, 0, 2)\n\n x = self.positional_encoding(x)\n\n # Since there are no true decoder layers, the tgt is unused\n # Pytorch wants src and tgt to have some equal dims however\n x_out = self.transformer(src=x, tgt=x, src_mask=mask)\n\n # Back to (batch_size, max_seq, d_model)\n x_out = x_out.permute(1, 0, 2)\n\n y = self.Wout(x_out)\n # y = self.softmax(y)\n\n del mask\n\n # They are trained to predict the next note in sequence (we don't need the last one)\n return y" }, { "identifier": "SmoothCrossEntropyLoss", "path": "model/loss.py", "snippet": "class SmoothCrossEntropyLoss(_Loss):\n \"\"\"\n https://arxiv.org/abs/1512.00567\n \"\"\"\n __constants__ = ['label_smoothing', 'vocab_size', 'ignore_index', 'reduction']\n\n def __init__(self, label_smoothing, vocab_size, ignore_index=-100, reduction='mean', is_logits=True):\n assert 0.0 <= label_smoothing <= 1.0\n super().__init__(reduction=reduction)\n\n self.label_smoothing = label_smoothing\n self.vocab_size = vocab_size\n self.ignore_index = ignore_index\n self.input_is_logits = is_logits\n\n def forward(self, input, target):\n \"\"\"\n Args:\n input: [B * T, V]\n target: [B * T]\n Returns:\n cross entropy: [1]\n \"\"\"\n mask = (target == self.ignore_index).unsqueeze(-1)\n q = F.one_hot(target.long(), self.vocab_size).type(torch.float32)\n u = 1.0 / self.vocab_size\n q_prime = (1.0 - self.label_smoothing) * q + self.label_smoothing * u\n q_prime = q_prime.masked_fill(mask, 0)\n\n ce = self.cross_entropy_with_logits(q_prime, input)\n if self.reduction == 'mean':\n lengths = torch.sum(target != self.ignore_index)\n return ce.sum() / lengths\n elif self.reduction == 'sum':\n return ce.sum()\n else:\n raise NotImplementedError\n\n def cross_entropy_with_logits(self, p, q):\n return -torch.sum(p * (q - q.logsumexp(dim=-1, keepdim=True)), dim=-1)" }, { "identifier": "get_device", "path": "utilities/device.py", "snippet": "def get_device():\n\n if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)):\n return TORCH_CPU_DEVICE\n else:\n return TORCH_CUDA_DEVICE" }, { "identifier": "use_cuda", "path": "utilities/device.py", "snippet": "def use_cuda(cuda_bool):\n\n global 
USE_CUDA\n USE_CUDA = cuda_bool" }, { "identifier": "LrStepTracker", "path": "utilities/lr_scheduling.py", "snippet": "class LrStepTracker:\n\n def __init__(self, model_dim=512, warmup_steps=4000, init_steps=0):\n # Store Values\n self.warmup_steps = warmup_steps\n self.model_dim = model_dim\n self.init_steps = init_steps\n\n # Begin Calculations\n self.invsqrt_dim = (1 / math.sqrt(model_dim))\n self.invsqrt_warmup = (1 / (warmup_steps * math.sqrt(warmup_steps)))\n\n # step\n def step(self, step):\n\n step += self.init_steps\n if(step <= self.warmup_steps):\n return self.invsqrt_dim * self.invsqrt_warmup * step\n else:\n invsqrt_step = (1 / math.sqrt(step))\n return self.invsqrt_dim * invsqrt_step" }, { "identifier": "get_lr", "path": "utilities/lr_scheduling.py", "snippet": "def get_lr(optimizer):\n\n for param_group in optimizer.param_groups:\n return param_group['lr']" }, { "identifier": "parse_train_args", "path": "utilities/argument_funcs.py", "snippet": "def parse_train_args():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-input_dir\", type=str, default=\"./dataset/dataset/JSF_SATB\", help=\"Folder of preprocessed and pickled midi files\")\n parser.add_argument(\"-output_dir\", type=str, default=\"./baseline_3loss\", help=\"Folder to save model weights. Saves one every epoch\")\n parser.add_argument(\"-weight_modulus\", type=int, default=1, help=\"How often to save epoch weights (ex: value of 10 means save every 10 epochs)\")\n parser.add_argument(\"-print_modulus\", type=int, default=1, help=\"How often to print train results for a batch (batch loss, learn rate, etc.)\")\n parser.add_argument(\"-word2event\", type=str, default='./dataset/word2event.pkl', help='word table location: *.pkl')\n parser.add_argument(\"-n_workers\", type=int, default=2, help=\"Number of threads for the dataloader\")\n parser.add_argument(\"--force_cpu\", action=\"store_true\", help=\"Forces model to run on a cpu even when gpu is available\")\n parser.add_argument(\"--gpu\", default=[2], nargs='+', type=int, help=\"For Multi-GPUs training\")\n parser.add_argument(\"--no_tensorboard\", action=\"store_true\", help=\"Turns off tensorboard result reporting\")\n parser.add_argument('--scheduled_sampling', default=False, help='False means use teacher forcing, True means use scheduled_sampling')\n parser.add_argument(\"--scheduled_sampling_change_ratio\", default=0.5, type=int, help='ratio about mix golden target with output')\n parser.add_argument(\"-continue_weights\", type=str, default=None, help=\"Model weights to continue training based on\")\n parser.add_argument(\"-continue_epoch\", type=int, default=None, help=\"Epoch the continue_weights model was at\")\n\n parser.add_argument(\"-lr\", type=float, default=None, help=\"Constant learn rate. 
Leave as None for a custom scheduler.\")\n parser.add_argument(\"-ce_smoothing\", type=float, default=None, help=\"Smoothing parameter for smoothed cross entropy loss (defaults to no smoothing)\")\n parser.add_argument(\"-batch_size\", type=int, default=2, help=\"Batch size per gpu to use\")\n parser.add_argument(\"-epochs\", type=int, default=300, help=\"Number of epochs to use\")\n\n parser.add_argument(\"-adv_train\", default=True, help='add discriminator loss')\n parser.add_argument(\"-only_Transformer\", default=False, help='use pure Transformer, default set to false, True only for test')\n parser.add_argument(\"-loss\", default=[0.4, 0.2, 0.8], nargs='+', type=float, help='weights of loss, the last element effect when adv train is True')\n\n parser.add_argument(\"--rpr\", action=\"store_true\", help=\"Use a modified Transformer for Relative Position Representations\")\n parser.add_argument(\"-max_sequence\", type=int, default=2048, help=\"Maximum midi sequence to consider\")\n parser.add_argument(\"-n_layers\", type=int, default=6, help=\"Number of decoder layers to use\")\n parser.add_argument(\"-num_heads\", type=int, default=8, help=\"Number of heads to use for multi-head attention\")\n parser.add_argument(\"-d_model\", type=int, default=512, help=\"Dimension of the model (output dim of embedding layers, etc.)\")\n\n parser.add_argument(\"-dim_feedforward\", type=int, default=1024, help=\"Dimension of the feedforward layer\")\n\n parser.add_argument(\"-dropout\", type=float, default=0.1, help=\"Dropout rate\")\n\n parser.add_argument(\"--metrics\", default=False, help=\"evaluate TER(token error rate)\")\n\n return parser.parse_args()" }, { "identifier": "print_train_args", "path": "utilities/argument_funcs.py", "snippet": "def print_train_args(args):\n\n print(SEPERATOR)\n print(\"input_dir:\", args.input_dir)\n print(\"output_dir:\", args.output_dir)\n print(\"weight_modulus:\", args.weight_modulus)\n print(\"print_modulus:\", args.print_modulus)\n print(\"\")\n print(\"n_workers:\", args.n_workers)\n print(\"force_cpu:\", args.force_cpu)\n print(\"tensorboard:\", not args.no_tensorboard)\n print(\"\")\n print(\"continue_weights:\", args.continue_weights)\n print(\"continue_epoch:\", args.continue_epoch)\n print(\"\")\n print(\"lr:\", args.lr)\n print(\"ce_smoothing:\", args.ce_smoothing)\n print(\"batch_size:\", args.batch_size)\n print(\"epochs:\", args.epochs)\n print(\"\")\n print(\"rpr:\", args.rpr)\n print(\"max_sequence:\", args.max_sequence)\n print(\"n_layers:\", args.n_layers)\n print(\"num_heads:\", args.num_heads)\n print(\"d_model:\", args.d_model)\n print(\"\")\n print(\"dim_feedforward:\", args.dim_feedforward)\n print(\"dropout:\", args.dropout)\n print(SEPERATOR)\n print(\"\")" }, { "identifier": "write_model_params", "path": "utilities/argument_funcs.py", "snippet": "def write_model_params(args, output_file):\n\n o_stream = open(output_file, \"w\")\n\n o_stream.write(\"rpr: \" + str(args.rpr) + \"\\n\")\n o_stream.write(\"lr: \" + str(args.lr) + \"\\n\")\n o_stream.write(\"ce_smoothing: \" + str(args.ce_smoothing) + \"\\n\")\n o_stream.write(\"batch_size: \" + str(args.batch_size) + \"\\n\")\n o_stream.write(\"max_sequence: \" + str(args.max_sequence) + \"\\n\")\n o_stream.write(\"n_layers: \" + str(args.n_layers) + \"\\n\")\n o_stream.write(\"num_heads: \" + str(args.num_heads) + \"\\n\")\n o_stream.write(\"d_model: \" + str(args.d_model) + \"\\n\")\n o_stream.write(\"dim_feedforward: \" + str(args.dim_feedforward) + \"\\n\")\n o_stream.write(\"dropout: \" + 
str(args.dropout) + \"\\n\")\n\n o_stream.close()" }, { "identifier": "train_epoch", "path": "utilities/run_model.py", "snippet": "def train_epoch(cur_epoch, model, dataloader, loss, opt, lr_scheduler=None, print_modulus=1):\n\n args = parse_train_args()\n\n model.train()\n for batch_num, batch in enumerate(dataloader):\n time_before = time.time()\n\n opt.zero_grad()\n x = batch[0]\n tgt = batch[1]\n for i in range(len(batch[0])):\n if args.gpu[0] != -1:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cuda(device=args.gpu[0])\n if isinstance(x[i], torch.Tensor):\n x[i] = x[i].cuda(device=args.gpu[0])\n\n if isinstance(tgt[i], list):\n for j in range(len(tgt[i])):\n tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])\n if isinstance(tgt[i], torch.Tensor):\n tgt[i] = tgt[i].cuda(device=args.gpu[0])\n else:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cpu()\n tgt[i][j] = tgt[i][j].cpu()\n tgt = tgt[0][0]\n tgt = tgt.flatten()\n # with torch.no_grad():\n # y1 = model(x[1])\n # y1 = y1.reshape(y1.shape[0] * y1.shape[1], -1)\n # loss1 = loss.forward(y1, tgt)\n y2 = model(x[0])\n # y3 = model(x[2])\n # train for only CT\n # y = model(x)\n\n y2 = y2.reshape(y2.shape[0] * y2.shape[1], -1)\n loss2 = loss.forward(y2, tgt)\n # tgt = tgt.flatten()\n # add scheduled sampling\n # out = loss.forward(y, tgt)\n\n loss2.backward()\n # out = args.loss[0] * loss1 + args.loss[1] * loss2\n\n opt.step()\n if lr_scheduler is not None:\n lr_scheduler.step()\n\n time_after = time.time()\n time_took = time_after - time_before\n\n if (batch_num + 1) % print_modulus == 0:\n print(\"Epoch\", cur_epoch, \" Batch\", batch_num + 1, \"/\", len(dataloader), \"LR:\", get_lr(opt),\n \"Train total loss:\", float(loss2),\n \"Time (s):\", time_took)\n\n return" }, { "identifier": "train_with_adv", "path": "utilities/run_model.py", "snippet": "def train_with_adv(cur_epoch, model, model_disc, dataloader, loss, opt, opt_disc,\n lr_scheduler=None, lr_disc_scheduler=None, print_modulus=1):\n\n args = parse_train_args()\n out = -1\n start_epoch = 5\n model.train()\n model_disc.train()\n for batch_num, batch in enumerate(dataloader):\n time_before = time.time()\n\n opt.zero_grad()\n opt_disc.zero_grad()\n x = batch[0]\n tgt = batch[1]\n for i in range(len(batch[0])):\n if args.gpu[0] != -1:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cuda(device=args.gpu[0])\n if isinstance(x[i], torch.Tensor):\n x[i] = x[i].cuda(device=args.gpu[0])\n\n if isinstance(tgt[i], list):\n for j in range(len(tgt[i])):\n tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])\n if isinstance(tgt[i], torch.Tensor):\n tgt[i] = tgt[i].cuda(device=args.gpu[0])\n else:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cpu()\n tgt[i][j] = tgt[i][j].cpu()\n tgt = tgt[0][0]\n tgt = tgt.flatten()\n with torch.no_grad():\n y1 = model.module(x[1][0], x[1][1], x[1][2])\n y1 = y1.reshape(y1.shape[0] * y1.shape[1], -1)\n loss1 = loss.forward(y1, tgt)\n y2 = model.module(x[0][0], x[0][1], x[0][2])\n # discriminator model loss:\n if args.gpu[0] != -1:\n real_disc_label = torch.ones(len(batch[0]), batch[1][0][0].shape[1], 1).to(args.gpu[0])\n fake_disc_label = torch.zeros(len(batch[0]), y2.shape[1], 1).to(args.gpu[0])\n else:\n real_disc_label = torch.ones(len(batch[0]), batch[1][0][0].shape[1], 1)\n fake_disc_label = torch.zeros(len(batch[0]), y2.shape[1], 1)\n\n softmax = nn.Softmax(dim=-1)\n d_fake_loss, d_fake_logits = model_disc(torch.argmax(softmax(y2), dim=-1), 
fake_disc_label)\n d_real_loss, d_real_logits = model_disc(batch[1][0][0], real_disc_label)\n loss3 = d_fake_loss + d_real_loss\n # y3 = model(x[2])\n # train for only CT\n # y = model(x)\n\n y2 = y2.reshape(y2.shape[0] * y2.shape[1], -1)\n loss2 = loss.forward(y2, tgt)\n # tgt = tgt.flatten()\n # add scheduled sampling\n # out = loss.forward(y, tgt)\n\n # out = loss3\n out = args.loss[0] * loss1 + args.loss[1] * loss2 + args.loss[2] * loss3\n\n out.backward()\n opt.step()\n opt_disc.step()\n if lr_scheduler is not None:\n lr_scheduler.step()\n if lr_disc_scheduler is not None:\n lr_disc_scheduler.step()\n\n time_after = time.time()\n time_took = time_after - time_before\n\n if (batch_num + 1) % print_modulus == 0:\n print(\"Epoch\", cur_epoch, \" Batch\", batch_num + 1, \"/\", len(dataloader), \"LR:\", get_lr(opt_disc),\n \"Train total loss:\", float(out), \"Train loss1:\", float(loss1), \"Train loss2:\", float(loss2),\n \"Train loss3:\", float(loss3), \"Time (s):\", time_took)\n\n return" }, { "identifier": "eval_model", "path": "utilities/run_model.py", "snippet": "def eval_model(model, dataloader, loss):\n\n model.eval()\n args = parse_train_args()\n avg_acc = -1\n avg_loss = -1\n with torch.set_grad_enabled(False):\n n_test = len(dataloader)\n sum_loss = 0.0\n sum_acc = 0.0\n for batch in tqdm.tqdm(dataloader):\n x = batch[0]\n tgt = batch[1]\n for i in range(len(batch[0])):\n if args.gpu[0] != -1:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cuda(device=args.gpu[0])\n if isinstance(x[i], torch.Tensor):\n x[i] = x[i].cuda(device=args.gpu[0])\n\n if isinstance(tgt[i], list):\n for j in range(len(tgt[i])):\n tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])\n if isinstance(tgt[i], torch.Tensor):\n tgt[i] = tgt[i].cuda(device=args.gpu[0])\n else:\n x[i] = x[i].cpu()\n tgt[i] = tgt[i].cpu()\n tgt = tgt[0][0]\n tgt = tgt.flatten()\n\n # with torch.no_grad():\n # y1 = model(x[0])\n # y1 = y1.reshape(y1.shape[0] * y1.shape[1], -1)\n # loss1 = loss.forward(y1, tgt)\n y2 = model.module(x[0][0], x[0][1], x[0][2])\n y2 = y2.reshape(y2.shape[0] * y2.shape[1], -1)\n loss2 = loss.forward(y2, tgt)\n out = loss2\n\n sum_acc += float(compute_jsf_accuracy(y2, tgt))\n\n # y = y.reshape(y.shape[0] * y.shape[1], -1)\n # tgt = tgt.flatten()\n\n # out = loss.forward(y, tgt)\n\n sum_loss += float(out)\n\n avg_loss = sum_loss / n_test\n avg_acc = sum_acc / n_test\n\n return avg_loss, avg_acc" }, { "identifier": "get_metrics", "path": "utilities/run_model.py", "snippet": "def get_metrics(model, dataloader):\n \"\"\"\n Calculate TER: token error rate\n \"\"\"\n args = parse_eval_args()\n model.eval()\n # TER\n with torch.set_grad_enabled(False):\n n_test = len(dataloader)\n c_acc, Ns_acc, Bs_acc, Na_acc, Ba_acc, Nt_acc, Bt_acc, Nb_acc, Bb_acc = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n ter = []\n for batch in tqdm.tqdm(dataloader):\n x = batch[0]\n tgt = batch[1]\n for i in range(len(batch[0])):\n if args.gpu[0] != -1:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cuda(device=args.gpu[0])\n if isinstance(x[i], torch.Tensor):\n x[i] = x[i].cuda(device=args.gpu[0])\n\n if isinstance(tgt[i], list):\n for j in range(len(tgt[i])):\n tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])\n if isinstance(tgt[i], torch.Tensor):\n tgt[i] = tgt[i].cuda(device=args.gpu[0])\n else:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cpu()\n tgt[i][j] = tgt[i][j].cpu()\n\n y = model.module(x[0][0], x[0][1], x[0][2])\n # TER\n 
ter.append(compute_jsf_ter(y, tgt))\n\n for i in ter:\n c_acc += i[0]\n Bs_acc += i[1]\n Ns_acc += i[2]\n Ba_acc += i[3]\n Na_acc += i[4]\n Bt_acc += i[5]\n Nt_acc += i[6]\n Bb_acc += i[7]\n Nb_acc += i[8]\n TER = [c_acc / n_test, Bs_acc / n_test, Ns_acc / n_test, Ba_acc / n_test, Na_acc / n_test,\n Bt_acc / n_test, Nt_acc / n_test, Bb_acc / n_test, Nb_acc / n_test]\n # clear nan , or np.mean will only be nan if one is nan\n return TER" }, { "identifier": "train_with_pure_transformer", "path": "utilities/run_model.py", "snippet": "def train_with_pure_transformer(cur_epoch, model, dataloader, loss, opt, lr_scheduler=None, print_modulus=1):\n\n args = parse_train_args()\n\n model.train()\n for batch_num, batch in enumerate(dataloader):\n time_before = time.time()\n\n opt.zero_grad()\n\n x = batch[0][0][0].to(args.gpu[0])\n tgt = batch[1][0][0].to(args.gpu[0])\n\n y = model(x)\n\n y = y.reshape(y.shape[0] * y.shape[1], -1)\n tgt = tgt.flatten()\n\n out = loss.forward(y, tgt)\n\n out.backward()\n opt.step()\n\n if lr_scheduler is not None:\n lr_scheduler.step()\n\n time_after = time.time()\n time_took = time_after - time_before\n\n if (batch_num + 1) % print_modulus == 0:\n print(\"Epoch\", cur_epoch, \" Batch\", batch_num + 1, \"/\", len(dataloader), \"LR:\", get_lr(opt),\n \"Train loss:\", float(out), \"Time (s):\", time_took)\n\n return" }, { "identifier": "params", "path": "utilities/run_model.py", "snippet": "def params(dataloader, model, model_disc):\n\n args = parse_train_args()\n model.eval()\n for batch_num, batch in enumerate(dataloader):\n flops, params = profile(model.module, (batch[0][0][0].cuda(args.gpu[0]),\n batch[0][0][1].cuda(args.gpu[0]),\n batch[0][0][2].cuda(args.gpu[0]))\n )\n print('flops:', flops, 'params:', params)\n break" } ]
import os import csv import shutil import torch import torch.nn as nn import pickle from thop import profile from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from torch.optim import Adam from dataset.jsf import create_jsf_datasets from model.CoCoFormer import CoCoformer, Discriminator, PureTransformer from model.loss import SmoothCrossEntropyLoss from utilities.constants import * from utilities.device import get_device, use_cuda from utilities.lr_scheduling import LrStepTracker, get_lr from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params from utilities.run_model import train_epoch, train_with_adv, eval_model, get_metrics, train_with_pure_transformer, params from tensorboardX import SummaryWriter
10,976
# from dataset.e_piano import create_epiano_datasets, compute_epiano_accuracy, split_train_test CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss", "Train Accuracy", "Avg Eval loss", "Eval accuracy"] # Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy BASELINE_EPOCH = -1 # main def main(): """ ---------- Author: Damon Gwinn ---------- Entry point. Trains a model specified by command line arguments ---------- """ args = parse_train_args() print_train_args(args) if args.force_cpu: use_cuda(False) print("WARNING: Forced CPU usage, expect model to perform slower") print("") os.makedirs(args.output_dir, exist_ok=True) ##### Output prep ##### params_file = os.path.join(args.output_dir, "model_params.txt") write_model_params(args, params_file) weights_folder = os.path.join(args.output_dir, "weights") os.makedirs(weights_folder, exist_ok=True) results_folder = os.path.join(args.output_dir, "results") os.makedirs(results_folder, exist_ok=True) results_file = os.path.join(results_folder, "results.csv") best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle") best_acc_file = os.path.join(results_folder, "best_acc_weights.pickle") best_text = os.path.join(results_folder, "best_epochs.txt") ##### Tensorboard ##### if args.no_tensorboard: tensorboard_summary = None else: tensorboad_dir = os.path.join(args.output_dir, "tensorboard") tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir) ##### Datasets ##### # train_dataset, val_dataset, test_dataset = create_epiano_datasets(args.input_dir, args.max_sequence) train_dataset, test_dataset = create_jsf_datasets(args.input_dir, args.max_sequence) train_loader = DataLoader(train_dataset, batch_size=args.batch_size * len(args.gpu), num_workers=args.n_workers, shuffle=True) # val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers) test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers) ##### read word2event event2word f = open(args.word2event, 'rb') word2event = pickle.load(f) # reverse the vector event2word event2word = {} for key, val in word2event.items(): event2word[val] = key if args.only_Transformer: model = PureTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) else: model = CoCoformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) model_disc = Discriminator() if args.gpu[0] != -1: model = torch.nn.DataParallel(model, device_ids=args.gpu) model = model.cuda(device=args.gpu[0]) model_disc = torch.nn.DataParallel(model_disc, device_ids=args.gpu) model_disc = model_disc.cuda(device=args.gpu[0])
# from dataset.e_piano import create_epiano_datasets, compute_epiano_accuracy, split_train_test CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss", "Train Accuracy", "Avg Eval loss", "Eval accuracy"] # Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy BASELINE_EPOCH = -1 # main def main(): """ ---------- Author: Damon Gwinn ---------- Entry point. Trains a model specified by command line arguments ---------- """ args = parse_train_args() print_train_args(args) if args.force_cpu: use_cuda(False) print("WARNING: Forced CPU usage, expect model to perform slower") print("") os.makedirs(args.output_dir, exist_ok=True) ##### Output prep ##### params_file = os.path.join(args.output_dir, "model_params.txt") write_model_params(args, params_file) weights_folder = os.path.join(args.output_dir, "weights") os.makedirs(weights_folder, exist_ok=True) results_folder = os.path.join(args.output_dir, "results") os.makedirs(results_folder, exist_ok=True) results_file = os.path.join(results_folder, "results.csv") best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle") best_acc_file = os.path.join(results_folder, "best_acc_weights.pickle") best_text = os.path.join(results_folder, "best_epochs.txt") ##### Tensorboard ##### if args.no_tensorboard: tensorboard_summary = None else: tensorboad_dir = os.path.join(args.output_dir, "tensorboard") tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir) ##### Datasets ##### # train_dataset, val_dataset, test_dataset = create_epiano_datasets(args.input_dir, args.max_sequence) train_dataset, test_dataset = create_jsf_datasets(args.input_dir, args.max_sequence) train_loader = DataLoader(train_dataset, batch_size=args.batch_size * len(args.gpu), num_workers=args.n_workers, shuffle=True) # val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers) test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers) ##### read word2event event2word f = open(args.word2event, 'rb') word2event = pickle.load(f) # reverse the vector event2word event2word = {} for key, val in word2event.items(): event2word[val] = key if args.only_Transformer: model = PureTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) else: model = CoCoformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence=args.max_sequence, rpr=args.rpr, word2event=word2event, event2word=event2word) model_disc = Discriminator() if args.gpu[0] != -1: model = torch.nn.DataParallel(model, device_ids=args.gpu) model = model.cuda(device=args.gpu[0]) model_disc = torch.nn.DataParallel(model_disc, device_ids=args.gpu) model_disc = model_disc.cuda(device=args.gpu[0])
params(train_loader, model, model_disc)
17
2023-11-01 08:33:08+00:00
16k
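The LrStepTracker snippet in this record's context implements the inverse-square-root warmup schedule, `lr_mult = d_model**-0.5 * min(step * warmup**-1.5, step**-0.5)`, and the train.py imports pair it with torch's LambdaLR, stepping once per batch as in train_epoch. The sketch below re-implements that step function and wires it into LambdaLR on a dummy model; the Adam settings and the toy model are illustrative assumptions, not the configuration train.py actually uses.

```python
import math

import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import LambdaLR


def lr_multiplier(step: int, d_model: int = 512, warmup: int = 4000) -> float:
    """Inverse-sqrt warmup factor, mirroring LrStepTracker.step with init_steps=0."""
    if step <= warmup:
        return (1 / math.sqrt(d_model)) * (1 / (warmup * math.sqrt(warmup))) * step
    return (1 / math.sqrt(d_model)) * (1 / math.sqrt(step))


# Illustrative pairing: base lr of 1.0 so the printed lr equals the multiplier.
model = torch.nn.Linear(512, 512)
opt = Adam(model.parameters(), lr=1.0)
scheduler = LambdaLR(opt, lr_lambda=lr_multiplier)

for _ in range(5):  # one scheduler step per training batch
    opt.step()
    scheduler.step()
    print(opt.param_groups[0]["lr"])
```

Because the multiplier grows linearly up to `warmup` steps and then decays as `step**-0.5`, the optimizer's base lr is typically set to 1.0 so the schedule alone controls the effective learning rate.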
tiendatnguyen-vision/Orbit-symmetrize
RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/representation.py
[ { "identifier": "Group", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/groups.py", "snippet": "class Group(nn.Module):\n \"\"\" Abstract Group Object which new groups should inherit from. \"\"\"\n\n def __init__(self):\n super().__init__()\n self.lie_algebra = NotImplemented # The continuous generators\n self.discrete_generators = NotImplemented # The discrete generators\n self.z_scale = None # For scale noise for sampling elements\n self.is_orthogonal = None\n self.is_permutation = None\n self.d = NotImplemented # The dimension of the base representation\n self.device = torch.device('cpu')\n self.args = None\n\n def init(self, *args):\n \"\"\" Initialize the group object. \"\"\"\n # get the dimension of the base group representation\n if self.d is NotImplemented:\n if (self.lie_algebra is not NotImplemented) and \\\n len(self.lie_algebra) > 0:\n self.d = self.lie_algebra[0].size(-1)\n if (self.discrete_generators is not NotImplemented) and \\\n len(self.discrete_generators) > 0:\n self.d = self.discrete_generators[0].size(-1)\n\n self.args = args\n\n if self.lie_algebra is NotImplemented:\n self.lie_algebra = torch.zeros((0, self.d, self.d), device=self.device)\n if self.discrete_generators is NotImplemented:\n self.discrete_generators = torch.zeros((0, self.d, self.d), device=self.device)\n\n self.to(self.device)\n\n # set orthogonal flag automatically if not specified\n if self.is_permutation:\n self.is_orthogonal = True\n if self.is_orthogonal is None:\n self.is_orthogonal = True\n if len(self.lie_algebra) != 0:\n Id = torch.eye(self.d, device=self.device)\n A_dense = torch.stack([[email protected](Ai.dtype) for Ai in self.lie_algebra])\n self.is_orthogonal &= rel_err(-A_dense.transpose(2, 1), A_dense) < 1e-6\n if len(self.discrete_generators) != 0:\n Id = torch.eye(self.d, device=self.device)\n h_dense = torch.stack([[email protected](hi.dtype) for hi in self.discrete_generators])\n self.is_orthogonal &= rel_err(h_dense.transpose(2, 1)@h_dense, Id[None]) < 1e-6\n\n # set regular flag automatically if not specified\n if self.is_orthogonal and (self.is_permutation is None):\n self.is_permutation = True\n # no infinitesmal generators and all rows have one 1\n self.is_permutation &= (len(self.lie_algebra) == 0)\n if len(self.discrete_generators) != 0:\n Id = torch.eye(self.d, device=self.device)\n h_dense = torch.stack([[email protected](hi.dtype) for hi in self.discrete_generators])\n self.is_permutation &= (((h_dense-1).abs()<1e-6).long().sum(-1) == 1).all()\n\n def exp(self, A):\n \"\"\" Matrix exponential \"\"\"\n return torch.linalg.matrix_exp(A)\n\n def num_constraints(self):\n \"\"\" Number of constraints to solve for the group \"\"\"\n return len(self.lie_algebra)+len(self.discrete_generators)\n\n def sample(self):\n \"\"\"Draw a sample from the group (not necessarily Haar measure)\"\"\"\n return self.samples(1)[0]\n\n def samples(self, N):\n \"\"\" Draw N samples from the group (not necessarily Haar measure)\"\"\"\n Id = torch.eye(self.d, device=self.device)\n A_dense = torch.stack([[email protected](Ai.dtype) for Ai in self.lie_algebra]) \\\n if len(self.lie_algebra) \\\n else torch.zeros((0, self.d, self.d), device=self.device)\n h_dense = torch.stack([[email protected](hi.dtype) for hi in self.discrete_generators]) \\\n if len(self.discrete_generators) \\\n else torch.zeros((0, self.d, self.d), device=self.device)\n z = torch.randn(N, A_dense.size(0), device=self.device)\n if self.z_scale is not None:\n z *= self.z_scale\n k = torch.randint(-MAX_POWER, 
MAX_POWER+1, (N, h_dense.size(0), 3), device=self.device)\n return noise2samples(z, k, A_dense, h_dense)\n\n def check_valid_group_elems(self, g):\n \"\"\" Check that the group elements are valid \"\"\"\n return True\n\n def __str__(self):\n return repr(self)\n\n def __repr__(self):\n outstr = f\"{self.__class__}\"\n if self.args:\n outstr += '('+''.join(repr(arg) for arg in self.args)+')'\n return outstr\n\n def __eq__(self, G2): # TODO: more permissive by checking that spans are equal?\n return repr(self) == repr(G2)\n\n def __hash__(self):\n return hash(repr(self))\n\n def __lt__(self, other):\n \"\"\" For sorting purposes only \"\"\"\n return hash(self) < hash(other)\n\n def __mul__(self, other):\n return DirectProduct(self, other)\n\n def forward(self):\n \"\"\" Forward method, unused. \"\"\"\n return None\n\n def to(self, *args, **kwargs):\n \"\"\" Move the group to the specified device \"\"\"\n if isinstance(self.lie_algebra, torch.Tensor):\n self.lie_algebra = self.lie_algebra.to(*args, **kwargs)\n elif isinstance(self.lie_algebra, list):\n self.lie_algebra = [Ai.to(*args, **kwargs) for Ai in self.lie_algebra]\n if isinstance(self.discrete_generators, torch.Tensor):\n self.discrete_generators = self.discrete_generators.to(*args, **kwargs)\n elif isinstance(self.discrete_generators, list):\n self.discrete_generators = [hi.to(*args, **kwargs) for hi in self.discrete_generators]\n if self.z_scale is not None:\n self.z_scale = self.z_scale.to(*args, **kwargs)\n self.device = torch.empty(0).to(*args, **kwargs).device\n return self" }, { "identifier": "LinearOperator", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operator_base.py", "snippet": "class LinearOperator(nn.Module):\n \"\"\" Common interface for performing matrix vector products\n Many iterative methods (e.g. cg, gmres) do not need to know the\n individual entries of a matrix to solve a linear system A*x=b.\n Such solvers only require the computation of matrix vector\n products, A*v where v is a dense vector. This class serves as\n an abstract interface between iterative solvers and matrix-like\n objects.\n To construct a concrete LinearOperator, either pass appropriate\n callables to the constructor of this class, or subclass it.\n A subclass must implement either one of the methods ``_matvec``\n and ``_matmat``, and the attributes/properties ``shape`` (pair of\n integers) and ``dtype`` (may be None). It may call the ``__init__``\n on this class to have these attributes validated. Implementing\n ``_matvec`` automatically implements ``_matmat`` (using a naive\n algorithm) and vice-versa.\n Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``\n to implement the Hermitian adjoint (conjugate transpose). As with\n ``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or\n ``_adjoint`` implements the other automatically. Implementing\n ``_adjoint`` is preferable; ``_rmatvec`` is mostly there for\n backwards compatibility.\n Parameters\n ----------\n shape : tuple\n Matrix dimensions (M, N).\n matvec : callable f(v)\n Returns returns A * v.\n rmatvec : callable f(v)\n Returns A^H * v, where A^H is the conjugate transpose of A.\n matmat : callable f(V)\n Returns A * V, where V is a dense matrix with dimensions (N, K).\n dtype : dtype\n Data type of the matrix.\n rmatmat : callable f(V)\n Returns A^H * V, where V is a dense matrix with dimensions (M, K).\n Attributes\n ----------\n args : tuple\n For linear operators describing products etc. 
of other linear\n operators, the operands of the binary operation.\n ndim : int\n Number of dimensions (this is always 2)\n See Also\n --------\n aslinearoperator : Construct LinearOperators\n Notes\n -----\n The user-defined matvec() function must properly handle the case\n where v has shape (N,) as well as the (N,1) case. The shape of\n the return type is handled internally by LinearOperator.\n LinearOperator instances can also be multiplied, added with each\n other and exponentiated, all lazily: the result of these operations\n is always a new, composite LinearOperator, that defers linear\n operations to the original operators and combines the results.\n More details regarding how to subclass a LinearOperator and several\n examples of concrete LinearOperator instances can be found in the\n external project `PyLops <https://pylops.readthedocs.io>`_.\n Examples\n --------\n >>> def mv(v):\n ... return torch.tensor([2*v[0], 3*v[1]])\n ...\n >>> A = LinearOperator((2,2), matvec=mv)\n >>> A\n <2x2 _CustomLinearOperator with dtype=float64>\n >>> A.matvec(torch.ones(2))\n tensor([ 2., 3.])\n >>> A * torch.ones(2)\n tensor([ 2., 3.])\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n if cls is LinearOperator:\n # Operate as _CustomLinearOperator factory.\n return super(LinearOperator, cls).__new__(_CustomLinearOperator)\n\n obj = super(LinearOperator, cls).__new__(cls)\n if (type(obj)._matvec == LinearOperator._matvec\n and type(obj)._matmat == LinearOperator._matmat):\n warnings.warn(\"LinearOperator subclass should implement\"\n \" at least one of _matvec and _matmat.\",\n category=RuntimeWarning, stacklevel=2)\n return obj\n\n def __init__(self):\n super().__init__()\n self.ndim = 2\n self.dtype = None\n self.shape = None\n self.device = None\n\n def init(self, dtype, shape, device):\n \"\"\" Initialize this LinearOperator.\n To be called by subclasses. 
``dtype`` may be None; ``shape`` should\n be convertible to a length-2 tuple.\n Called from subclasses at the end of the __init__ routine.\n \"\"\"\n if dtype is None:\n dtype = torch.float # force float 32\n else:\n if not isinstance(dtype, torch.dtype):\n dtype = torch_dtype(dtype)\n\n shape = tuple(shape)\n if not isshape(shape):\n raise ValueError(f\"invalid shape {(shape,)} (must be 2-d)\")\n\n self.dtype = dtype\n self.shape = torch.Size(shape)\n self.device = torch_device(device)\n\n def size(self, dim=None):\n \"\"\" Return the size of this LinearOperator.\n This is a synonym for ``shape``.\n \"\"\"\n return self.shape if dim is None else self.shape[dim]\n\n def _matmat(self, V):\n \"\"\" Default matrix-matrix multiplication handler.\n Falls back on the user-defined _matvec method, so defining that will\n define matrix multiplication (though in a very suboptimal way).\n \"\"\"\n return torch.hstack([self.matvec(col.reshape(-1, 1)) for col in V.T])\n\n def _matvec(self, v):\n \"\"\" Default matrix-vector multiplication handler.\n If self is a linear operator of shape (M, N), then this method will\n be called on a shape (N,) or (N, 1) ndarray, and should return a\n shape (M,) or (M, 1) ndarray.\n This default implementation falls back on _matmat, so defining that\n will define matrix-vector multiplication as well.\n \"\"\"\n return self.matmat(v.reshape(-1, 1))\n\n def matvec(self, v):\n \"\"\" Matrix-vector multiplication.\n Performs the operation y=A*v where A is an MxN linear\n operator and v is a column vector or 1-d array.\n Parameters\n ----------\n v : {matrix, ndarray}\n An array with shape (N,) or (N,1).\n Returns\n -------\n y : {matrix, ndarray}\n A matrix or ndarray with shape (M,) or (M,1) depending\n on the type and shape of the x argument.\n Notes\n -----\n This matvec wraps the user-specified matvec routine or overridden\n _matvec method to ensure that y has the correct shape and type.\n \"\"\"\n M, N = self.shape\n if v.shape != (N,) and v.shape != (N, 1):\n raise ValueError('dimension mismatch')\n\n y = self._matvec(v)\n\n if v.ndim == 1:\n y = y.reshape(M)\n elif v.ndim == 2:\n y = y.reshape(M, 1)\n else:\n raise ValueError('invalid shape returned by user-defined matvec()')\n\n return y\n\n def rmatvec(self, v):\n \"\"\" Adjoint matrix-vector multiplication.\n Performs the operation y = A^H * v where A is an MxN linear\n operator and v is a column vector or 1-d array.\n Parameters\n ----------\n v : {matrix, ndarray}\n An array with shape (M,) or (M,1).\n Returns\n -------\n y : {matrix, ndarray}\n A matrix or ndarray with shape (N,) or (N,1) depending\n on the type and shape of the v argument.\n Notes\n -----\n This rmatvec wraps the user-specified rmatvec routine or overridden\n _rmatvec method to ensure that y has the correct shape and type.\n \"\"\"\n M, N = self.shape\n\n if v.shape != (M,) and v.shape != (M, 1):\n raise ValueError('dimension mismatch')\n\n y = self._rmatvec(v)\n\n if v.ndim == 1:\n y = y.reshape(N)\n elif v.ndim == 2:\n y = y.reshape(N, 1)\n else:\n raise ValueError('invalid shape returned by user-defined rmatvec()')\n\n return y\n\n def _rmatvec(self, v):\n \"\"\" Default implementation of _rmatvec; defers to adjoint. 
\"\"\"\n if type(self)._adjoint == LinearOperator._adjoint:\n # _adjoint not overridden, prevent infinite recursion\n raise NotImplementedError\n return self.H().matvec(v)\n\n def matmat(self, V):\n \"\"\" Matrix-matrix multiplication.\n Performs the operation y=A*V where A is an MxN linear\n operator and V dense N*K matrix or ndarray.\n Parameters\n ----------\n V : {matrix, ndarray}\n An array with shape (N,K).\n Returns\n -------\n Y : {matrix, ndarray}\n A matrix or ndarray with shape (M,K) depending on\n the type of the V argument.\n Notes\n -----\n This matmat wraps any user-specified matmat routine or overridden\n _matmat method to ensure that y has the correct type.\n \"\"\"\n if V.ndim != 2:\n raise ValueError(f'expected 2-d ndarray or matrix, not {V.ndim}-d')\n\n if V.size(0) != self.size(1):\n raise ValueError(f'dimension mismatch: {self.shape}, {V.shape}')\n\n Y = self._matmat(V)\n return Y\n\n def rmatmat(self, V):\n \"\"\" Adjoint matrix-matrix multiplication.\n Performs the operation y = A^H * V where A is an MxN linear\n operator and V is a column vector or 1-d array, or 2-d array.\n The default implementation defers to the adjoint.\n Parameters\n ----------\n V : {matrix, ndarray}\n A matrix or 2D array.\n Returns\n -------\n Y : {matrix, ndarray}\n A matrix or 2D array depending on the type of the input.\n Notes\n -----\n This rmatmat wraps the user-specified rmatmat routine.\n \"\"\"\n if V.ndim != 2:\n raise ValueError(f'expected 2-d matrix, not {V.ndim}-d')\n\n if V.size(0) != self.size(0):\n raise ValueError(f'dimension mismatch: {self.shape}, {V.shape}')\n\n Y = self._rmatmat(V)\n return Y\n\n def _rmatmat(self, V):\n \"\"\" Default implementation of _rmatmat defers to rmatvec or adjoint. \"\"\"\n if type(self)._adjoint == LinearOperator._adjoint:\n return torch.hstack([self.rmatvec(col.reshape(-1, 1)) for col in V.T])\n return self.H().matmat(V)\n\n def forward(self, v):\n \"\"\" Matrix-vector or matrix-matrix multiplication. 
\"\"\"\n return self*v\n\n def __mul__(self, v):\n return self.dot(v)\n\n def dot(self, v):\n \"\"\" Matrix-matrix or matrix-vector multiplication.\n Parameters\n ----------\n v : array_like\n 1-d or 2-d array, representing a vector or matrix.\n Returns\n -------\n Av : array\n 1-d or 2-d array (depending on the shape of x) that represents\n the result of applying this linear operator on x.\n \"\"\"\n if isinstance(v, LinearOperator):\n return _ProductLinearOperator(self, v)\n if torch.is_tensor(v):\n if v.ndim == 0:\n return _ScaledLinearOperator(self, v)\n if v.ndim == 1 or v.ndim == 2 and v.size(1) == 1:\n return self.matvec(v)\n if v.ndim == 2:\n return self.matmat(v)\n raise ValueError(f'expected 1-d or 2-d array or matrix, got {v}')\n\n def __matmul__(self, other):\n if isscalar(other):\n raise ValueError(\"Scalar operands are not allowed, use '*' instead\")\n return self.__mul__(other)\n\n def __rmatmul__(self, other):\n if isscalar(other):\n raise ValueError(\"Scalar operands are not allowed, use '*' instead\")\n return self.__rmul__(other)\n\n def __rmul__(self, x):\n if isscalar(x):\n return _ScaledLinearOperator(self, x)\n return NotImplemented\n\n def __pow__(self, p):\n if isscalar(p):\n return _PowerLinearOperator(self, p)\n return NotImplemented\n\n def __add__(self, x):\n if isinstance(x, LinearOperator):\n return _SumLinearOperator(self, x)\n if torch.is_tensor(x) and x.ndim == 2:\n return _SumLinearOperator(self, Lazy(x))\n return NotImplemented\n\n def __radd__(self, x):\n return self.__add__(x)\n\n def __neg__(self):\n return _ScaledLinearOperator(self, -1)\n\n def __sub__(self, x):\n return self.__add__(-x)\n\n def __repr__(self):\n M, N = self.shape\n if self.dtype is None:\n dtype = 'unspecified dtype'\n else:\n dtype = 'dtype=' + str(self.dtype)\n\n return f'<{M}x{N} {self.__class__.__name__} with {dtype}>'\n\n def adjoint(self):\n \"\"\" Hermitian adjoint.\n Returns the Hermitian adjoint of self, aka the Hermitian\n conjugate or Hermitian transpose. For a complex matrix, the\n Hermitian adjoint is equal to the conjugate transpose.\n Can be abbreviated self.H instead of self.adjoint().\n Returns\n -------\n A_H : LinearOperator\n Hermitian adjoint of self.\n \"\"\"\n return self._adjoint()\n\n def H(self):\n \"\"\" Hermitian adjoint. \"\"\"\n return self.adjoint()\n\n def transpose(self):\n \"\"\" Transpose this linear operator.\n Returns a LinearOperator that represents the transpose of this one.\n Can be abbreviated self.T instead of self.transpose().\n \"\"\"\n return self._transpose()\n\n def t(self):\n \"\"\" Transpose this linear operator. \"\"\"\n return self.transpose()\n\n def _adjoint(self):\n \"\"\" Default implementation of _adjoint; defers to rmatvec. \"\"\"\n return _AdjointLinearOperator(self)\n\n def _transpose(self):\n \"\"\" Default implementation of _transpose; defers to rmatvec + conj\"\"\"\n return _TransposedLinearOperator(self)\n\n def invt(self):\n \"\"\" Default implementation of inverse transpose; defers to inv + T \"\"\"\n return (self ** -1).transpose()\n\n def to_dense(self):\n \"\"\" Default implementation of to_dense which produces the dense\n matrix corresponding to the given lazy matrix. Defaults to\n multiplying by the identity \"\"\"\n return [email protected](self.size(-1), device=self.device)\n\n def to(self, device):\n \"\"\" Move this linear operator to a new device. 
\"\"\"\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "ConcatLazy", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class ConcatLazy(LinearOperator):\n \"\"\" Produces a linear operator equivalent to concatenating\n a collection of matrices Ms along axis=0 \"\"\"\n\n def __init__(self, Ms):\n super().__init__()\n self.Ms = Ms\n assert all(M.size(0) == Ms[0].size(0) for M in Ms),\\\n f\"Trying to concatenate matrices of different sizes {[M.shape for M in Ms]}\"\n shape = (sum(M.size(0) for M in Ms), Ms[0].size(1))\n device = get_device(Ms)\n self.init(None, shape, device)\n self.to(self.device)\n\n def _matmat(self, V):\n return torch.cat([M@V for M in self.Ms])\n\n def _rmatmat(self, V):\n Vs = torch.chunk(V, len(self.Ms))\n return sum(Mi.t()@Vi for Mi, Vi in zip(self.Ms, Vs))\n\n def to_dense(self):\n dense_Ms = [M.to_dense() if isinstance(M, LinearOperator) else M for M in self.Ms]\n return torch.cat(dense_Ms)\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "I", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class I(LinearOperator):\n \"\"\" Identity operator. \"\"\"\n\n def __init__(self, d, device=None):\n super().__init__()\n shape = (d, d)\n self.init(None, shape, device)\n\n def _matmat(self, V): # (c,k)\n return V\n\n def _matvec(self, v):\n return v\n\n def _adjoint(self):\n return self\n\n def invt(self):\n return self" }, { "identifier": "lazify", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "def lazify(x):\n \"\"\" Convert a tensor LinearOperator. \"\"\"\n if isinstance(x, LinearOperator):\n return x\n if torch.is_tensor(x):\n return Lazy(x)\n raise NotImplementedError" }, { "identifier": "densify", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "def densify(x):\n \"\"\" Convert a LinearOperator to a dense tensor. \"\"\"\n if isinstance(x, LinearOperator):\n return x.to_dense()\n if torch.is_tensor(x):\n return x\n raise NotImplementedError" }, { "identifier": "LazyJVP", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyJVP(LinearOperator):\n \"\"\" Lazy Jacobian-vector product. \"\"\"\n\n def __init__(self, operator_fn, X, TX):\n super().__init__()\n self.operator_fn = operator_fn\n self.X = X\n self.TX = TX\n self.init(torch.float, operator_fn(X).shape, X.device)\n self.to(self.device)\n\n def vjp(self, v):\n \"\"\" Computes the vector-Jacobian product \"\"\"\n return torch.autograd.functional.jvp(\n lambda x: self.operator_fn(x)@v, [self.X], [self.TX])[1]\n\n def vjp_T(self, v):\n \"\"\" Computes the vector-Jacobian product \"\"\"\n return torch.autograd.functional.jvp(\n lambda x: self.operator_fn(x).t()@v, [self.X], [self.TX])[1]\n\n def _matmat(self, V):\n return self.vjp(V)\n\n def _matvec(self, v):\n return self.vjp(v)\n\n def _rmatmat(self, V):\n return self.vjp_T(V)\n\n def to(self, device):\n self.X = self.X.to(device)\n self.TX = self.TX.to(device)\n self.device = self.X.device\n return self" }, { "identifier": "LazyPerm", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyPerm(LinearOperator):\n \"\"\" Lazy permutation. 
\"\"\"\n\n def __init__(self, perm):\n super().__init__()\n self.perm = perm\n shape = (len(perm), len(perm))\n self.init(None, shape, perm.device)\n\n def _matmat(self, V):\n return V[self.perm]\n\n def _matvec(self, v):\n return v[self.perm]\n\n def _adjoint(self):\n return LazyPerm(torch.argsort(self.perm))\n\n def invt(self):\n return self\n\n def to(self, device):\n self.perm = self.perm.to(device)\n self.device = self.perm.device\n return self" }, { "identifier": "LazyDirectSum", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyDirectSum(LinearOperator):\n \"\"\" Lazy direct sum. \"\"\"\n\n def __init__(self, Ms, multiplicities=None):\n super().__init__()\n self.Ms = Ms\n self.multiplicities = [1 for _ in Ms] if multiplicities is None else multiplicities\n shape = (sum(Mi.size(0)*c for Mi, c in zip(Ms, multiplicities)),\n sum(Mi.size(0)*c for Mi, c in zip(Ms, multiplicities)))\n device = get_device(Ms)\n self.init(None, shape, device)\n self.to(self.device)\n\n def _matvec(self, v):\n return lazy_direct_matmat(v, self.Ms, self.multiplicities)\n\n def _matmat(self, V): # (n,k)\n return lazy_direct_matmat(V, self.Ms, self.multiplicities)\n\n def _adjoint(self):\n return LazyDirectSum([Mi.t() for Mi in self.Ms])\n\n def invt(self):\n return LazyDirectSum([M.invt() for M in self.Ms])\n\n def to_dense(self):\n Ms_all = [M for M, c in zip(self.Ms, self.multiplicities)\n for _ in range(c)]\n Ms_all = [Mi.to_dense() if isinstance(Mi, LinearOperator)\n else Mi for Mi in Ms_all]\n return torch.block_diag(*Ms_all)\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "LazyKron", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyKron(LinearOperator):\n \"\"\" Lazy tensor product. \"\"\"\n\n def __init__(self, Ms):\n super().__init__()\n self.Ms = Ms\n shape = product([Mi.size(0) for Mi in Ms]), product([Mi.size(1) for Mi in Ms])\n device = get_device(Ms)\n self.init(None, shape, device)\n self.to(self.device)\n\n def _matvec(self, v):\n return self._matmat(v).reshape(-1)\n\n def _matmat(self, V):\n eV = V.reshape(*[Mi.size(-1) for Mi in self.Ms], -1)\n for i, M in enumerate(self.Ms):\n eV_front = torch.movedim(eV, i, 0)\n MeV_front = (M@eV_front.reshape(M.size(-1), -1)).reshape(M.size(0), *eV_front.shape[1:])\n eV = torch.movedim(MeV_front, 0, i)\n return eV.reshape(self.size(0), eV.size(-1))\n\n def _adjoint(self):\n return LazyKron([Mi.t() for Mi in self.Ms])\n\n def invt(self):\n return LazyKron([M.invt() for M in self.Ms])\n\n def to_dense(self):\n self.to(self.device)\n Ms = [M.to_dense() if isinstance(M, LinearOperator) else M for M in self.Ms]\n return reduce(torch.kron, Ms)\n\n def __new__(cls, Ms):\n if len(Ms) == 1:\n return Ms[0]\n return super().__new__(cls)\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "LazyKronsum", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyKronsum(LinearOperator):\n \"\"\" Lazy tensor sum. 
\"\"\"\n\n def __init__(self, Ms):\n super().__init__()\n self.Ms = Ms\n shape = product([Mi.size(0) for Mi in Ms]), product([Mi.size(1) for Mi in Ms])\n dtype = torch.float\n device = get_device(Ms)\n self.init(dtype, shape, device)\n self.to(self.device)\n\n def _matvec(self, v):\n return self._matmat(v).reshape(-1)\n\n def _matmat(self, V):\n eV = V.reshape(*[Mi.size(-1) for Mi in self.Ms], -1)\n out = 0*eV\n for i, M in enumerate(self.Ms):\n eV_front = torch.movedim(eV, i, 0)\n M, eV_front = dtype_cast(M, eV_front)\n MeV_front = (M@eV_front.reshape(M.size(-1), -1)).reshape(M.size(0), *eV_front.shape[1:])\n out, MeV_front = dtype_cast(out, MeV_front)\n out += torch.movedim(MeV_front, 0, i)\n return out.reshape(self.size(0), eV.size(-1))\n\n def _adjoint(self):\n return LazyKronsum([Mi.t() for Mi in self.Ms])\n\n def to_dense(self):\n Ms = [M.to_dense() if isinstance(M, LinearOperator) else M for M in self.Ms]\n return reduce(kronsum, Ms)\n\n def __new__(cls, Ms):\n if len(Ms) == 1:\n return Ms[0]\n return super().__new__(cls)\n\n # could also be implemented as follows,\n # but fusing the sum into a single linearOperator is faster\n # def lazy_kronsum(Ms):\n # n = len(Ms)\n # lprod = np.cumprod([1]+[mi.size(-1) for mi in Ms])\n # rprod = np.cumprod([1]+[mi.size(-1) for mi in reversed(Ms)])[::-1]\n # return reduce(lambda a,b: a+b,[lazy_kron([I(lprod[i]),Mi,I(rprod[i+1])])\n # for i,Mi in enumerate(Ms)])\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "lazy_direct_matmat", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "def lazy_direct_matmat(v, Ms, mults):\n \"\"\" Computes the matrix-vector product of a direct sum of matrices\n with a vector. \"\"\"\n k = v.size(1) if len(v.shape) > 1 else 1\n i = 0\n y = []\n for M, multiplicity in zip(Ms, mults):\n i_end = i+multiplicity*M.size(-1)\n elems = M@v[i:i_end][None].reshape(k*multiplicity, M.size(-1)).t()\n y.append(elems.t().reshape(k, multiplicity*M.size(0)).t())\n i = i_end\n y = torch.cat(y) # concatenate over rep axis\n return y" }, { "identifier": "product", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "def product(c):\n \"\"\" Product of a list of numbers. \"\"\"\n return reduce(lambda a, b: a*b, c)" }, { "identifier": "orthogonal_complement", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/utils.py", "snippet": "def orthogonal_complement(proj):\n \"\"\" Computes the orthogonal complement to a given matrix proj\"\"\"\n _, S, Vh = torch.linalg.svd(proj, full_matrices=True)\n rank = (S > 1e-5).sum()\n return Vh[rank:].conj().t()" }, { "identifier": "krylov_constraint_solve", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/utils.py", "snippet": "def krylov_constraint_solve(C, tol=1e-5):\n \"\"\" Computes the solution basis Q for the linear constraint CQ=0 and QᵀQ=I\n up to specified tolerance with C expressed as a LinearOperator. 
\"\"\"\n r = 5\n if C.size(0)*r*2 > 2e9:\n raise RuntimeError(f\"Solns for contraints {C.shape} too large to fit in memory\")\n found_rank = 5\n while found_rank == r:\n r *= 2 # Iterative doubling of rank until large enough to include the full solution space\n if C.size(0)*r > 2e9:\n logging.error(\"Hit memory limits, switching to \"\n \"sample equivariant subspace of size %r\", found_rank)\n break\n Q = krylov_constraint_solve_upto_r(C, r, tol)\n found_rank = Q.size(-1)\n return Q" }, { "identifier": "get_device", "path": "RotatedMNIST/LPS_orbit/emlp-pytorch/emlp_pytorch/reps/utils.py", "snippet": "def get_device(operators, devices=None):\n \"\"\" Returns the device of the first operator that has a device attribute. \"\"\"\n if devices is None:\n devices = []\n for obj in operators:\n if obj is not None and hasattr(obj, 'device') and obj.device.type != 'cpu':\n return obj.device\n return torch.device('cpu')" } ]
import math
import logging
import itertools
import torch
from functools import lru_cache as cache, reduce
from collections import defaultdict
from plum import dispatch
from torch import nn
from ..groups import Group
from .linear_operator_base import LinearOperator
from .linear_operators import ConcatLazy, I, lazify, densify, LazyJVP, LazyPerm, \
    LazyDirectSum, LazyKron, LazyKronsum, lazy_direct_matmat, product
from .utils import orthogonal_complement, krylov_constraint_solve, get_device
11093
if rb == 1: return ra if rb == 0: return 0 if ra.concrete(): return SumRep(*(rb*[ra])) return DeferredSumRep(*(rb*[ra])) @dispatch def mul_reps(ra: int, rb): # pylint: disable=function-redefined """ Product of a scalar and a representation. """ return mul_reps(rb, ra) # pylint: disable=W1114:arguments-out-of-order class ScalarRep(Rep): """ The trivial representation of the group G. """ def __init__(self, G=None): super().__init__() self.G = G self.is_permutation = True def forward(self, G): self.G = G return self def size(self): return 1 def canonicalize(self): return self, torch.zeros(1, dtype=torch.long) def __repr__(self): return "V⁰" def t(self): return self def rho(self, M): return torch.eye(1, device=self.G.device) def drho(self, A): return 0*torch.eye(1, device=self.G.device) def __hash__(self): return 0 def __eq__(self, other): return isinstance(other, ScalarRep) def __mul__(self, other): if isinstance(other, int): return super().__mul__(other) return other def __rmul__(self, other): if isinstance(other, int): return super().__rmul__(other) return other def concrete(self): return True class Base(Rep): """ Base representation V of a group.""" def __init__(self, G=None): super().__init__() self.G = G if G is not None: self.is_permutation = G.is_permutation def forward(self, G): return self.__class__(G) def rho(self, M): if isinstance(self.G, Group) and isinstance(M, dict): M = M[self.G] return M def drho(self, A): if isinstance(self.G, Group) and isinstance(A, dict): A = A[self.G] return A def size(self): assert self.G is not None, f"must know G to find size for rep={self}" return self.G.d def __repr__(self): return "V" def __hash__(self): return hash((type(self), self.G)) def __eq__(self, other): return type(other) is type(self) and self.G == other.G def __lt__(self, other): if isinstance(other, Dual): return True return super().__lt__(other) class Dual(Rep): """ Dual representation V*, rho*, drho*.""" def __init__(self, rep): super().__init__() self.rep = rep self.G = rep.G if hasattr(rep, "is_permutation"): self.is_permutation = rep.is_permutation def forward(self, G): return self.rep(G).t() def rho(self, M): rho = self.rep.rho(M)
""" The base Representation class. """ class Rep(nn.Module): """ The base Representation class. Representation objects formalize the vector space V on which the group acts, the group representation matrix ρ(g), and the Lie Algebra representation dρ(A) in a single object. Representations act as types for vectors coming from V. These types can be manipulated and transformed with the built in operators ⊕,⊗,dual, as well as incorporating custom representations. Representation objects should be immutable. At minimum, new representations need to implement ``rho``, ``__str__``.""" def __init__(self): super().__init__() self.is_permutation = False self._size = None self.G = None def rho(self, M): """ Group representation of the matrix M of shape (d,d)""" raise NotImplementedError def drho(self, A): """ Lie Algebra representation of the matrix A of shape (d,d)""" In = torch.eye(A.size(0), dtype=A.dtype, device=A.device) return LazyJVP(self.rho, In, A) def forward(self, G): """ Instantiate (nonconcrete) representation with a symmetry group (forward) """ raise NotImplementedError def __str__(self): return repr(self) def __repr__(self): raise NotImplementedError def __eq__(self, other): if type(self) is not type(other): # pylint: disable=unidiomatic-typecheck return False return self.__hash__() == other.__hash__() def __hash__(self): raise NotImplementedError def size(self): """ Dimension dim(V) of the representation """ if self._size is not None: return self._size if self.concrete() and isinstance(self.G, Group): self._size = self.rho(self.G.sample()).size(-1) return self._size raise NotImplementedError def canonicalize(self): """ An optional method to convert the representation into a canonical form in order to reuse equivalent solutions in the solver. Should return both the canonically ordered representation, along with a permutation which can be applied to vectors of the current representation to achieve that ordering. """ # return canonicalized rep return self, torch.arange(self.size()) def rho_dense(self, M): """ A convenience function which returns rho(M) as a dense matrix.""" return densify(self.rho(M)) def drho_dense(self, A): """ A convenience function which returns drho(A) as a dense matrix.""" return densify(self.drho(A)) def constraint_matrix(self): """ Constructs the equivariance constrant matrix (lazily) by concatenating the constraints (ρ(hᵢ)-I) for i=1,...M and dρ(Aₖ) for k=1,..,D from the generators of the symmetry group. """ n = self.size() constraints = [] constraints.extend([lazify(self.rho(h)).to(self.G.device)-I(n, device=self.G.device) \ for h in self.G.discrete_generators]) constraints.extend([lazify(self.drho(A)).to(self.G.device) for A in self.G.lie_algebra]) return ConcatLazy(constraints) if constraints else lazify( torch.zeros((1, n), device=self.G.device)) solcache = {} def equivariant_basis(self): """ Computes the equivariant solution basis for the given representation of size N. Canonicalizes problems and caches solutions for reuse. 
Output [Q (N,r)] """ if self == Scalar: return torch.ones((1, 1), device=self.G.device) canon_rep, perm = self.canonicalize() invperm = torch.argsort(perm) if canon_rep not in self.solcache: logging.info("%r cache miss", canon_rep) logging.info("Solving basis for %r%s", self, f", for G={self.G}" if self.G is not None else "") C_lazy = canon_rep.constraint_matrix() if C_lazy.size(0)*C_lazy.size(1) > 3e7: # Too large to use SVD result = krylov_constraint_solve(C_lazy) else: C_dense = C_lazy.to_dense() result = orthogonal_complement(C_dense) self.solcache[canon_rep] = result return self.solcache[canon_rep][invperm] def equivariant_projector(self): """ Computes the (lazy) projection matrix P=QQᵀ that projects to the equivariant basis.""" Q = self.equivariant_basis() Q_lazy = lazify(Q) P = Q_lazy@Q_lazy.H() return P def concrete(self): """ Concreteness """ return isinstance(self.G, Group) def __add__(self, other): """ Direct sum (⊕) of representations. """ if isinstance(other, int): if other == 0: return self return self+other*Scalar if both_concrete(self, other): return SumRep(self, other) return DeferredSumRep(self, other) def __radd__(self, other): if isinstance(other, int): if other == 0: return self return other*Scalar+self return NotImplemented def __mul__(self, other): """ Tensor sum (⊗) of representations. """ return mul_reps(self, other) def __rmul__(self, other): return mul_reps(other, self) def __pow__(self, other): """ Iterated tensor product. """ assert isinstance(other, int), \ f"Power only supported for integers, not {type(other)}" assert other >= 0, f"Negative powers {other} not supported" return reduce(lambda a, b: a*b, other*[self], Scalar) def __rshift__(self, other): """ Linear maps from self -> other """ return other*self.t() def __lshift__(self, other): """ Linear maps from other -> self """ return self*other.t() def __lt__(self, other): """ less than defined to disambiguate ordering multiple different representations. Canonical ordering is determined first by Group, then by size, then by hash""" if other == Scalar: return False try: if self.G < other.G: return True if self.G > other.G: return False except (AttributeError, TypeError): pass if self.size() < other.size(): return True if self.size() > other.size(): return False return hash(self) < hash(other) # For sorting purposes only def t(self): """ Dual representation V*, rho*, drho*.""" if isinstance(self.G, Group) and self.G.is_orthogonal: return self return Dual(self) @dispatch def mul_reps(ra, rb: int): """ Product of a scalar and a representation. """ if rb == 1: return ra if rb == 0: return 0 if ra.concrete(): return SumRep(*(rb*[ra])) return DeferredSumRep(*(rb*[ra])) @dispatch def mul_reps(ra: int, rb): # pylint: disable=function-redefined """ Product of a scalar and a representation. """ return mul_reps(rb, ra) # pylint: disable=W1114:arguments-out-of-order class ScalarRep(Rep): """ The trivial representation of the group G. 
""" def __init__(self, G=None): super().__init__() self.G = G self.is_permutation = True def forward(self, G): self.G = G return self def size(self): return 1 def canonicalize(self): return self, torch.zeros(1, dtype=torch.long) def __repr__(self): return "V⁰" def t(self): return self def rho(self, M): return torch.eye(1, device=self.G.device) def drho(self, A): return 0*torch.eye(1, device=self.G.device) def __hash__(self): return 0 def __eq__(self, other): return isinstance(other, ScalarRep) def __mul__(self, other): if isinstance(other, int): return super().__mul__(other) return other def __rmul__(self, other): if isinstance(other, int): return super().__rmul__(other) return other def concrete(self): return True class Base(Rep): """ Base representation V of a group.""" def __init__(self, G=None): super().__init__() self.G = G if G is not None: self.is_permutation = G.is_permutation def forward(self, G): return self.__class__(G) def rho(self, M): if isinstance(self.G, Group) and isinstance(M, dict): M = M[self.G] return M def drho(self, A): if isinstance(self.G, Group) and isinstance(A, dict): A = A[self.G] return A def size(self): assert self.G is not None, f"must know G to find size for rep={self}" return self.G.d def __repr__(self): return "V" def __hash__(self): return hash((type(self), self.G)) def __eq__(self, other): return type(other) is type(self) and self.G == other.G def __lt__(self, other): if isinstance(other, Dual): return True return super().__lt__(other) class Dual(Rep): """ Dual representation V*, rho*, drho*.""" def __init__(self, rep): super().__init__() self.rep = rep self.G = rep.G if hasattr(rep, "is_permutation"): self.is_permutation = rep.is_permutation def forward(self, G): return self.rep(G).t() def rho(self, M): rho = self.rep.rho(M)
rhoinvt = rho.invt() if isinstance(rho, LinearOperator) else torch.linalg.inv(rho).t()
1
2023-11-01 07:19:02+00:00
16k
mbreuss/consistency_trajectory_models_toy_task
ctm_train.py
[ { "identifier": "ConsistencyTrajectoryModel", "path": "ctm/ctm.py", "snippet": "class ConsistencyTrajectoryModel(nn.Module):\n\n def __init__(\n self, \n data_dim: int,\n cond_dim: int,\n sampler_type: str,\n sigma_data: float,\n sigma_min: float,\n sigma_max: float,\n conditioned: bool,\n device: str,\n use_teacher: bool = False,\n solver_type: str = 'heun',\n n_discrete_t: int = 20,\n lr: float = 1e-4,\n rho: int = 7,\n diffusion_lambda: float = 1.0,\n gan_lambda: float = 0.0,\n ema_rate: float = 0.999,\n n_sampling_steps: int = 10,\n sigma_sample_density_type: str = 'loglogistic',\n ) -> None:\n super().__init__()\n self.use_gan = False\n self.ema_rate = ema_rate\n self.diffusion_lambda = diffusion_lambda\n self.gan_lambda = gan_lambda\n self.n_discrete_t = n_discrete_t\n self.model = ConsistencyTrajectoryNetwork(\n x_dim=data_dim,\n hidden_dim=256,\n time_embed_dim=4,\n cond_dim=cond_dim,\n cond_mask_prob=0.0,\n num_hidden_layers=4,\n output_dim=data_dim,\n dropout_rate=0.1,\n cond_conditional=conditioned\n ).to(device)\n # we need an ema version of the model for the consistency loss\n self.target_model = copy.deepcopy(self.model)\n for param in self.target_model.parameters():\n param.requires_grad = False\n # we further can use a teacher model for the solver\n self.use_teacher = use_teacher\n if self.use_teacher:\n self.teacher_model = copy.deepcopy(self.model)\n self.device = device\n self.sampler_type = sampler_type\n # use the score wrapper \n self.sigma_data = sigma_data\n self.sigma_min = sigma_min\n self.sigma_max = sigma_max\n self.rho = rho\n self.n_sampling_steps = n_sampling_steps\n self.solver_type = solver_type\n self.sigma_sample_density_type = sigma_sample_density_type\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)\n self.epochs = 0\n \n def diffusion_wrapper(self, model, x, cond, t, s):\n \"\"\"\n Performs the diffusion wrapper for the given model, x, cond, and t.\n Based on the conditioning from EDM Karras et al. 
2022.\n\n Args:\n model (torch.nn.Module): The neural network model to be used for the diffusion process.\n x (torch.Tensor): The input tensor to the model.\n cond (torch.Tensor): The conditioning tensor to be used during the diffusion process.\n t (float): The time step for the diffusion process.\n\n Returns:\n torch.Tensor: The scaled output tensor after applying the diffusion wrapper to the model.\n \"\"\"\n c_skip = self.sigma_data**2 / (\n t ** 2 + self.sigma_data**2\n )\n c_out = (\n t * self.sigma_data / (t**2 + self.sigma_data**2) ** 0.5\n )\n # these two are not mentioned in the paper but they use it in their code\n c_in = 1 / (t**2 + self.sigma_data**2) ** 0.5\n \n t = 0.25 * torch.log(t + 1e-40)\n c_in = append_dims(c_in, x.ndim)\n c_out = append_dims(c_out, x.ndim)\n c_skip = append_dims(c_skip, x.ndim)\n\n diffusion_output = model(c_in * x, cond, t, s)\n scaled_output = c_out * diffusion_output + c_skip * x\n \n return scaled_output\n \n def cmt_wrapper(self, model, x, cond, t, s):\n \"\"\"\n Applies the new cmt wrapper from page 4 of https://openreview.net/attachment?id=ymjI8feDTD&name=pdf\n\n Args:\n model (torch.nn.Module): The neural network model to be used for the diffusion process.\n x (torch.Tensor): The input tensor to the model.\n cond (torch.Tensor): The conditioning tensor to be used during the diffusion process.\n t (float): The time step for the diffusion process.\n s: (float): the target noise level for the diffusion process.\n\n Returns:\n torch.Tensor: The scaled output tensor after applying the diffusion wrapper to the model.\n \"\"\"\n if len(t.shape) == 1:\n t = t.unsqueeze(1)\n if len(s.shape) == 1:\n s = s.unsqueeze(1)\n G_0 = (s / t) * x + (1 - s /t) * self.diffusion_wrapper(model, x, cond, t, s)\n \n return G_0\n \n def _update_ema_weights(self):\n \"\"\"\n Updates the exponential moving average (EMA) weights of the target model.\n\n The method performs the following steps:\n 1. Gets the state dictionary of the self.model (source model).\n 2. Updates the EMA weights for each parameter in the target model by computing the weighted average between \n the corresponding parameter in the target model and the parameter in the source model, using the EMA rate parameter.\n \"\"\"\n # Get the state dictionary of the current/source model\n state_dict = self.model.state_dict()\n # Get the state dictionary of the target model\n target_state_dict = self.target_model.state_dict()\n\n # Iterate over the parameters in the target model state dictionary\n for key in state_dict:\n if key in target_state_dict:\n # Update the EMA weights for each parameter\n target_param_data = target_state_dict[key].data\n model_param_data = state_dict[key].data\n target_state_dict[key].data.copy_((1 - self.ema_rate) * target_param_data + self.ema_rate * model_param_data)\n\n # You can optionally load the updated state dict into the target model, if necessary\n # self.target_model.load_state_dict(target_state_dict)\n\n def train_step(self, x, cond):\n \"\"\"\n Main training step method to compute the loss for the Consistency Trajectory Model.\n The loss consists of three parts: the consistency loss, the diffusion loss, and the GAN loss (optional).\n The first part is similar to Song et al. (2023) and the second part is similar to Karras et al. 
(2022).\n The GAN Part is not implemented right now, since its not attractive for Imitation Learning applications.\n \"\"\"\n self.model.train()\n t_ctm, s, u = self.sample_noise_levels(shape=(len(x),), N=self.n_discrete_t, device=self.device)\n noise = torch.randn_like(x)\n # get the noise samples\n x_t = x + noise * append_dims(t_ctm, x.ndim)\n # use the solver if we have a teacher model otherwise use the euler method\n solver_target = self.solver(x_t, cond, t_ctm, u)\n\n # compute the cmt consistency loss\n cmt_loss = self.ctm_loss(x_t, cond, t_ctm, s, u, solver_target)\n \n # compute the diffusion loss\n # sample noise for the diffusion loss from the continuous noise distribution\n if self.diffusion_lambda > 0:\n t_sm = self.make_sample_density()(shape=(len(x),), device=self.device)\n x_t_sm = x + noise * append_dims(t_sm, x.ndim)\n diffusion_loss = self.diffusion_loss(x, x_t_sm, cond, t_sm)\n else:\n diffusion_loss = 0\n # compute the GAN loss if chosen\n # not implemented yet\n if self.use_gan:\n gan_loss = self.gan_loss(x_t, cond, x_t_sm)\n else:\n gan_loss = 0\n\n # compute the total loss\n \n loss = cmt_loss + self.diffusion_lambda * diffusion_loss + self.gan_lambda * gan_loss\n \n # perform the backward pass\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n # update the ema weights\n self._update_ema_weights()\n \n return loss, cmt_loss, diffusion_loss, gan_loss\n \n def sample_noise_levels(self, shape, N, device='cpu'):\n \"\"\"\n Samples a tensor of the specified shape with noise levels \n from `N` discretized levels of the noise scheduler.\n\n Args:\n shape (tuple): Shape of the tensor to sample.\n N (int): Number of discrete noise levels to discretize the scheduler.\n device (str): Device on which to create the noise levels, 'cpu' or 'cuda'.\n\n Returns:\n torch.Tensor: Tensor containing sampled noise levels.\n \"\"\"\n # Get the N discretized noise levels\n discretized_sigmas = get_sigmas_exponential(N, self.sigma_min, self.sigma_max, self.device)\n \n # Sample indices from this discretized range\n t = torch.randint(1, N, size=shape, device=device)\n s = torch.round(torch.rand_like(t.to(torch.float32)) * t.to(torch.float32)).to(torch.int32)\n u = torch.round(torch.rand_like(t.to(torch.float32)) * (t.to(torch.float32) -1 - s.to(torch.float32))+ s).to(torch.int32)\n # Use these indices to gather the noise levels from the discretized sigmas\n sigma_t = discretized_sigmas[t]\n sigma_s = discretized_sigmas[s]\n sigma_u = discretized_sigmas[u]\n return sigma_t, sigma_s, sigma_u\n\n def solver(self, x, cond, t, s):\n \"\"\"\n Eq. (3) in the paper\n \"\"\"\n if self.use_teacher:\n solver = self.teacher_model\n else:\n solver = self.model\n\n if self.solver_type == 'euler':\n solver_pred = self.euler_update_step(solver, x, cond, t, s)\n elif self.solver_type == 'heun':\n solver_pred = self.heun_update_step(solver, x, cond, t, s)\n elif self.solver_type == 'ddim':\n solver_pred = self.ddim_update_step(solver, x, cond, t, s)\n\n return solver_pred\n\n \n def eval_step(self, x, cond):\n \"\"\"\n Eval step method to compute the loss for the action prediction.\n \"\"\"\n self.model.eval()\n self.target_model.eval()\n x = x.to(self.device)\n cond = cond.to(self.device)\n # next generate the discrete timesteps\n t = [self.sample_discrete_timesteps(i) for i in range(self.t_steps)]\n # compute the loss\n x_T = torch.randn_like(x) * self.sigma_max\n pred_x = self. 
sample(x_T, cond, t)\n loss = torch.nn.functional.mse_loss(pred_x, x)\n return loss\n \n def ctm_loss(self, x_t, cond, t, s, u, solver_target):\n \"\"\"\n # TODO add description\n\n Args:\n x (torch.Tensor): Input tensor of shape [batch_size, dim].\n cond (torch.Tensor): Conditioning tensor of shape [batch_size, cond_dim].\n t1 (torch.Tensor): First discrete timestep tensor of shape [batch_size, 1].\n t2 (torch.Tensor): Second discrete timestep tensor of shape [batch_size, 1].\n\n Returns:\n torch.Tensor: Consistency loss tensor of shape [].\n \"\"\"\n jump_target = einops.repeat(torch.tensor([0]), '1 -> (b 1)', b=len(x_t))\n # compute the cmt prediction: jump from t to s\n ctm_pred = self.cmt_wrapper(self.model, x_t, cond, t, s)\n\n # compute the cmt target prediction with ema parameters inside self.target_model: jump from u to s\n # with torch.no_grad():\n ctm_target = self.cmt_wrapper(self.target_model, solver_target, cond, u, s)\n ctm_target_clean = self.cmt_wrapper(self.target_model, ctm_target, cond, s, jump_target)\n\n # transform them into the clean data space by jumping without gradient from s to 0\n # for both predictions and comparing them in the clean data space\n # with torch.no_grad():\n ctm_pred_clean = self.cmt_wrapper(self.target_model, ctm_pred, cond, s, jump_target)\n \n # compute the cmt loss\n cmt_loss = torch.nn.functional.mse_loss(ctm_pred_clean, ctm_target_clean)\n\n return cmt_loss\n\n\n @torch.no_grad() \n def heun_update_step(self, model, x, cond, t1, t2):\n \"\"\"\n Computes a single Heun update step from the Euler sampler with the teacher model\n\n Parameters:\n x (torch.Tensor): The input tensor.\n t1 (torch.Tensor): The initial timestep.\n t2 (torch.Tensor): The final timestep.\n x0 (torch.Tensor): The ground truth value used to compute the Euler update step.\n\n Returns:\n torch.Tensor: The output tensor after taking the Euler update step.\n \"\"\"\n denoised = self.cmt_wrapper(model, x, cond, t1, t1)\n d = (x - denoised) / append_dims(t1, x.ndim)\n \n \n sample_temp = x + d * append_dims(t2 - t1, x.ndim)\n denoised_2 = self.cmt_wrapper(model, sample_temp, cond, t2, t2)\n d_2 = (sample_temp - denoised_2) / append_dims(t2, x.ndim)\n d_prime = (d + d_2) / 2\n samples = x + d_prime * append_dims(t2 - t1, x.ndim)\n \n return samples\n \n @torch.no_grad() \n def ddim_update_step(self, model, x, cond, t1, t2):\n \"\"\"\n Computes a single Heun update step from the DDIM sampler with the teacher model\n\n Parameters:\n x (torch.Tensor): The input tensor.\n t1 (torch.Tensor): The initial timestep.\n t2 (torch.Tensor): The final timestep.\n x0 (torch.Tensor): The ground truth value used to compute the Euler update step.\n\n Returns:\n torch.Tensor: The output tensor after taking the Euler update step.\n \"\"\"\n sigma_fn = lambda t: t.neg().exp()\n t_fn = lambda sigma: sigma.log().neg()\n denoised = self.cmt_wrapper(model, x, cond, t1, t1)\n \n t, t_next = t_fn(t1), t_fn(t2)\n h = append_dims(t_next - t, x.ndim)\n samples = append_dims((sigma_fn(t_next) / sigma_fn(t)), x.ndim) * x - (-h).expm1() * denoised\n \n return samples\n\n def get_diffusion_scalings(self, sigma):\n \"\"\"\n Computes the scaling factors for diffusion training at a given time step sigma.\n\n Args:\n - self: the object instance of the model\n - sigma (float or torch.Tensor): the time step at which to compute the scaling factors\n \n , where self.sigma_data: the data noise level of the diffusion process, set during initialization of the model\n\n Returns:\n - c_skip (torch.Tensor): the scaling 
factor for skipping the diffusion model for the given time step sigma\n - c_out (torch.Tensor): the scaling factor for the output of the diffusion model for the given time step sigma\n - c_in (torch.Tensor): the scaling factor for the input of the diffusion model for the given time step sigma\n\n \"\"\"\n c_skip = self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2)\n c_out = sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5\n c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5\n return c_skip, c_out, c_in\n \n @staticmethod\n def mean_flat(tensor):\n \"\"\"\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))\n\n def diffusion_train_step(self, x, cond, train_step, max_steps):\n \"\"\"\n Computes the training loss and performs a single update step for the score-based model.\n\n Args:\n - self: the object instance of the model\n - x (torch.Tensor): the input tensor of shape (batch_size, dim)\n - cond (torch.Tensor): the conditional input tensor of shape (batch_size, cond_dim)\n\n Returns:\n - loss.item() (float): the scalar value of the training loss for this batch\n\n \"\"\"\n self.model.train()\n x = x.to(self.device)\n cond = cond.to(self.device)\n self.optimizer.zero_grad()\n t = self.make_sample_density()(shape=(len(x),), device=self.device)\n x_t = x + torch.randn_like(x) * append_dims(t, x.ndim)\n loss = self.diffusion_loss(x, x_t, cond, t)\n loss.backward()\n self.optimizer.step()\n return loss.item()\n\n \n def diffusion_loss(self, x, x_t, cond, t):\n \"\"\"\n Computes the diffusion training loss for the given model, input, condition, and time.\n\n Args:\n - self: the object instance of the model\n - x (torch.Tensor): the input tensor of shape (batch_size, channels, height, width)\n - cond (torch.Tensor): the conditional input tensor of shape (batch_size, cond_dim)\n - t (torch.Tensor): the time step tensor of shape (batch_size,)\n\n Returns:\n - loss (torch.Tensor): the diffusion training loss tensor of shape ()\n\n The diffusion training loss is computed based on the following equation from Karras et al. 
2022:\n loss = (model_output - target)^2.mean()\n where,\n - noise: a tensor of the same shape as x, containing randomly sampled noise\n - x_t: a tensor of the same shape as x, obtained by adding the noise tensor to x\n - c_skip, c_out, c_in: scaling tensors obtained from the diffusion scalings for the given time step\n - t: a tensor of the same shape as t, obtained by taking the natural logarithm of t and dividing it by 4\n - model_output: the output tensor of the model for the input x_1, condition cond, and time t\n - target: the target tensor for the given input x, scaling tensors c_skip, c_out, c_in, and time t\n \"\"\"\n c_skip, c_out, c_in = [append_dims(x, 2) for x in self.get_diffusion_scalings(t)]\n t = torch.log(t) / 4\n model_output = self.model(x_t * c_in, cond, t, t)\n target = (x - c_skip * x_t) / c_out\n return (model_output - target).pow(2).mean()\n \n def update_teacher_model(self):\n self.teacher_model.load_state_dict(self.target_model.state_dict())\n for param in self.teacher_model.parameters():\n param.requires_grad = False\n \n # next we init the model and target model with the same weights from the teacher\n self.model.load_state_dict(self.teacher_model.state_dict())\n for param in self.model.parameters():\n param.requires_grad = True\n self.target_model.load_state_dict(self.teacher_model.state_dict())\n for param in self.target_model.parameters():\n param.requires_grad = False\n print('Updated Teacher Model and froze all parameters!')\n \n def euler_update_step(self, x, t1, t2, denoised):\n \"\"\"\n Computes a single update step from the Euler sampler with a ground truth value.\n\n Parameters:\n x (torch.Tensor): The input tensor.\n t1 (torch.Tensor): The initial timestep.\n t2 (torch.Tensor): The final timestep.\n x0 (torch.Tensor): The ground truth value used to compute the Euler update step.\n\n Returns:\n torch.Tensor: The output tensor after taking the Euler update step.\n \"\"\"\n d = (x - denoised) / append_dims(t1, x.ndim)\n samples = x + d * append_dims(t2 - t1, x.ndim)\n return samples\n \n def euler_single_step(self, model, x, cond, t1, t2):\n \"\"\"\n \n \"\"\"\n denoised = self.diffusion_wrapper(model, x, cond, t1, t1)\n d = (x - denoised) / append_dims(t1, x.ndim)\n samples = x + d * append_dims(t2 - t1, x.ndim)\n return samples\n\n @torch.no_grad()\n @ema_eval_wrapper\n def sample_singlestep(self, x_shape, cond, return_seq=False):\n \"\"\"\n Samples a single step from the trained consistency trajectory model. \n If return_seq is True, returns a list of sampled tensors, \n otherwise returns a single tensor. \n \n Args:\n - x_shape (tuple): the shape of the tensor to be sampled.\n - cond (torch.Tensor or None): the conditional tensor.\n - return_seq (bool, optional): whether to return a list of sampled tensors (default False).\n \n Returns:\n - (torch.Tensor or list): the sampled tensor(s).\n \"\"\"\n sampled_x = []\n self.model.eval()\n if cond is not None:\n cond = cond.to(self.device)\n\n x = torch.randn_like(x_shape).to(self.device) * self.sigma_max\n sampled_x.append(x)\n x = self.cmt_wrapper(self.model, x, cond, torch.tensor([self.sigma_max]), torch.tensor([0]))\n sampled_x.append(x)\n if return_seq:\n return sampled_x\n else:\n return x\n \n @torch.no_grad()\n @ema_eval_wrapper\n def sample_diffusion_euler(self, x_shape, cond, n_sampling_steps=None, return_seq=False):\n \"\"\"\n Sample from the pre-trained diffusion model using the Euler method. This method is used for sanity checking \n the learned diffusion model. 
It generates a sequence of samples by taking small steps from one sample to the next. \n At each step, it generates a new noise from a normal distribution and combines it with the previous sample \n to get the next sample.\n \n Parameters:\n - x_shape (torch.Tensor): Shape of the input tensor to the model.\n - cond (torch.Tensor): Conditional information for the model.\n - n_sampling_steps (int, optional): Number of sampling steps to take. Defaults to None.\n - return_seq (bool, optional): Whether to return the full sequence of samples or just the final one. \n Defaults to False.\n \n Returns:\n - x (torch.Tensor or List[torch.Tensor]): Sampled tensor from the model. If `return_seq=True`, it returns\n a list of tensors, otherwise it returns a single tensor.\n \"\"\"\n self.model.eval()\n if cond is not None:\n cond = cond.to(self.device)\n x = torch.randn_like(x_shape).to(self.device) * self.sigma_max \n # x = torch.linspace(-4, 4, len(x_shape)).view(len(x_shape), 1).to(self.device)\n\n sampled_x = []\n if n_sampling_steps is None:\n n_sampling_steps = self.n_sampling_steps\n \n # sample the sequence of timesteps\n sigmas = self.sample_seq_timesteps(N=n_sampling_steps, type='exponential')\n sampled_x.append(x)\n # iterate over the remaining timesteps\n for i in trange(len(sigmas) - 1, disable=True):\n denoised = self.diffusion_wrapper(self.model, x, cond, sigmas[i], sigmas[i])\n x = self.euler_update_step(x, sigmas[i], sigmas[i+1], denoised)\n sampled_x.append(x)\n if return_seq:\n return sampled_x\n else:\n return x\n \n @torch.no_grad()\n @ema_eval_wrapper\n def ctm_gamma_sampler(self, x_shape, cond, gamma, n_sampling_steps=None, return_seq=False):\n \"\"\"\n Alg. 3 in the paper of CTM (page 22)\n \"\"\"\n self.model.eval()\n if cond is not None:\n cond = cond.to(self.device)\n x = torch.randn_like(x_shape).to(self.device) * self.sigma_max\n # x = torch.linspace(-4, 4, len(x_shape)).view(len(x_shape), 1).to(self.device)\n\n sampled_x = []\n if n_sampling_steps is None:\n n_sampling_steps = self.n_sampling_steps\n \n # sample the sequence of timesteps\n sigmas = self.sample_seq_timesteps(N=n_sampling_steps, type='exponential')\n sampled_x.append(x)\n # iterate over the remaining timesteps\n for i in trange(len(sigmas) - 1, disable=True):\n # get thenew sigma value \n sigma_hat = sigmas[i+1] * torch.sqrt(1 - gamma ** 2)\n # get the denoised value\n x_t_gamma = self.cmt_wrapper(self.model, x, cond, sigmas[i], sigma_hat)\n \n if sigmas[i + 1] > 0:\n x = x_t_gamma + gamma * sigmas[i+1] * torch.randn_like(x_shape).to(self.device)\n \n sampled_x.append(x)\n if return_seq:\n return sampled_x\n else:\n return x\n\n def sample_seq_timesteps(self, N=100, type='karras'):\n \"\"\"\n Generates a sequence of N timesteps for the given type.\n\n Args:\n - self: the object instance of the model\n - N (int): the number of timesteps to generate\n - type (str): the type of sequence to generate, either 'karras', 'linear', or 'exponential'\n\n Returns:\n - t (torch.Tensor): the generated sequence of timesteps of shape (N,)\n\n The method generates a sequence of timesteps for the given type using one of the following functions:\n - get_sigmas_karras: a function that generates a sequence of timesteps using the Karras et al. 
schedule\n - get_sigmas_linear: a function that generates a sequence of timesteps linearly spaced between sigma_min and sigma_max\n - get_sigmas_exponential: a function that generates a sequence of timesteps exponentially spaced between sigma_min and sigma_max\n where,\n - self.sigma_min, self.sigma_max: the minimum and maximum timesteps, set during initialization of the model\n - self.rho: the decay rate for the Karras et al. schedule, set during initialization of the model\n - self.device: the device on which to generate the timesteps, set during initialization of the model\n\n \"\"\"\n if type == 'karras':\n t = get_sigmas_karras(N, self.sigma_min, self.sigma_max, self.rho, self.device)\n elif type == 'linear':\n t = get_sigmas_linear(N, self.sigma_min, self.sigma_max, self.device)\n elif type == 'exponential':\n t = get_sigmas_exponential(N, self.sigma_min, self.sigma_max, self.device)\n else:\n raise NotImplementedError('Chosen Scheduler is implemented!')\n return t\n \n def make_sample_density(self):\n \"\"\"\n Returns a function that generates random timesteps based on the chosen sample density.\n\n Args:\n - self: the object instance of the model\n\n Returns:\n - sample_density_fn (callable): a function that generates random timesteps\n\n The method returns a callable function that generates random timesteps based on the chosen sample density.\n The available sample densities are:\n - 'lognormal': generates random timesteps from a log-normal distribution with mean and standard deviation set\n during initialization of the model also used in Karras et al. (2022)\n - 'loglogistic': generates random timesteps from a log-logistic distribution with location parameter set to the\n natural logarithm of the sigma_data parameter and scale and range parameters set during initialization\n of the model\n - 'loguniform': generates random timesteps from a log-uniform distribution with range parameters set during\n initialization of the model\n - 'uniform': generates random timesteps from a uniform distribution with range parameters set during initialization\n of the model\n - 'v-diffusion': generates random timesteps using the Variational Diffusion sampler with range parameters set during\n initialization of the model\n - 'discrete': generates random timesteps from the noise schedule using the exponential density\n - 'split-lognormal': generates random timesteps from a split log-normal distribution with mean and standard deviation\n set during initialization of the model\n \"\"\"\n sd_config = []\n \n if self.sigma_sample_density_type == 'lognormal':\n loc = self.sigma_sample_density_mean # if 'mean' in sd_config else sd_config['loc']\n scale = self.sigma_sample_density_std # if 'std' in sd_config else sd_config['scale']\n return partial(rand_log_normal, loc=loc, scale=scale)\n \n if self.sigma_sample_density_type == 'loglogistic':\n loc = sd_config['loc'] if 'loc' in sd_config else math.log(self.sigma_data)\n scale = sd_config['scale'] if 'scale' in sd_config else 0.5\n min_value = sd_config['min_value'] if 'min_value' in sd_config else self.sigma_min\n max_value = sd_config['max_value'] if 'max_value' in sd_config else self.sigma_max\n return partial(rand_log_logistic, loc=loc, scale=scale, min_value=min_value, max_value=max_value)\n \n if self.sigma_sample_density_type == 'loguniform':\n min_value = sd_config['min_value'] if 'min_value' in sd_config else self.sigma_min\n max_value = sd_config['max_value'] if 'max_value' in sd_config else self.sigma_max\n return partial(rand_log_uniform, 
min_value=min_value, max_value=max_value)\n if self.sigma_sample_density_type == 'uniform':\n return partial(rand_uniform, min_value=self.sigma_min, max_value=self.sigma_max)\n\n if self.sigma_sample_density_type == 'v-diffusion':\n min_value = self.min_value if 'min_value' in sd_config else self.sigma_min\n max_value = sd_config['max_value'] if 'max_value' in sd_config else self.sigma_max\n return partial(rand_v_diffusion, sigma_data=self.sigma_data, min_value=min_value, max_value=max_value)\n if self.sigma_sample_density_type == 'discrete':\n sigmas = self.get_noise_schedule(self.n_sampling_steps, 'exponential')\n return partial(rand_discrete, values=sigmas)\n else:\n raise ValueError('Unknown sample density type')" }, { "identifier": "DataGenerator", "path": "ctm/toy_tasks/data_generator.py", "snippet": "class DataGenerator:\n def __init__(self, dist_type: str):\n self.dist_type = dist_type\n self.func_mapping = {\n \"two_gmm_1D\": (self.two_gmm_1D, self.two_gmm_1D_log_prob),\n \"uneven_two_gmm_1D\": (self.uneven_two_gmm_1D, self.uneven_two_gmm_1D_log_prob),\n \"three_gmm_1D\": (self.three_gmm_1D, self.three_gmm_1D_log_prob),\n \"single_gaussian_1D\": (self.single_gaussian_1D, self.single_gaussian_1D_log_prob),\n }\n if self.dist_type not in self.func_mapping:\n raise ValueError(\"Invalid distribution type\")\n self.sample_func, self.log_prob_func = self.func_mapping[self.dist_type]\n\n def generate_samples(self, num_samples: int):\n \"\"\"\n Generate `num_samples` samples and labels using the `sample_func`.\n \n Args:\n num_samples (int): Number of samples to generate.\n \n Returns:\n Tuple[np.ndarray, np.ndarray]: A tuple of two numpy arrays containing the generated samples and labels.\n \"\"\"\n samples, labels = self.sample_func(num_samples)\n return samples, labels\n \n def compute_log_prob(self, samples, exp: bool = False):\n \"\"\"\n Compute the logarithm of probability density function (pdf) of the given `samples`\n using the `log_prob_func`. If `exp` is True, return exponentiated log probability.\n \n Args:\n samples (np.ndarray): Samples for which pdf is to be computed.\n exp (bool, optional): If True, return exponentiated log probability.\n Default is False.\n \n Returns:\n np.ndarray: Logarithm of probability density function (pdf) of the given `samples`.\n If `exp` is True, exponentiated log probability is returned.\n \"\"\"\n return self.log_prob_func(samples, exp=exp)\n\n @staticmethod\n def two_gmm_1D(num_samples,):\n \"\"\"\n Generates `num_samples` samples from a 1D mixture of two Gaussians with equal weights.\n \n Args:\n num_samples (int): Number of samples to generate.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]: A tuple of two torch tensors containing the generated\n samples and binary labels indicating which Gaussian component the sample is from.\n \"\"\"\n g1 = Normal(loc=-1.5, scale=0.3)\n g2 = Normal(loc=1.5, scale=0.3)\n mixture_probs = torch.ones(num_samples) * 0.5\n is_from_g1 = torch.bernoulli(mixture_probs).bool()\n samples = torch.where(is_from_g1, g1.sample((num_samples,)), g2.sample((num_samples,)))\n return samples, is_from_g1.int()\n\n @staticmethod\n def uneven_two_gmm_1D(num_samples, w1=0.7):\n \"\"\"\n Generates `num_samples` samples from a 1D mixture of two Gaussians with weights `w1` and `w2`.\n \n Args:\n num_samples (int): Number of samples to generate.\n w1 (float, optional): Weight of first Gaussian component. 
Default is 0.7.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]: A tuple of two torch tensors containing the generated\n samples and binary labels indicating which Gaussian component the sample is from.\n \"\"\"\n g1 = Normal(loc=-1.5, scale=0.3)\n g2 = Normal(loc=1.5, scale=0.2)\n mixture_probs = torch.tensor([w1, 1-w1])\n is_from_g1 = torch.bernoulli(mixture_probs.repeat(num_samples, 1)).view(num_samples, -1).bool().squeeze()\n \n samples_g1 = g1.sample((num_samples, 1))\n samples_g2 = g2.sample((num_samples, 1))\n samples = torch.where(is_from_g1, samples_g1, samples_g2).squeeze()\n\n return samples, is_from_g1.int()\n \n @staticmethod\n def single_gaussian_1D(num_samples):\n \"\"\"\n Generates `num_samples` samples from a 1D Gaussian distribution.\n \n Args:\n num_samples (int): Number of samples to generate.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]: A tuple of two torch tensors containing the generated\n samples and binary labels indicating which Gaussian component the sample is from.\n Since there is only one Gaussian component, all labels will be zero.\n \"\"\"\n g1 = Normal(loc=1, scale=0.2)\n samples = g1.sample((num_samples, 1))\n return samples, torch.zeros(num_samples).int()\n\n @staticmethod\n def three_gmm_1D(num_samples):\n \"\"\"\n Generates `num_samples` samples from a 1D mixture of three Gaussians with equal weights.\n \n Args:\n num_samples (int): Number of samples to generate.\n exp (bool, optional): If True, return exponentiated log probability. Default is False.\n \n Returns:\n Tuple[torch.Tensor, torch.Tensor]: A tuple of two torch tensors containing the generated\n samples and integer labels indicating which Gaussian component the sample is from.\n \"\"\"\n g1 = Normal(loc=-1.5, scale=0.2)\n g2 = Normal(loc=0, scale=0.2)\n g3 = Normal(loc=1.5, scale=0.2)\n mixture_probs = torch.ones(3) / 3\n component_assignments = torch.multinomial(mixture_probs, num_samples, replacement=True)\n samples = torch.zeros(num_samples, 1)\n \n g1_mask = (component_assignments == 0)\n g2_mask = (component_assignments == 1)\n g3_mask = (component_assignments == 2)\n \n samples[g1_mask] = g1.sample((g1_mask.sum(), )).view(-1, 1)\n samples[g2_mask] = g2.sample((g2_mask.sum(), )).view(-1, 1)\n samples[g3_mask] = g3.sample((g3_mask.sum(), )).view(-1, 1)\n \n return samples, component_assignments.int()\n\n @staticmethod\n def two_gmm_1D_log_prob(z, exp=False):\n \"\"\"\n Computes the logarithm of the probability density function (pdf) of a 1D mixture of two Gaussians\n with equal weights at the given points `z`.\n \n Args:\n z (torch.Tensor): Points at which to compute the pdf.\n exp (bool, optional): If True, return exponentiated log probability. Default is False.\n \n Returns:\n torch.Tensor: Logarithm of probability density function (pdf) of a 1D mixture of two Gaussians\n with equal weights at the given points `z`. If `exp` is True, exponentiated log probability\n is returned.\n \"\"\"\n g1 = Normal(loc=-1.5, scale=0.3)\n g2 = Normal(loc=1.5, scale=0.3)\n f = torch.log(0.5 * (g1.log_prob(z).exp() + g2.log_prob(z).exp()))\n if exp:\n return torch.exp(f)\n else:\n return f\n \n @staticmethod\n def uneven_two_gmm_1D_log_prob(z, w1=0.7, exp=False):\n \"\"\"\n Computes the logarithm of the probability density function (pdf) of a 1D mixture of two Gaussians\n with weights `w1` and `w2` at the given points `z`.\n \n Args:\n z (torch.Tensor): Points at which to compute the pdf.\n w1 (float, optional): Weight of first Gaussian component. 
Default is 0.7.\n exp (bool, optional): If True, return exponentiated log probability. Default is False.\n \n Returns:\n torch.Tensor: Logarithm of probability density function (pdf) of a 1D mixture of two Gaussians\n with weights `w1` and `w2` at the given points `z`. If `exp` is True, exponentiated log probability\n is returned.\n \"\"\"\n g1 = Normal(loc=-1.5, scale=0.3)\n g2 = Normal(loc=1.5, scale=0.2)\n f = torch.log(w1 * g1.log_prob(z).exp() + (1 - w1) * g2.log_prob(z).exp())\n if exp:\n return torch.exp(f)\n else:\n return f\n\n @staticmethod\n def three_gmm_1D_log_prob(z, exp=False):\n \"\"\"\n Computes the logarithm of the probability density function (pdf) of a 1D mixture of three Gaussians\n with equal weights at the given points `z`.\n \n Args:\n z (torch.Tensor): Points at which to compute the pdf.\n exp (bool, optional): If True, return exponentiated log probability. Default is False.\n \n Returns:\n torch.Tensor: Logarithm of probability density function (pdf) of a 1D mixture of three Gaussians\n with equal weights at the given points `z`. If `exp` is True, exponentiated log probability\n is returned.\n \"\"\"\n g1 = Normal(loc=-1.5, scale=0.2)\n g2 = Normal(loc=0, scale=0.2)\n g3 = Normal(loc=1.5, scale=0.2)\n f = torch.log(1/3 * (g1.log_prob(z).exp() + g2.log_prob(z).exp() + g3.log_prob(z).exp()))\n if exp:\n return torch.exp(f)\n else:\n return f\n\n @staticmethod\n def single_gaussian_1D_log_prob(z, exp=False):\n \"\"\"\n Computes the logarithm of the probability density function (pdf) of a 1D Gaussian\n distribution at the given points `z`.\n \n Args:\n z (torch.Tensor): Points at which to compute the pdf.\n exp (bool, optional): If True, return exponentiated log probability. Default is False.\n \n Returns:\n torch.Tensor: Logarithm of probability density function (pdf) of a 1D Gaussian\n distribution at the given points `z`. If `exp` is True, exponentiated log probability\n is returned.\n \"\"\"\n g = Normal(loc=1, scale=0.2)\n f = g.log_prob(z)\n if exp:\n return torch.exp(f)\n else:\n return f" }, { "identifier": "plot_main_figure", "path": "ctm/visualization/vis_utils.py", "snippet": "def plot_main_figure(\n fn, \n model, \n n_samples, \n train_epochs, \n sampling_method='euler',\n x_range=[-4, 4], \n n_sampling_steps = 10,\n save_path='/home/moritz/code/cm_1D_Toy_Task/plots'\n): \n \"\"\"\n Plot the main figure for the given model and sampling method.\n Args:\n fn (callable): Target function to be plotted.\n model (object): Model to be used for sampling (ConsistencyModel or Beso).\n n_samples (int): Number of samples to be taken.\n train_epochs (int): Number of training epochs.\n sampling_method (str, optional): Method to be used for sampling ('multistep', 'onestep', or 'euler'). Defaults to False.\n x_range (list, optional): Range of x values to be plotted. Defaults to [-5, 5].\n n_sampling_steps (int, optional): Number of sampling steps. Defaults to 10.\n save_path (str, optional): Directory to save the plot. 
Defaults to '/home/moritz/code/cm_1D_Toy_Task/plots'.\n\n Raises ValueError: If the sampling_method is not one of the specified options ('multistep', 'onestep', or 'euler').\n \"\"\"\n test_samples = get_test_samples(model, n_samples, sampling_method, n_sampling_steps)\n test_samples = [x.detach().cpu().numpy() for x in test_samples]\n test_samples = np.stack(test_samples, axis=1)\n\n x_test = np.linspace(x_range[0], x_range[1], n_samples)\n target_fn = fn(torch.tensor(x_test), exp=True)\n\n fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(10, 10), sharex=True)\n ax1.set_xlim(*x_range)\n ax2.set_xlim(*x_range)\n ax3.set_xlim(*x_range)\n\n # Plot target distribution\n ax1.plot(x_test, target_fn, color='black', label='Target Distribution')\n\n # Plot predicted distribution\n kde = gaussian_kde(test_samples[:, -1, 0], bw_method=0.1)\n predicted_distribution = kde(x_test)\n ax1.plot(x_test, predicted_distribution, label='Predicted Distribution')\n\n # Create a LineCollection to show colors on the predicted distribution line\n points = np.array([x_test, predicted_distribution]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n lc = LineCollection(segments, cmap='viridis', norm=plt.Normalize(predicted_distribution.min(), predicted_distribution.max()))\n lc.set_array(predicted_distribution)\n lc.set_linewidth(2)\n\n ax1.add_collection(lc)\n stepsize = np.linspace(0, 1, model.n_sampling_steps)\n # stepsize = cm.get_noise_schedule(model.n_sampling_steps, noise_schedule_type='exponential').flip(0)\n # ax2.set_ylim(-0.1, 1.1)\n if sampling_method == 'onestep':\n n_sampling_steps = 1\n stepsize = np.linspace(0, 1, 2)\n ax2.quiver(test_samples[:, 0].reshape(-1),\n stepsize[0] * np.ones(n_samples),\n test_samples[:, 1].reshape(-1) - test_samples[:, 0].reshape(-1),\n stepsize[1] * np.ones(n_samples) - stepsize[0] * np.ones(n_samples),\n angles='xy', scale_units='xy', scale=1,\n width=0.001\n )\n else:\n n_sampling_steps = n_sampling_steps\n for i in range(1, n_sampling_steps):\n ax2.quiver(test_samples[:, i - 1].reshape(-1),\n stepsize[i - 1] * np.ones(n_samples),\n test_samples[:, i].reshape(-1) - test_samples[:, i-1].reshape(-1),\n stepsize[i] * np.ones(n_samples) - stepsize[i - 1] * np.ones(n_samples),\n angles='xy', scale_units='xy', scale=1,\n width=0.001\n )\n ax2.set_yticks([stepsize.min(), stepsize.max()])\n ax2.set_ylim(stepsize.min(), stepsize.max())\n \n mu = 0 # mean\n sigma = model.sigma_max # standard deviation\n\n # Compute the PDF values for x_test\n prob_samples = norm.pdf(x_test, loc=mu, scale=sigma)\n # Create a LineCollection to show colors on the normal distribution line\n points = np.array([x_test, prob_samples]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n lc = LineCollection(segments, cmap='viridis', norm=plt.Normalize(prob_samples.min(), prob_samples.max()))\n lc.set_array(prob_samples)\n lc.set_linewidth(2)\n\n ax3.add_collection(lc)\n ax3.set_ylim(0, 0.5)\n\n # ... (previous code remains unchanged)\n ax2.set_xticks([])\n ax1.set_xticks([])\n ax1.set_yticks([])\n ax3.set_yticks([])\n ax2.set_yticklabels(['T', '0'])\n ax2.tick_params(axis='y', labelsize=16)\n # ax2.set_yticks('log')\n plt.subplots_adjust(hspace=0)\n plt.savefig(save_path + '/cm_' + sampling_method + f'_epochs_{train_epochs}.png', bbox_inches='tight', pad_inches=0.1) \n \n print('Plot saved!')" } ]
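The get_noise_schedule and make_sample_density snippets in the context above choose between Karras, linear, and exponential sigma schedules. A minimal, self-contained sketch of the Karras et al. (2022) schedule follows; it is illustrative only (hence the different helper name), and the repository's own get_sigmas_karras may differ in details such as whether a trailing sigma = 0 is appended.

import torch

def karras_sigmas(n, sigma_min, sigma_max, rho=7.0, device="cpu"):
    # Interpolate linearly in sigma**(1/rho) space, then map back.
    ramp = torch.linspace(0, 1, n, device=device)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
    # Append sigma = 0 so the final step denoises fully (a common convention).
    return torch.cat([sigmas, sigmas.new_zeros([1])])

print(karras_sigmas(10, sigma_min=0.002, sigma_max=80.0))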
from tqdm import tqdm
from ctm.ctm import ConsistencyTrajectoryModel
from ctm.toy_tasks.data_generator import DataGenerator
from ctm.visualization.vis_utils import plot_main_figure
11448
""" Discrete consistency distillation training of the consistency model on a toy task. We train a diffusion model and the consistency model at the same time and iteratively update the weights of the consistency model and the diffusion model. """ if __name__ == "__main__": device = 'cpu' n_sampling_steps = 10 use_pretraining = True
""" Discrete consistency distillation training of the consistency model on a toy task. We train a diffusion model and the consistency model at the same time and iteratively update the weights of the consistency model and the diffusion model. """ if __name__ == "__main__": device = 'cpu' n_sampling_steps = 10 use_pretraining = True
cm = ConsistencyTrajectoryModel(
0
2023-11-07 15:30:11+00:00
16k
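The DataGenerator snippet in the record above exposes generate_samples and compute_log_prob for several 1D Gaussian-mixture toy distributions. A short usage sketch, assuming the ctm package from this record's import statement (ctm.toy_tasks.data_generator) is installed and importable:

from ctm.toy_tasks.data_generator import DataGenerator

# Draw samples from the symmetric two-component mixture and evaluate its density.
gen = DataGenerator("two_gmm_1D")
samples, labels = gen.generate_samples(1000)        # labels mark the mixture component
density = gen.compute_log_prob(samples, exp=True)   # exponentiated log-prob, i.e. the pdf
print(samples.shape, labels.float().mean().item(), density.mean().item())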
awslabs/optimizing-multitask-training-through-dynamic-pipelines
dynapipe/pipe/data_loader.py
[ { "identifier": "ProfileBasedCostModelWithRC", "path": "dynapipe/data_opt/cost_models.py", "snippet": "class ProfileBasedCostModelWithRC(object):\n \"\"\"\n Wrapper class for multiple ProfileBasedCostModel objects, one for each\n tensor parallel degree and recomputation method.\n \"\"\"\n\n def __init__(\n self,\n profile_paths=None,\n _serialized_cms: Optional[Dict[Tuple[int, str], bytes]] = None,\n ) -> None:\n self.cost_models: dict[str, ProfileBasedCostModel] = {}\n if _serialized_cms is not None:\n for cm_key, serialized_cm in _serialized_cms.items():\n self.cost_models[cm_key] = ProfileBasedCostModel.deserialize(\n serialized_cm\n )\n return\n if not isinstance(profile_paths, list):\n # profile_paths is a dir\n assert os.path.isdir(profile_paths), (\n f\"Profile path {profile_paths} is not a directory \"\n \"or list of paths\"\n )\n profile_paths = [\n os.path.join(profile_paths, x)\n for x in os.listdir(profile_paths)\n if x.startswith(\"microbench\") and x.endswith(\"txt\")\n ]\n # separate paths by cost model key (tp_size, rc_type)\n self.per_key_profile_paths = defaultdict(list)\n for path in profile_paths:\n cm_key = self._parse_cm_key(path)\n self.per_key_profile_paths[cm_key].append(path)\n for cm_key, paths in self.per_key_profile_paths.items():\n self.cost_models[cm_key] = ProfileBasedCostModel(paths)\n\n def _parse_cm_key(self, filename):\n basename = os.path.basename(filename)\n if \"rc_full_uniform\" in basename:\n rc_type = \"full\"\n elif \"rc_selective\" in basename:\n rc_type = \"selective\"\n else:\n rc_type = \"none\"\n tp_size = int(basename.split(\"_\")[1][2:])\n return tp_size, rc_type\n\n def _check_valid_cm_key(self, cm_key):\n assert (\n cm_key in self.cost_models\n ), f\"Key {cm_key} not recorded in profile.\"\n\n def is_valid_stage(self, tp_size, rc_type, stage):\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].is_valid_stage(stage)\n\n def valid_stages(self, tp_size, rc_type):\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].valid_stages()\n\n def supported_sequence_lengths(self, tp_size, rc_type, stage):\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].supported_sequence_lengths(\n stage\n )\n\n def get_cost(\n self,\n tp_size,\n rc_type,\n stage,\n seq_len,\n mbs,\n ):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the computation cost.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_cost(\n stage, seq_len, mbs\n )\n\n def get_stored_activation(self, tp_size, rc_type, stage, seq_len, mbs):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the stored activation.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_stored_activation(\n stage, seq_len, mbs\n )\n\n def get_peak_activation(self, tp_size, rc_type, stage, seq_len, mbs):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the peak activation.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_peak_activation(\n stage, seq_len, mbs\n )\n\n def get_model_state(\n self,\n tp_size,\n rc_type,\n stage,\n n_shards=1,\n zero_stage=0,\n param_factor=None,\n ):\n \"\"\"Select the corresponding cost model based on TP degree and\n recomputation type and get the model state.\n 
\"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)].get_model_state(\n stage,\n n_shards=n_shards,\n zero_stage=zero_stage,\n param_factor=param_factor,\n )\n\n def get_raw_cost_model(self, tp_size, rc_type):\n \"\"\"Get the raw cost model for the given TP degree and recomputation\n type.\n \"\"\"\n self._check_valid_cm_key((tp_size, rc_type))\n return self.cost_models[(tp_size, rc_type)]\n\n def save(self, path):\n serialized_dict = {}\n for cm_key, cost_model in self.cost_models.items():\n serialized_dict[cm_key] = cost_model.serialize()\n with open(path, \"wb\") as f:\n pickle.dump(serialized_dict, f)\n\n @classmethod\n def load(cls, path):\n with open(path, \"rb\") as f:\n serialized_dict = pickle.load(f)\n return cls(_serialized_cms=serialized_dict)" }, { "identifier": "DataAssignmentOptimizer", "path": "dynapipe/data_opt/optimizer.py", "snippet": "class DataAssignmentOptimizer(object):\n \"\"\"Data assignment optimizer.\n\n Optimizes the assignment of a mini-batch of data into micro-batches.\n \"\"\"\n\n def __init__(\n self,\n cost_model: ProfileBasedCostModelWithRC,\n model_spec: TransformerModelSpec,\n n_executors: int,\n n_layers_per_stage: int,\n n_chunks_per_device: int = 1,\n dp_size: int = 1,\n tp_size: int = 1,\n zero_stage: int = 0,\n device_memory_limit: float = float(\"inf\"),\n round_seqlen_multiple=8,\n per_mb_memory_fraction=None,\n len_pack_sep_tokens=1,\n len_decoder_additional_tokens=2,\n seqlen_offset=0,\n ):\n \"\"\"Optimizer for assigning data samples into micro-batches.\n cost_model: cost model for the model used\n model_spec: model specification\n n_executors: number of stages of the pipelined model\n n_layers_per_stage: number of layers per each pipeline stage\n n_chunks_per_device: number of chunks per device\n (> 1 indicating interleaved schedule)\n dp_size: data parallelism degree\n tp_size: tensor parallelism degree\n zero_stage: stage of ZeRO optimizer\n device_memory_limit: memory limit in MB (MegaBytes)\n round_seqlen_multiple: always round sequence length to multiple of\n this number, required for some kernels\n default: 8\n len_pack_sep_tokens: number of tokens used to separate samples in the\n packed sequence, only used when enable_packing\n is True during optimization.\n len_decoder_additional_tokens: number of additional tokens added to\n the decoder sequence length other than\n the target sequence, e.g. 
<bos>, <eos>\n seqlen_offset: should be set 1 for decoder only models, whose input\n and target sequences are data sequence length - 1\n 0 for encoder-decoder models.\n \"\"\"\n self.cost_model = cost_model\n self.n_executors = n_executors\n self.n_layers_per_stage = n_layers_per_stage\n # create memory model\n self.model_spec = model_spec\n self.memory_limit = device_memory_limit\n self.dp_size = dp_size\n self.tp_size = tp_size\n self.zero_stage = zero_stage\n self.round_seqlen_multiple = round_seqlen_multiple\n self.len_pack_sep_tokens = len_pack_sep_tokens\n self.len_decoder_additional_tokens = len_decoder_additional_tokens\n self.n_chunks_per_device = n_chunks_per_device\n self.per_mb_memory_fraction = per_mb_memory_fraction\n self.seqlen_offset = seqlen_offset\n\n def _round_seqlen(self, seqlen, decoder=False):\n if decoder:\n seqlen += self.len_decoder_additional_tokens\n seqlen -= self.seqlen_offset\n return (\n (seqlen + self.round_seqlen_multiple - 1)\n // self.round_seqlen_multiple\n * self.round_seqlen_multiple\n + self.seqlen_offset\n )\n\n def _solve_sample_order_tsp_problem(\n self,\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n bottleneck_tsp=True,\n dist_function=\"sum\",\n use_clustering=True,\n distance_threshold=16,\n ):\n \"\"\"Solve the TSP problem to determine the sample order.\"\"\"\n if dist_function == \"sum\":\n\n def _f_dist(x, y):\n return abs(int(x[0]) - int(y[0])) + abs(int(x[1]) - int(y[1]))\n\n elif dist_function == \"max\":\n\n def _f_dist(x, y):\n return max(\n abs(int(x[0]) - int(y[0])), abs(int(x[1]) - int(y[1]))\n )\n\n elif dist_function == \"square\":\n\n def _f_dist(x, y):\n return (int(x[0]) - int(y[0])) ** 2 + (\n int(x[1]) - int(y[1])\n ) ** 2\n\n else:\n raise ValueError(\n \"Unknown distance function: {}\".format(dist_function)\n )\n\n def _get_distance_matrix(points):\n # add a dummy point at the beginning\n # to transform it into an open TSP problem\n distance_matrix = [[0] * (len(points) + 1)]\n for x in points:\n row = [0]\n for y in points:\n row.append(_f_dist(x, y))\n distance_matrix.append(row)\n return distance_matrix\n\n input_points = list(\n zip(sample_sequence_lengths, decoder_sample_sequence_lengths)\n )\n if use_clustering:\n vectors_np = np.array(input_points)\n clustering = AgglomerativeClustering(\n n_clusters=None,\n distance_threshold=distance_threshold,\n linkage=\"complete\",\n ).fit(vectors_np)\n labels = clustering.labels_\n n_clusters = max(labels) + 1\n cluster_to_samples = [[] for _ in range(n_clusters)]\n cluster_to_data = [[] for _ in range(n_clusters)]\n for sample_idx, label in enumerate(labels):\n cluster_to_samples[label].append(sample_idx)\n cluster_to_data[label].append(input_points[sample_idx])\n # compute cluster centroids\n cluster_to_center = [None] * n_clusters\n for cluster_label, data in enumerate(cluster_to_data):\n cluster_to_center[cluster_label] = tuple(np.mean(data, axis=0))\n # compute tsp for cluster centroids\n distance_matrix = np.array(_get_distance_matrix(cluster_to_center))\n permutation = list(\n np.array(\n elkai.solve_int_matrix(\n distance_matrix, 1, bottleneck=bottleneck_tsp\n )\n )\n - 1\n )[1:]\n # reconstruct orig order\n result = []\n for cluster_label in permutation:\n result += cluster_to_samples[cluster_label]\n # sanity check result is a valid permutation\n assert sorted(result) == list(range(len(result)))\n return result\n\n distance_matrix = np.array(_get_distance_matrix(input_points))\n permutation = list(\n np.array(\n elkai.solve_int_matrix(\n 
distance_matrix, 1, bottleneck=bottleneck_tsp\n )\n )\n - 1\n )[1:]\n return permutation\n\n def _pack(\n self,\n sequence: list,\n current_enc_length,\n current_dec_length,\n target_enc_length,\n target_dec_length,\n next_idx,\n samples_with_ids,\n consumed,\n ):\n for j in range(next_idx, len(samples_with_ids)):\n if consumed[j]:\n continue\n (\n seqlen_to_pack,\n dec_seqlen_to_pack,\n sample_id_to_pack,\n ) = samples_with_ids[j]\n if (\n current_enc_length + seqlen_to_pack <= target_enc_length\n and current_dec_length + dec_seqlen_to_pack\n <= target_dec_length\n ):\n sequence.append(sample_id_to_pack)\n current_enc_length += seqlen_to_pack\n current_dec_length += dec_seqlen_to_pack\n consumed[j] = True\n return current_enc_length, current_dec_length\n\n def _uniform_partition(self, samples_with_ids, microbatch_size):\n max_sequence_length = max([x[0] for x in samples_with_ids])\n max_decoder_sequence_length = max([x[1] for x in samples_with_ids])\n\n # round sequence length to multiple of round_seqlen_multiple\n max_sequence_length = self._round_seqlen(max_sequence_length)\n max_decoder_sequence_length = self._round_seqlen(\n max_decoder_sequence_length, decoder=True\n )\n # pack all sequences into fixed sequence length\n target_src_seqlen = max_sequence_length\n target_tgt_seqlen = (\n max_decoder_sequence_length - self.len_decoder_additional_tokens\n )\n consumed = [False] * len(samples_with_ids)\n sequences = []\n for seqlen, dec_seqlen, idx in samples_with_ids:\n if consumed[idx]:\n continue\n curr_sequence = []\n curr_sequence_seqlen = seqlen\n curr_sequence_dec_seqlen = dec_seqlen\n curr_sequence.append(idx)\n curr_sequence_seqlen, curr_sequence_dec_seqlen = self._pack(\n curr_sequence,\n curr_sequence_seqlen,\n curr_sequence_dec_seqlen,\n target_src_seqlen,\n target_tgt_seqlen,\n idx + 1,\n samples_with_ids,\n consumed,\n )\n sequences.append(curr_sequence)\n consumed[idx] = True\n # divide sequences into microbatches\n microbatches = []\n for i in range(0, len(sequences), microbatch_size):\n microbatches.append(sequences[i : i + microbatch_size])\n return microbatches\n\n def _token_based_partition(self, samples_with_ids, microbatch_tokens):\n microbatches = []\n current_microbatch_tokens = 0\n current_microbatch = []\n for seqlen, dec_seqlen, idx in samples_with_ids:\n rounded_seqlen = self._round_seqlen(seqlen)\n rounded_dec_seqlen = self._round_seqlen(dec_seqlen, decoder=True)\n if (\n current_microbatch_tokens + rounded_seqlen + rounded_dec_seqlen\n > microbatch_tokens\n ):\n if len(current_microbatch) > 0:\n microbatches.append(current_microbatch.copy())\n current_microbatch = []\n current_microbatch_tokens = 0\n current_microbatch.append([idx])\n current_microbatch_tokens += seqlen + dec_seqlen\n if len(current_microbatch) > 0:\n microbatches.append(current_microbatch)\n return microbatches\n\n def _subset_partition(self, micro_batch_costs):\n # partition the microbatches into subsets\n # create a mapping from microbatch index to its cost\n mb_cost_map = {}\n for i, mb in enumerate(micro_batch_costs):\n mb_cost_map[i] = mb\n return prtpy.partition(\n algorithm=prtpy.partitioning.kk,\n numbins=self.dp_size,\n items=mb_cost_map,\n )\n\n def generate_microbatches(\n self,\n sample_sequence_lengths,\n available_rc_types=None,\n decoder_sample_sequence_lengths=None,\n disable_tsp=False,\n bottleneck_tsp=False,\n tsp_dist_function=\"sum\",\n tsp_use_clustering=True,\n tsp_cluster_distance_threshold=16,\n partition_method=\"dp\",\n uniform_partition_batch_size=None,\n 
token_based_partition_mb_tokens=None,\n enable_packing=False,\n ):\n if available_rc_types is None:\n available_rc_types = [\"none\", \"selective\", \"full\"]\n if (\n self.n_chunks_per_device > 1\n and decoder_sample_sequence_lengths is None\n ):\n raise ValueError(\n \"Interleaved schedule with non-encoder-decoder models \"\n \"are not supported yet.\"\n )\n # stage 1: determine the order of samples\n if decoder_sample_sequence_lengths is None:\n samples_with_ids = [\n (seqlen, 0, i)\n for i, seqlen in enumerate(sample_sequence_lengths)\n ]\n # single sequence, sorting suffices\n samples_with_ids.sort(reverse=True)\n else:\n if partition_method == \"uniform\":\n assert uniform_partition_batch_size is not None, (\n \"uniform_partition_batch_size must be specified \"\n \"when partition_method is 'uniform'\"\n )\n # uniform partitioning, don't need to solve TSP\n samples_with_ids = [\n (seqlen, dec_seqlen, i)\n for i, (seqlen, dec_seqlen) in enumerate(\n zip(\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n )\n )\n ]\n else:\n # multiple sequences, use TSP or 2 level sorting\n # to find the optimal order\n if disable_tsp:\n samples_with_ids = [\n (seqlen, dec_seqlen, i)\n for i, (seqlen, dec_seqlen) in enumerate(\n zip(\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n )\n )\n ]\n # sort first by encoder sequence length, then by decoder\n samples_with_ids.sort(reverse=True)\n else:\n permutation = self._solve_sample_order_tsp_problem(\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n bottleneck_tsp=bottleneck_tsp,\n dist_function=tsp_dist_function,\n use_clustering=tsp_use_clustering,\n distance_threshold=tsp_cluster_distance_threshold,\n )\n samples_with_ids = [\n (\n sample_sequence_lengths[i],\n decoder_sample_sequence_lengths[i],\n int(i),\n )\n for i in permutation\n ]\n # stage 2: splitting and packing\n # we first calculate the model states memory and subtract it\n # from the memory limit\n # We assume that GPU0 is the bottleneck GPU, which holds Embedding\n # and Encoder of the model if not interleaved, and holds Embedding,\n # Encoder and Decoder of the model if interleaved.\n # rc_type doesn't matter here\n model_states_memory = self.cost_model.get_model_state(\n self.tp_size,\n \"none\",\n \"Embedding\",\n n_shards=self.dp_size,\n zero_stage=self.zero_stage,\n )\n encoder_model_state = self.cost_model.get_model_state(\n self.tp_size,\n \"none\",\n \"Encoder\",\n n_shards=self.dp_size,\n zero_stage=self.zero_stage,\n )\n if decoder_sample_sequence_lengths is not None:\n decoder_model_state = self.cost_model.get_model_state(\n self.tp_size,\n \"none\",\n \"Decoder\",\n n_shards=self.dp_size,\n zero_stage=self.zero_stage,\n )\n else:\n decoder_model_state = 0\n if self.n_chunks_per_device == 1:\n # not interleaved\n layer_states = max(encoder_model_state, decoder_model_state)\n else:\n # interleaved\n layer_states = encoder_model_state + decoder_model_state\n layer_states = layer_states * self.n_chunks_per_device / 2\n layer_states *= self.n_layers_per_stage\n model_states_memory += layer_states\n available_memory = self.memory_limit - model_states_memory\n\n if (\n self.per_mb_memory_fraction is not None\n and self.per_mb_memory_fraction > 0\n ):\n preferred_memory_limit = (\n self.per_mb_memory_fraction * available_memory\n )\n else:\n preferred_memory_limit = available_memory / self.n_executors\n for memory_type, memory_limit in [\n (\"preferred\", preferred_memory_limit),\n (\"available\", available_memory),\n ]:\n # first try to find a 
partition that do not need special schedule\n # if not found, only make sure that each single microbatch\n # fits in memory\n for rc_type in available_rc_types:\n if partition_method == \"dp\":\n # use dynamic programming to find optimal\n # sequential partition\n (\n objective_value,\n microbatches,\n microbatch_costs,\n ) = cpp_consecutive_partition_dp(\n self.cost_model.get_raw_cost_model(\n self.tp_size, rc_type\n ),\n self.n_executors,\n self.n_chunks_per_device,\n self.n_layers_per_stage,\n self.dp_size,\n memory_limit,\n available_memory,\n samples_with_ids,\n enable_packing=enable_packing,\n round_seqlen_multiple=self.round_seqlen_multiple,\n len_pack_sep_tokens=self.len_pack_sep_tokens,\n len_decoder_additional_tokens=self.len_decoder_additional_tokens, # noqa\n )\n elif partition_method == \"token_based\":\n assert token_based_partition_mb_tokens is not None, (\n \"token_based_partition_mb_tokens must be specified \"\n \"when partition_method is 'token_based'\"\n )\n # token based partitioning\n microbatches = self._token_based_partition(\n samples_with_ids, token_based_partition_mb_tokens\n )\n # dummy objective value, not used\n objective_value = (\n 0,\n 0,\n 0,\n [0] * len(microbatches),\n [0] * len(microbatches),\n )\n # dummy microbatch costs\n microbatch_costs = [0] * len(microbatches)\n elif partition_method == \"uniform\":\n microbatches = self._uniform_partition(\n samples_with_ids, uniform_partition_batch_size\n )\n # dummy objective value, not used\n objective_value = (\n 0,\n 0,\n 0,\n [0] * len(microbatches),\n [0] * len(microbatches),\n )\n # dummy microbatch costs\n microbatch_costs = [0] * len(microbatches)\n else:\n raise ValueError(\n \"unknown partition method: {}\".format(partition_method)\n )\n if math.isinf(objective_value[0]) or math.isnan(\n objective_value[0]\n ):\n # memory limit is too small\n continue\n # sanity check microbatches:\n # make sure that each index appears once and only once\n all_indices = set()\n for mb in microbatches:\n for sample in mb:\n for index in sample:\n assert (\n index not in all_indices\n ), \"index {} appears more than once\".format(index)\n all_indices.add(index)\n assert sorted(list(all_indices)) == list(\n range(len(samples_with_ids))\n ), (\n \"not all indices appear in microbatches: \"\n \"{} v.s. {}. 
Input seqlens: {}, target seqlens: {}\".format(\n len(all_indices),\n len(samples_with_ids),\n sample_sequence_lengths,\n decoder_sample_sequence_lengths,\n )\n )\n # partition microbatches into subsets, each for one data\n # parallel group\n if self.dp_size > 1:\n partitioned_microbatch_ids = self._subset_partition(\n microbatch_costs\n )\n partitioned_microbatches = []\n for mb_ids in partitioned_microbatch_ids:\n partitioned_microbatches.append(\n [microbatches[i] for i in sorted(mb_ids)]\n )\n else:\n partitioned_microbatches = [microbatches]\n return (\n objective_value,\n partitioned_microbatches,\n memory_type,\n rc_type,\n (available_memory, model_states_memory, memory_limit),\n )\n # no feasible microbatch split found\n return None, None, None, None, None" }, { "identifier": "DynaPipeCluster", "path": "dynapipe/model.py", "snippet": "class DynaPipeCluster:\n def __init__(\n self,\n device2node: Dict[int, int],\n memory_limits: List[int],\n intra_node_bw_gbps: float,\n inter_node_bw_gbps: float,\n intra_node_lat_us: float,\n inter_node_lat_us: float,\n ) -> None:\n # memory_limits is in MB (megabytes)\n # bw is in Gbps (gigabits per second)\n # lat is in us (microseconds)\n devices = set()\n nodes = set()\n for device, node in device2node.items():\n devices.add(device)\n nodes.add(node)\n self.n_devices = len(devices)\n self.n_nodes = len(nodes)\n self.device2node = device2node\n flattened_devices = [device for device in device2node.keys()]\n assert list(sorted(list(set(flattened_devices)))) == list(\n range(self.n_devices)\n ), \"Device ids must be contiguous and start at 0\"\n assert len(memory_limits) == self.n_devices, (\n \"Expected memory limits for each of the \"\n f\"{self.n_devices} devices, but got \"\n f\"{len(memory_limits)} numbers.\"\n )\n self.memory_limits = memory_limits\n self.intra_node_bw = intra_node_bw_gbps\n self.inter_node_bw = inter_node_bw_gbps\n self.intra_node_lat = intra_node_lat_us\n self.inter_node_lat = inter_node_lat_us\n\n def _get_bw(self, dev0, dev1):\n if self.device2node[dev0] == self.device2node[dev1]:\n return self.intra_node_bw\n else:\n return self.inter_node_bw\n\n def _get_lat(self, dev0, dev1):\n if self.device2node[dev0] == self.device2node[dev1]:\n return self.intra_node_lat\n else:\n return self.inter_node_lat\n\n def get_comm_time(self, megabytes, dev0, dev1):\n if dev0 == dev1:\n return 0\n return self._get_lat(dev0, dev1) + 1e6 * (\n megabytes * 8 / 1e3\n ) / self._get_bw(dev0, dev1)\n\n def get_memory_limit(self, dev):\n return self.memory_limits[dev]\n\n def to_json(self) -> dict:\n return {\n \"n_devices\": self.n_devices,\n \"n_nodes\": self.n_nodes,\n \"device2node\": self.device2node,\n \"memory_limits\": self.memory_limits,\n \"intra_node_bw\": self.intra_node_bw,\n \"inter_node_bw\": self.inter_node_bw,\n \"intra_node_lat\": self.intra_node_lat,\n \"inter_node_lat\": self.inter_node_lat,\n }\n\n def dumps(self) -> str:\n return json.dumps(self.to_json())\n\n @staticmethod\n def loads(json_str: str) -> \"DynaPipeCluster\":\n return DynaPipeCluster.from_json(json.loads(json_str))\n\n @staticmethod\n def from_json(json_dict):\n converted_device2node = {\n int(k): int(v) for k, v in json_dict[\"device2node\"].items()\n }\n json_dict[\"device2node\"] = converted_device2node\n cluster = DynaPipeCluster(\n json_dict[\"device2node\"],\n json_dict[\"memory_limits\"],\n json_dict[\"intra_node_bw\"],\n json_dict[\"inter_node_bw\"],\n json_dict[\"intra_node_lat\"],\n json_dict[\"inter_node_lat\"],\n )\n return cluster" }, { 
"identifier": "TransformerModelSpec", "path": "dynapipe/model.py", "snippet": "class TransformerModelSpec:\n # Default setting:\n # * mlp_hidden_size = 4x hidden_dim\n # * kv_channels = hidden_dim // num_attn_heads\n # * use FP16 mixed precision training with Adam optimizer.\n n_encoder_layers: int\n n_decoder_layers: int\n hidden_dim: int\n num_attn_heads: int\n mlp_hidden_dim: Union[None, int] = None\n kv_channels: Union[None, int] = None\n bytes_per_element: int = 2\n optimizer_state_multiplier: int = 12\n\n def __post_init__(self):\n if self.mlp_hidden_dim is None:\n # if not specified, use the 4x hidden dim as it is the norm\n self.mlp_hidden_dim = self.hidden_dim * 4\n if self.kv_channels is None:\n # if not specified, use the hidden_dim // num_attn_heads\n assert self.hidden_dim % self.num_attn_heads == 0\n self.kv_channels = self.hidden_dim // self.num_attn_heads\n\n def serialize(self) -> bytes:\n def _serialize_int(x: int):\n return x.to_bytes(4, \"little\")\n\n return b\"\".join(\n [\n _serialize_int(x)\n for x in [\n self.n_encoder_layers,\n self.n_decoder_layers,\n self.hidden_dim,\n self.num_attn_heads,\n self.mlp_hidden_dim,\n self.kv_channels,\n self.bytes_per_element,\n self.optimizer_state_multiplier,\n ]\n ]\n )\n\n @classmethod\n def deserialize(cls, data: bytes):\n def _deserialize_int(data: bytes):\n return int.from_bytes(data, \"little\")\n\n return cls(\n *[_deserialize_int(data[i * 4 : (i + 1) * 4]) for i in range(8)]\n )" }, { "identifier": "deserialize_list_of_eps", "path": "dynapipe/pipe/instructions.py", "snippet": "def deserialize_list_of_eps(\n bytes: bytes, config=SerializationConfig(), deserialize_inner=True\n) -> Tuple[List[Union[ExecutionPlan, bytes]]]:\n \"\"\"Deserialize a list of execution plans from a byte array.\"\"\"\n n_eps = int.from_bytes(\n bytes[: config.EXECUTION_PLAN_META_BYTES],\n config.BYTES_ENDIANNESS,\n )\n bytes = bytes[config.EXECUTION_PLAN_META_BYTES :]\n eps = []\n for _ in range(n_eps):\n ep_bytes_len = int.from_bytes(\n bytes[: config.SERIALIZED_SIZE_BYTES],\n config.BYTES_ENDIANNESS,\n )\n bytes = bytes[config.SERIALIZED_SIZE_BYTES :]\n ep_bytes = bytes[:ep_bytes_len]\n if deserialize_inner:\n ep = ExecutionPlan.deserialize(ep_bytes, config=config)\n eps.append(ep)\n else:\n eps.append(ep_bytes)\n bytes = bytes[ep_bytes_len:]\n assert len(bytes) == 0\n return eps" }, { "identifier": "serialize_list_of_eps", "path": "dynapipe/pipe/instructions.py", "snippet": "def serialize_list_of_eps(\n eps: List[ExecutionPlan], config=SerializationConfig()\n) -> bytes:\n \"\"\"Serialize a list of execution plans to a byte array.\"\"\"\n result = len(eps).to_bytes(\n config.EXECUTION_PLAN_META_BYTES, config.BYTES_ENDIANNESS\n )\n for ep in eps:\n ep_bytes = ep.serialize(config)\n ep_bytes_len = len(ep_bytes).to_bytes(\n config.SERIALIZED_SIZE_BYTES, config.BYTES_ENDIANNESS\n )\n result += ep_bytes_len + ep_bytes\n\n return result" }, { "identifier": "ExecutionPlanner", "path": "dynapipe/schedule_opt/execution_planner.py", "snippet": "class ExecutionPlanner:\n def __init__(\n self,\n cluster_spec: DynaPipeCluster,\n model_spec: TransformerModelSpec,\n device_assignment: List[int],\n device_memory_limit: int,\n cost_model: ProfileBasedCostModelWithRC,\n dp_size: int = 1,\n tp_size: int = 1,\n zero_stage: int = 0,\n logger: Optional[logging.Logger] = None,\n ) -> None:\n self.cluster_spec = cluster_spec\n self.model_spec = model_spec\n self.cost_model = cost_model\n self.device_assignment = device_assignment\n self.n_devices = max(device_assignment) 
+ 1\n self.device_memory_limit = device_memory_limit\n self.dp_size = dp_size\n self.tp_size = tp_size\n self.zero_stage = zero_stage\n self.logger = logger\n (\n self.device_assignment_type,\n self.valid_schedule_methods,\n self.n_layers_per_stage,\n self.n_chunks_per_device,\n ) = validate_device_assignment(\n model_spec, cluster_spec, self.device_assignment\n )\n\n def _create_candidates(\n self,\n batch: List[Tuple[int, int, int]],\n schedule_method=\"dynamic\",\n rc_type=None,\n ):\n if rc_type is not None:\n if not isinstance(rc_type, list):\n available_rc_types = [rc_type]\n else:\n available_rc_types = rc_type\n else:\n available_rc_types = [\"none\", \"selective\", \"full\"]\n if schedule_method == \"dynamic\":\n sch_methods = self.valid_schedule_methods\n spec_args = []\n for rc_type in available_rc_types:\n for sch in sch_methods:\n spec_args.append((sch, rc_type))\n else:\n if schedule_method not in self.valid_schedule_methods:\n raise ValueError(\n \"Invalid schedule scheme: \"\n \"{} for device assignment: {}\".format(\n schedule_method, self.device_assignment\n )\n )\n spec_args = [\n (schedule_method, rc_type) for rc_type in available_rc_types\n ]\n candidates = []\n for schedule_method, rc_type in spec_args:\n minibatch_spec = construct_minibatch_spec(\n self.model_spec,\n self.cost_model,\n batch,\n rc_type,\n dp_size=self.dp_size,\n tp_size=self.tp_size,\n zero_stage=self.zero_stage,\n )\n if minibatch_spec is not None:\n candidates.append((schedule_method, rc_type, minibatch_spec))\n return candidates\n\n def _optimize_instructions(\n self,\n instructions: List[List[PipeInstruction]],\n n_stages: int,\n ):\n # instructions: instructions for each executor\n # Necessary steps to ensure correctness:\n # 1. Add CommunicationFinishInsturctions at appropriate places\n # 2. Allocate buffer slots (not buffer themselves)\n # Potential optimizations:\n # 1. Merge consecutive communication instructions (trade-off)\n # 2. Reschedule communication instructions\n # 3. 
Pre-allocate buffers to reduce memory fragmentation\n instrs, n_buffers = InstructionOptimizer(\n instructions, n_stages\n ).optimize()\n return instrs, n_buffers\n\n def generate_execution_plan(\n self,\n batch: List[Tuple[int, int, int]],\n limit_rc_type=None,\n schedule_method=\"dynamic\",\n disable_permute_microbatches=False,\n disable_scheduler_memory_limit=False,\n current_batch_idx=None,\n ):\n candidates = self._create_candidates(\n batch, schedule_method=schedule_method, rc_type=limit_rc_type\n )\n best_instrs = None\n best_sch = None\n best_rc = None\n best_cost = None\n best_stats = None\n for schedule_method, rc_type, minibatch_spec in candidates:\n (\n max_makespan,\n _,\n _,\n min_makespan,\n min_stats,\n min_instructions,\n ) = optimize_schedule(\n schedule_method,\n minibatch_spec,\n self.cluster_spec,\n self.device_assignment,\n try_permutations=not disable_permute_microbatches,\n include_memory_stats=True,\n progress_bar=False,\n memory_limit=self.device_memory_limit,\n disable_scheduler_memory_limit=disable_scheduler_memory_limit,\n raise_on_oom=False,\n rc_type=rc_type,\n logger=self.logger,\n )\n if max_makespan < 1e-5:\n # no feasible schedule\n if self.logger:\n self.logger.debug(\n \"No feasible schedule for batch {} \"\n \"using {} and recompute {}\".format(\n current_batch_idx, schedule_method, rc_type\n )\n )\n continue\n if best_cost is None or min_makespan < best_cost:\n best_cost = min_makespan\n best_sch = schedule_method\n best_rc = rc_type\n best_instrs = min_instructions\n best_stats = min_stats\n if best_instrs is None:\n raise RuntimeError(\n \"No feasible schedule for batch {}.\".format(current_batch_idx)\n )\n # get total number of stages\n best_instrs: List[List[PipeInstruction]]\n n_stages = (\n max([instr.stage for instrs in best_instrs for instr in instrs])\n + 1\n )\n assigned_stages_per_executor = []\n for instrs in best_instrs:\n assigned_stages = set()\n for instr in instrs:\n assigned_stages.add(instr.stage)\n assigned_stages = sorted(list(assigned_stages))\n assigned_stages_per_executor.append(assigned_stages)\n # construct execution plan\n if best_cost is None:\n # no feasible schedule\n return None, None, None, None, None\n assert len(best_instrs) == self.n_devices\n # run necessary optimization pass on instructions\n optimized_instrs, n_buffers = self._optimize_instructions(\n best_instrs, n_stages\n )\n execution_plans = [\n ExecutionPlan(\n instr,\n len(batch),\n self.n_devices,\n n_stages,\n i,\n assigned_stages_per_executor[i],\n name_to_recompute_method(best_rc),\n n_buffer,\n )\n for i, (instr, n_buffer) in enumerate(\n zip(optimized_instrs, n_buffers)\n )\n ]\n return execution_plans, best_cost, best_stats, best_rc, best_sch" }, { "identifier": "create_logger", "path": "dynapipe/utils/logger.py", "snippet": "class DynaPipeFormatter(logging.Formatter):\nclass LoggerWriter(object):\n def __init__(self, prefix=None, distributed_rank=None, colored=True):\n def _get_fmt_colored(self, level):\n def _get_fmt(self):\n def format(self, record):\n def __init__(self, writers):\n def write(self, message: str):\n def flush(self):\ndef create_logger(\n name=None,\n prefix=None,\n level=_default_logging_level,\n distributed_rank=None,\n log_file=None,\n):" }, { "identifier": "RedisKVStore", "path": "dynapipe/pipe/kv_redis.py", "snippet": "class RedisKVStore(object):\n # a blocking redis client\n def __init__(self, host, port, is_master=False):\n self.is_master = is_master\n self.host = host\n self.port = port\n if self.is_master:\n self.server = 
self._run_redis_server()\n # wait for redis server to start\n t = time.time()\n while True:\n try:\n self.client = redis.Redis(host=host, port=port, db=0)\n self.client.ping()\n break\n except redis.exceptions.ConnectionError:\n time.sleep(KVREDIS_POLLING_INTERVAL)\n if time.time() - t > KVREDIS_CONNECT_TIMEOUT:\n raise RuntimeError(\n \"WARNING: Cannot connect to KV Server. \"\n \"Is DYNAPIPE_KV_HOST and \"\n \"DYNAPIPE_KV_PORT set correctly?\"\n )\n continue\n # register cleanup\n atexit.register(self.__del__)\n\n def __del__(self):\n if self.is_master:\n if self.server.poll() is not None:\n return\n self.server.send_signal(subprocess.signal.SIGINT)\n self.server.wait()\n\n def _run_redis_server(self):\n # run a redis server\n p = subprocess.Popen(\n [\n REDIS_CMD,\n \"--save\",\n \"\",\n \"--port\",\n str(self.port),\n \"--bind\",\n str(self.host),\n ],\n shell=False,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.STDOUT,\n )\n return p\n\n def wait(self, keys, timeout=None):\n # wait for a key to be set\n time_start = datetime.datetime.now()\n if not isinstance(keys, (list, tuple)):\n keys = [keys]\n while True:\n if self.client.exists(*keys):\n break\n if (\n timeout is not None\n and datetime.datetime.now() - time_start > timeout\n ):\n # match torch kvstore behavior\n raise RuntimeError(\"Timeout\")\n time.sleep(KVREDIS_POLLING_INTERVAL)\n\n def get(self, key, wait=True):\n if wait:\n self.wait(key)\n return self.client.get(key)\n\n def set(self, key, value: str, logger=None):\n # match torch kvstore behavior\n value_bytes = value.encode()\n self.client.set(key, value_bytes)\n if logger:\n logger.debug(\"KVStore: set {} to {}\".format(key, value))\n\n def add(self, key, value: int):\n # match torch kvstore behavior\n return self.client.incr(key, value)\n\n def delete_key(self, key):\n return self.client.delete(key)" }, { "identifier": "validate_device_assignment", "path": "dynapipe/pipe/utils.py", "snippet": "def validate_device_assignment(\n model_spec: TransformerModelSpec,\n cluster_spec: DynaPipeCluster,\n device_assignment: List[int],\n):\n \"\"\"\n Validate device assignment and detect device assignment type.\n Args:\n device_assignment: List of device ids for each layer.\n \"\"\"\n appeared_devices = set()\n for device in device_assignment:\n if device not in appeared_devices:\n # new device\n assert device == len(appeared_devices), (\n \"Devices must appear in indexed order. \"\n \"e.g. 
[0, 1, 2, 3] is valid, \"\n \"[0, 1, 3, 2] is not valid.\"\n )\n appeared_devices.add(device)\n n_devices = len(appeared_devices)\n assert n_devices == cluster_spec.n_devices, (\n \"Number of devices used in device assignment \"\n \"must be equal to number of devices in cluster spec.\"\n )\n virtual_layer_to_actual_layers = [[]]\n virtual_layer_devices = [0]\n last_device = 0\n for device in device_assignment:\n if device == last_device:\n virtual_layer_to_actual_layers[-1].append(device)\n else:\n virtual_layer_to_actual_layers.append([device])\n virtual_layer_devices.append(device)\n last_device = device\n n_actual_layers_per_virtual_layer = len(virtual_layer_to_actual_layers[0])\n for virtual_layer in virtual_layer_to_actual_layers:\n n_encoder_layers_in_virtual_layer = len(\n [\n layer\n for layer in virtual_layer\n if layer < model_spec.n_encoder_layers\n ]\n )\n n_decoder_layers_in_virtual_layer = (\n len(virtual_layer) - n_encoder_layers_in_virtual_layer\n )\n if n_encoder_layers_in_virtual_layer > 0:\n assert (\n len(virtual_layer) == n_encoder_layers_in_virtual_layer\n ), \"Number of layers on each virtual layer must be the same.\"\n if n_decoder_layers_in_virtual_layer > 0:\n assert (\n len(virtual_layer) == n_decoder_layers_in_virtual_layer\n ), \"Number of layers on each virtual layer must be the same.\"\n if len(device_assignment) != n_actual_layers_per_virtual_layer:\n # only check if we are actually using pipeline parallelism\n assert (\n model_spec.n_encoder_layers % n_actual_layers_per_virtual_layer\n == 0\n ), (\n f\"Number of encoder layers ({model_spec.n_encoder_layers}) \"\n f\"must be divisible by number of layers on each virtual layer \"\n f\"({n_actual_layers_per_virtual_layer}).\"\n )\n assert (\n model_spec.n_decoder_layers % n_actual_layers_per_virtual_layer\n == 0\n ), (\n f\"Number of decoder layers ({model_spec.n_decoder_layers}) \"\n f\"must be divisible by number of layers on each virtual layer \"\n f\"({n_actual_layers_per_virtual_layer}).\"\n )\n # classify device assignment into linear, interleaved and other\n device_assignment_type = \"other\"\n if len(virtual_layer_devices) == n_devices:\n if virtual_layer_devices == list(range(n_devices)):\n device_assignment_type = \"linear\"\n else:\n n_chunks = len(virtual_layer_devices) // n_devices\n interleaved_assignment = list(range(n_devices)) * n_chunks\n if interleaved_assignment == virtual_layer_devices:\n device_assignment_type = \"interleaved\"\n if (\n device_assignment_type == \"interleaved\"\n and model_spec.n_decoder_layers == 0\n ):\n # interleaved device assignment is not supported for decoder only\n # models\n raise NotImplementedError(\n \"Interleaved device assignment is not supported \"\n \"for decoder only models.\"\n )\n valid_schedule_methods = [\"wait-free-cyclic\"]\n if device_assignment_type == \"linear\" and n_devices > 1:\n valid_schedule_methods.append(\"1F1B\")\n elif device_assignment_type == \"interleaved\":\n valid_schedule_methods.append(\"interleaved-1F1B\")\n n_chunks_per_device = len(virtual_layer_devices) // n_devices\n return (\n device_assignment_type,\n valid_schedule_methods,\n n_actual_layers_per_virtual_layer,\n n_chunks_per_device,\n )" } ]
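The DynaPipeCluster snippet in the context above estimates point-to-point communication time as latency plus transfer time, with bandwidth in Gbps and latency in microseconds. A self-contained sketch of that arithmetic (the numbers are made up for illustration; the real model reads them from the cluster spec and returns 0 for same-device transfers):

def comm_time_us(megabytes, bw_gbps, lat_us):
    # Mirrors DynaPipeCluster.get_comm_time: latency + size / bandwidth,
    # converting MB -> gigabits (x8 / 1e3) and seconds -> microseconds (x1e6).
    return lat_us + 1e6 * (megabytes * 8 / 1e3) / bw_gbps

# 64 MB across nodes at 100 Gbps with 10 us latency: 10 + 5120 = 5130 us (~5.1 ms).
print(comm_time_us(64, bw_gbps=100, lat_us=10))
# The same transfer inside a node at 300 Gbps with 2 us latency is ~1709 us.
print(comm_time_us(64, bw_gbps=300, lat_us=2))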
import json import logging import multiprocessing as mp import os import time import traceback import torch import pickle from dataclasses import dataclass, field, fields from queue import Empty from typing import List, Optional from torch.utils.data import DataLoader as PTDataLoader from dynapipe.data_opt.cost_models import ProfileBasedCostModelWithRC from dynapipe.data_opt.optimizer import DataAssignmentOptimizer from dynapipe.model import DynaPipeCluster, TransformerModelSpec from dynapipe.pipe.instructions import ( deserialize_list_of_eps, serialize_list_of_eps, ) from dynapipe.schedule_opt.execution_planner import ExecutionPlanner from dynapipe.utils.logger import create_logger, logger from .kv_redis import RedisKVStore from .utils import validate_device_assignment
12407
return kv_store, host, port def _checked_delete_key(kv_store: RedisKVStore, key: str, logger=None): result = kv_store.delete_key(key) if not result: raise RuntimeError( "Internal error: failed to delete key " "{}.".format(key) ) if logger is not None: logger.debug("Deleted key: {}".format(key)) def _get_from_shared_kv_store( kv_store: RedisKVStore, key: str, reader_idx: int, n_total_readers: int, decode: bool = True, logger=None, ): reader_count_key = key + "_rc" reader_ack_key = key + "_r{}_ack".format(reader_idx) # wait for reader ack if logger is not None: logger.debug("Waiting for reader ack key: {}".format(reader_ack_key)) kv_store.get(reader_ack_key) if logger is not None: logger.debug( "Got reader ack key: {}, waiting for data key: {}".format( reader_ack_key, key ) ) data = kv_store.get(key) if logger is not None: logger.debug("Removing reader ack key: {}".format(reader_ack_key)) # remove reader ack _checked_delete_key(kv_store, reader_ack_key, logger=logger) # get reader count reader_count = kv_store.add(reader_count_key, 1) if reader_count == n_total_readers: if logger is not None: logger.debug( "Last reader, reset reader count: {}".format(reader_count_key) ) # reset reader count result_readers = kv_store.add(reader_count_key, -n_total_readers) assert result_readers == 0 if logger is not None: logger.debug("Last reader, remove data key: {}".format(key)) # remove data key _checked_delete_key(kv_store, key, logger=logger) if logger is not None: logger.debug("Last reader, set ack key: {}".format(key + "_ack")) # set all reader ack keys keys_to_reset = [ key + "_r{}_ack".format(i) for i in range(n_total_readers) ] if logger is not None: logger.debug("Last reader, reset keys: {}".format(keys_to_reset)) for reset_key in keys_to_reset: val = kv_store.add(reset_key, 1) # make sure the key is set got_val = int(kv_store.get(reset_key).decode()) if not val == got_val: raise RuntimeError( "Failed to set reader ack key: {}".format(reset_key) ) if logger is not None: logger.debug("Set reader ack key: {}".format(reset_key)) # set data ack key kv_store.add(key + "_ack", 1) if decode: return data.decode() return data def _put_to_shared_kv_store( kv_store: RedisKVStore, key: str, data, logger=None ): # put execution plan into local kv store ack_key = key + "_ack" if logger is not None: logger.debug("Wait for data ack key: {}".format(ack_key)) # wait for ack key kv_store.get(ack_key) # remove ack key _checked_delete_key(kv_store, ack_key, logger=logger) if logger is not None: logger.debug("Set data key: {}".format(key)) # set data key kv_store.set(key, data) @dataclass class WorkerData: round_seqlen_multiple: Optional[int] = None logger: Optional[logging.Logger] = None kv_store: Optional[RedisKVStore] = None processed_batches: Optional[int] = None kv_buffer_size: Optional[int] = None seqlen_offset: Optional[int] = 0 def check_initialized(self): cls_fields = fields(self.__class__) for fld in cls_fields: if getattr(self, fld.name) is None: raise RuntimeError( "Worker data not initialized: {}".format(fld.name) ) @dataclass class PreprocessingWorkerData(WorkerData): # required at initialization: node_rank: Optional[int] = None profile_path: Optional[str] = None # filled later in worker init: dataopt: Optional[DataAssignmentOptimizer] = None
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 MANAGER_PROCESS_TIMEOUT = 1 RECEIVER_PROCESS_TIMEOUT = 1 KVSTORE_TIMEOUT = 1800 # 30 minutes # ONLY USED FOR DEBUG PURPOSES DEBUG_USE_DUMMY_EP = False DEBUG_DUMP_EP_STATS = os.getenv( "DYNAPIPE_DEBUG_DUMP_EP_STATS", "False" ).lower() in ("true", "1", "t") DEBUG_DUMP_EP_PREFIX = os.environ.get("DYNAPIPE_DEBUG_DUMP_EP_PREFIX", None) if DEBUG_DUMP_EP_STATS and DEBUG_DUMP_EP_PREFIX is None: raise ValueError( "DYNAPIPE_DEBUG_DUMP_EP_PREFIX must be set if " "DYNAPIPE_DEBUG_DUMP_EP_STATS is set." ) _kvstore_handle = None def _init_kv_store(is_master, logger=None): host = os.environ.get("DYNAPIPE_KV_HOST", "localhost") port = os.environ.get("DYNAPIPE_KV_PORT", 29500) if logger is not None: logger.debug( "Init kv store, is_master: {}, host: {}, port: {}".format( is_master, host, port ) ) # kv_store = torch.distributed.TCPStore( # "127.0.0.1", # port, # is_master=is_master, # timeout=timedelta(seconds=KVSTORE_TIMEOUT), # ) kv_store = RedisKVStore(host, port, is_master=is_master) return kv_store, host, port def _checked_delete_key(kv_store: RedisKVStore, key: str, logger=None): result = kv_store.delete_key(key) if not result: raise RuntimeError( "Internal error: failed to delete key " "{}.".format(key) ) if logger is not None: logger.debug("Deleted key: {}".format(key)) def _get_from_shared_kv_store( kv_store: RedisKVStore, key: str, reader_idx: int, n_total_readers: int, decode: bool = True, logger=None, ): reader_count_key = key + "_rc" reader_ack_key = key + "_r{}_ack".format(reader_idx) # wait for reader ack if logger is not None: logger.debug("Waiting for reader ack key: {}".format(reader_ack_key)) kv_store.get(reader_ack_key) if logger is not None: logger.debug( "Got reader ack key: {}, waiting for data key: {}".format( reader_ack_key, key ) ) data = kv_store.get(key) if logger is not None: logger.debug("Removing reader ack key: {}".format(reader_ack_key)) # remove reader ack _checked_delete_key(kv_store, reader_ack_key, logger=logger) # get reader count reader_count = kv_store.add(reader_count_key, 1) if reader_count == n_total_readers: if logger is not None: logger.debug( "Last reader, reset reader count: {}".format(reader_count_key) ) # reset reader count result_readers = kv_store.add(reader_count_key, -n_total_readers) assert result_readers == 0 if logger is not None: logger.debug("Last reader, remove data key: {}".format(key)) # remove data key _checked_delete_key(kv_store, key, logger=logger) if logger is not None: logger.debug("Last reader, set ack key: {}".format(key + "_ack")) # set all reader ack keys keys_to_reset = [ key + "_r{}_ack".format(i) for i in range(n_total_readers) ] if logger is not None: logger.debug("Last reader, reset keys: {}".format(keys_to_reset)) for reset_key in keys_to_reset: val = kv_store.add(reset_key, 1) # make sure the key is set got_val = int(kv_store.get(reset_key).decode()) if not val == got_val: raise RuntimeError( "Failed to set reader ack key: {}".format(reset_key) ) if logger is not None: logger.debug("Set reader ack key: {}".format(reset_key)) # set data ack key kv_store.add(key + "_ack", 1) if decode: return data.decode() return data def _put_to_shared_kv_store( kv_store: RedisKVStore, key: str, data, logger=None ): # put execution plan into local kv store ack_key = key + "_ack" if logger is not None: logger.debug("Wait for data ack key: {}".format(ack_key)) # wait for ack key kv_store.get(ack_key) # remove ack key _checked_delete_key(kv_store, 
ack_key, logger=logger) if logger is not None: logger.debug("Set data key: {}".format(key)) # set data key kv_store.set(key, data) @dataclass class WorkerData: round_seqlen_multiple: Optional[int] = None logger: Optional[logging.Logger] = None kv_store: Optional[RedisKVStore] = None processed_batches: Optional[int] = None kv_buffer_size: Optional[int] = None seqlen_offset: Optional[int] = 0 def check_initialized(self): cls_fields = fields(self.__class__) for fld in cls_fields: if getattr(self, fld.name) is None: raise RuntimeError( "Worker data not initialized: {}".format(fld.name) ) @dataclass class PreprocessingWorkerData(WorkerData): # required at initialization: node_rank: Optional[int] = None profile_path: Optional[str] = None # filled later in worker init: dataopt: Optional[DataAssignmentOptimizer] = None
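The TransformerModelSpec snippet in the context above serializes its eight integer fields as 4-byte little-endian values (32 bytes total) and reconstructs them positionally. A small round-trip sketch, assuming the dynapipe package from this record is importable; the layer counts and dimensions are arbitrary example values:

from dynapipe.model import TransformerModelSpec

spec = TransformerModelSpec(
    n_encoder_layers=12,
    n_decoder_layers=12,
    hidden_dim=768,
    num_attn_heads=12,
)
blob = spec.serialize()
assert len(blob) == 32  # 8 fields x 4 bytes each
restored = TransformerModelSpec.deserialize(blob)
# Derived defaults survive the round trip: mlp_hidden_dim = 4 x hidden_dim,
# kv_channels = hidden_dim // num_attn_heads.
assert restored.hidden_dim == 768 and restored.mlp_hidden_dim == 3072
assert restored.kv_channels == 768 // 12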
exec_planner: Optional[ExecutionPlanner] = None
6
2023-11-08 07:58:20+00:00
16k
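The DataAssignmentOptimizer snippet in the record above rounds every sequence length up to a multiple required by some kernels (8 by default), adds extra decoder tokens, and honors a sequence-length offset for decoder-only models. A self-contained sketch of that rounding rule, with illustrative parameter values:

def round_seqlen(seqlen, multiple=8, decoder=False,
                 len_decoder_additional_tokens=2, seqlen_offset=0):
    # Mirrors DataAssignmentOptimizer._round_seqlen: pad decoder sequences,
    # strip the offset, round up to the nearest multiple, then re-add the offset.
    if decoder:
        seqlen += len_decoder_additional_tokens
    seqlen -= seqlen_offset
    return (seqlen + multiple - 1) // multiple * multiple + seqlen_offset

assert round_seqlen(13) == 16                   # 13 -> next multiple of 8
assert round_seqlen(16) == 16                   # already aligned
assert round_seqlen(13, decoder=True) == 16     # 13 + 2 extra tokens = 15 -> 16
assert round_seqlen(17, seqlen_offset=1) == 17  # decoder-only offset handling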
SqueezeAILab/LLMCompiler
src/llm_compiler/llm_compiler.py
[ { "identifier": "AsyncStatsCallbackHandler", "path": "src/callbacks/callbacks.py", "snippet": "class AsyncStatsCallbackHandler(AsyncCallbackHandler):\n \"\"\"Collect useful stats about the run.\n Add more stats as needed.\"\"\"\n\n def __init__(self, stream: bool = False) -> None:\n super().__init__()\n self.cnt = 0\n self.input_tokens = 0\n self.output_tokens = 0\n # same for gpt-3.5\n self.encoder = tiktoken.encoding_for_model(\"gpt-4\")\n self.stream = stream\n self.all_times = []\n self.start_time = 0\n\n async def on_chat_model_start(self, serialized, prompts, **kwargs):\n self.start_time = time.time()\n if self.stream:\n # if streaming mode, on_llm_end response is not collected\n # therefore, we need to count input token based on the\n # prompt length at the beginning\n self.cnt += 1\n self.input_tokens += len(self.encoder.encode(prompts[0][0].content))\n\n async def on_llm_new_token(self, token, *args, **kwargs):\n if self.stream:\n # if streaming mode, on_llm_end response is not collected\n # therefore, we need to manually count output token based on the\n # number of streamed out tokens\n self.output_tokens += 1\n\n async def on_llm_end(self, response, *args, **kwargs):\n self.all_times.append(round(time.time() - self.start_time, 2))\n if not self.stream:\n # if not streaming mode, on_llm_end response is collected\n # so we can use this stats directly\n token_usage = response.llm_output[\"token_usage\"]\n self.input_tokens += token_usage[\"prompt_tokens\"]\n self.output_tokens += token_usage[\"completion_tokens\"]\n self.cnt += 1\n\n def reset(self) -> None:\n self.cnt = 0\n self.input_tokens = 0\n self.output_tokens = 0\n self.all_times = []\n\n def get_stats(self) -> dict[str, int]:\n return {\n \"calls\": self.cnt,\n \"input_tokens\": self.input_tokens,\n \"output_tokens\": self.output_tokens,\n \"all_times\": self.all_times,\n }" }, { "identifier": "Chain", "path": "src/chains/chain.py", "snippet": "class Chain(Serializable, Runnable[Dict[str, Any], Dict[str, Any]], ABC):\n \"\"\"Abstract base class for creating structured sequences of calls to components.\n\n Chains should be used to encode a sequence of calls to components like\n models, document retrievers, other chains, etc., and provide a simple interface\n to this sequence.\n\n Copied from langchain v0.0.283.\n\n The Chain interface makes it easy to create apps that are:\n - Stateful: add Memory to any Chain to give it state,\n - Observable: pass Callbacks to a Chain to execute additional functionality,\n like logging, outside the main sequence of component calls,\n - Composable: the Chain API is flexible enough that it is easy to combine\n Chains with other components, including other Chains.\n\n The main methods exposed by chains are:\n - `__call__`: Chains are callable. The `__call__` method is the primary way to\n execute a Chain. This takes inputs as a dictionary and returns a\n dictionary output.\n - `run`: A convenience method that takes inputs as args/kwargs and returns the\n output as a string or object. 
This method can only be used for a subset of\n chains and cannot return as rich of an output as `__call__`.\n \"\"\"\n\n def invoke(\n self,\n input: Dict[str, Any],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Dict[str, Any]:\n config = config or {}\n return self(\n input,\n callbacks=config.get(\"callbacks\"),\n tags=config.get(\"tags\"),\n metadata=config.get(\"metadata\"),\n run_name=config.get(\"run_name\"),\n **kwargs,\n )\n\n async def ainvoke(\n self,\n input: Dict[str, Any],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Dict[str, Any]:\n if type(self)._acall == Chain._acall:\n # If the chain does not implement async, fall back to default implementation\n return await asyncio.get_running_loop().run_in_executor(\n None, partial(self.invoke, input, config, **kwargs)\n )\n\n config = config or {}\n return await self.acall(\n input,\n callbacks=config.get(\"callbacks\"),\n tags=config.get(\"tags\"),\n metadata=config.get(\"metadata\"),\n run_name=config.get(\"run_name\"),\n **kwargs,\n )\n\n memory: Optional[BaseMemory] = None\n \"\"\"Optional memory object. Defaults to None.\n Memory is a class that gets called at the start\n and at the end of every chain. At the start, memory loads variables and passes\n them along in the chain. At the end, it saves any returned variables.\n There are many different types of memory - please see memory docs\n for the full catalog.\"\"\"\n callbacks: Callbacks = Field(default=None, exclude=True)\n \"\"\"Optional list of callback handlers (or callback manager). Defaults to None.\n Callback handlers are called throughout the lifecycle of a call to a chain,\n starting with on_chain_start, ending with on_chain_end or on_chain_error.\n Each custom chain can optionally call additional callback methods, see Callback docs\n for full details.\"\"\"\n callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)\n \"\"\"Deprecated, use `callbacks` instead.\"\"\"\n verbose: bool = Field(default_factory=_get_verbosity)\n \"\"\"Whether or not run in verbose mode. In verbose mode, some intermediate logs\n will be printed to the console. Defaults to `langchain.verbose` value.\"\"\"\n tags: Optional[List[str]] = None\n \"\"\"Optional list of tags associated with the chain. Defaults to None.\n These tags will be associated with each call to this chain,\n and passed as arguments to the handlers defined in `callbacks`.\n You can use these to eg identify a specific instance of a chain with its use case.\n \"\"\"\n metadata: Optional[Dict[str, Any]] = None\n \"\"\"Optional metadata associated with the chain. Defaults to None.\n This metadata will be associated with each call to this chain,\n and passed as arguments to the handlers defined in `callbacks`.\n You can use these to eg identify a specific instance of a chain with its use case.\n \"\"\"\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n @property\n def _chain_type(self) -> str:\n raise NotImplementedError(\"Saving not supported for this chain type.\")\n\n @root_validator()\n def raise_callback_manager_deprecation(cls, values: Dict) -> Dict:\n \"\"\"Raise deprecation warning if callback_manager is used.\"\"\"\n if values.get(\"callback_manager\") is not None:\n if values.get(\"callbacks\") is not None:\n raise ValueError(\n \"Cannot specify both callback_manager and callbacks. 
\"\n \"callback_manager is deprecated, callbacks is the preferred \"\n \"parameter to pass in.\"\n )\n warnings.warn(\n \"callback_manager is deprecated. Please use callbacks instead.\",\n DeprecationWarning,\n )\n values[\"callbacks\"] = values.pop(\"callback_manager\", None)\n return values\n\n @validator(\"verbose\", pre=True, always=True)\n def set_verbose(cls, verbose: Optional[bool]) -> bool:\n \"\"\"Set the chain verbosity.\n\n Defaults to the global setting if not specified by the user.\n \"\"\"\n if verbose is None:\n return _get_verbosity()\n else:\n return verbose\n\n @property\n @abstractmethod\n def input_keys(self) -> List[str]:\n \"\"\"Keys expected to be in the chain input.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def output_keys(self) -> List[str]:\n \"\"\"Keys expected to be in the chain output.\"\"\"\n raise NotImplementedError\n\n def _validate_inputs(self, inputs: Dict[str, Any]) -> None:\n \"\"\"Check that all inputs are present.\"\"\"\n missing_keys = set(self.input_keys).difference(inputs)\n if missing_keys:\n raise ValueError(f\"Missing some input keys: {missing_keys}\")\n\n def _validate_outputs(self, outputs: Dict[str, Any]) -> None:\n missing_keys = set(self.output_keys).difference(outputs)\n if missing_keys:\n raise ValueError(f\"Missing some output keys: {missing_keys}\")\n\n @abstractmethod\n def _call(\n self,\n inputs: Dict[str, Any],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Dict[str, Any]:\n \"\"\"Execute the chain.\n\n This is a private method that is not user-facing. It is only called within\n `Chain.__call__`, which is the user-facing wrapper method that handles\n callbacks configuration and some input/output processing.\n\n Args:\n inputs: A dict of named inputs to the chain. Assumed to contain all inputs\n specified in `Chain.input_keys`, including any inputs added by memory.\n run_manager: The callbacks manager that contains the callback handlers for\n this run of the chain.\n\n Returns:\n A dict of named outputs. Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n raise NotImplementedError\n\n async def _acall(\n self,\n inputs: Dict[str, Any],\n run_manager: Optional[AsyncCallbackManagerForChainRun] = None,\n ) -> Dict[str, Any]:\n \"\"\"Asynchronously execute the chain.\n\n This is a private method that is not user-facing. It is only called within\n `Chain.acall`, which is the user-facing wrapper method that handles\n callbacks configuration and some input/output processing.\n\n Args:\n inputs: A dict of named inputs to the chain. Assumed to contain all inputs\n specified in `Chain.input_keys`, including any inputs added by memory.\n run_manager: The callbacks manager that contains the callback handlers for\n this run of the chain.\n\n Returns:\n A dict of named outputs. Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n raise NotImplementedError(\"Async call not supported for this chain type.\")\n\n def __call__(\n self,\n inputs: Union[Dict[str, Any], Any],\n return_only_outputs: bool = False,\n callbacks: Callbacks = None,\n *,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n run_name: Optional[str] = None,\n include_run_info: bool = False,\n ) -> Dict[str, Any]:\n \"\"\"Execute the chain.\n\n Args:\n inputs: Dictionary of inputs, or single input if chain expects\n only one param. 
Should contain all inputs specified in\n `Chain.input_keys` except for inputs that will be set by the chain's\n memory.\n return_only_outputs: Whether to return only outputs in the\n response. If True, only new keys generated by this chain will be\n returned. If False, both input keys and new keys generated by this\n chain will be returned. Defaults to False.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n metadata: Optional metadata associated with the chain. Defaults to None\n include_run_info: Whether to include run info in the response. Defaults\n to False.\n\n Returns:\n A dict of named outputs. Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n inputs = self.prep_inputs(inputs)\n callback_manager = CallbackManager.configure(\n callbacks,\n self.callbacks,\n self.verbose,\n tags,\n self.tags,\n metadata,\n self.metadata,\n )\n new_arg_supported = inspect.signature(self._call).parameters.get(\"run_manager\")\n run_manager = callback_manager.on_chain_start(\n dumpd(self),\n inputs,\n name=run_name,\n )\n try:\n outputs = (\n self._call(inputs, run_manager=run_manager)\n if new_arg_supported\n else self._call(inputs)\n )\n except (KeyboardInterrupt, Exception) as e:\n run_manager.on_chain_error(e)\n raise e\n run_manager.on_chain_end(outputs)\n final_outputs: Dict[str, Any] = self.prep_outputs(\n inputs, outputs, return_only_outputs\n )\n if include_run_info:\n final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)\n return final_outputs\n\n async def acall(\n self,\n inputs: Union[Dict[str, Any], Any],\n return_only_outputs: bool = False,\n callbacks: Callbacks = None,\n *,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n run_name: Optional[str] = None,\n include_run_info: bool = False,\n ) -> Dict[str, Any]:\n \"\"\"Asynchronously execute the chain.\n\n Args:\n inputs: Dictionary of inputs, or single input if chain expects\n only one param. Should contain all inputs specified in\n `Chain.input_keys` except for inputs that will be set by the chain's\n memory.\n return_only_outputs: Whether to return only outputs in the\n response. If True, only new keys generated by this chain will be\n returned. If False, both input keys and new keys generated by this\n chain will be returned. Defaults to False.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n metadata: Optional metadata associated with the chain. Defaults to None\n include_run_info: Whether to include run info in the response. Defaults\n to False.\n\n Returns:\n A dict of named outputs. 
Should contain all outputs specified in\n `Chain.output_keys`.\n \"\"\"\n inputs = self.prep_inputs(inputs)\n callback_manager = AsyncCallbackManager.configure(\n callbacks,\n self.callbacks,\n self.verbose,\n tags,\n self.tags,\n metadata,\n self.metadata,\n )\n new_arg_supported = inspect.signature(self._acall).parameters.get(\"run_manager\")\n run_manager = await callback_manager.on_chain_start(\n dumpd(self),\n inputs,\n name=run_name,\n )\n try:\n outputs = (\n await self._acall(inputs, run_manager=run_manager)\n if new_arg_supported\n else await self._acall(inputs)\n )\n except (KeyboardInterrupt, Exception) as e:\n await run_manager.on_chain_error(e)\n raise e\n await run_manager.on_chain_end(outputs)\n final_outputs: Dict[str, Any] = self.prep_outputs(\n inputs, outputs, return_only_outputs\n )\n if include_run_info:\n final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)\n return final_outputs\n\n def prep_outputs(\n self,\n inputs: Dict[str, str],\n outputs: Dict[str, str],\n return_only_outputs: bool = False,\n ) -> Dict[str, str]:\n \"\"\"Validate and prepare chain outputs, and save info about this run to memory.\n\n Args:\n inputs: Dictionary of chain inputs, including any inputs added by chain\n memory.\n outputs: Dictionary of initial chain outputs.\n return_only_outputs: Whether to only return the chain outputs. If False,\n inputs are also added to the final outputs.\n\n Returns:\n A dict of the final chain outputs.\n \"\"\"\n self._validate_outputs(outputs)\n if self.memory is not None:\n self.memory.save_context(inputs, outputs)\n if return_only_outputs:\n return outputs\n else:\n return {**inputs, **outputs}\n\n def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:\n \"\"\"Validate and prepare chain inputs, including adding inputs from memory.\n\n Args:\n inputs: Dictionary of raw inputs, or single input if chain expects\n only one param. Should contain all inputs specified in\n `Chain.input_keys` except for inputs that will be set by the chain's\n memory.\n\n Returns:\n A dictionary of all inputs, including those added by the chain's memory.\n \"\"\"\n if not isinstance(inputs, dict):\n _input_keys = set(self.input_keys)\n if self.memory is not None:\n # If there are multiple input keys, but some get set by memory so that\n # only one is not set, we can still figure out which key it is.\n _input_keys = _input_keys.difference(self.memory.memory_variables)\n if len(_input_keys) != 1:\n raise ValueError(\n f\"A single string input was passed in, but this chain expects \"\n f\"multiple inputs ({_input_keys}). When a chain expects \"\n f\"multiple inputs, please call it by passing in a dictionary, \"\n \"eg `chain({'foo': 1, 'bar': 2})`\"\n )\n inputs = {list(_input_keys)[0]: inputs}\n if self.memory is not None:\n external_context = self.memory.load_memory_variables(inputs)\n inputs = dict(inputs, **external_context)\n self._validate_inputs(inputs)\n return inputs\n\n @property\n def _run_output_key(self) -> str:\n if len(self.output_keys) != 1:\n raise ValueError(\n f\"`run` not supported when there is not exactly \"\n f\"one output key. 
Got {self.output_keys}.\"\n )\n return self.output_keys[0]\n\n def run(\n self,\n *args: Any,\n callbacks: Callbacks = None,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Convenience method for executing chain.\n\n The main difference between this method and `Chain.__call__` is that this\n method expects inputs to be passed directly in as positional arguments or\n keyword arguments, whereas `Chain.__call__` expects a single input dictionary\n with all the inputs\n\n Args:\n *args: If the chain expects a single input, it can be passed in as the\n sole positional argument.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n **kwargs: If the chain expects multiple inputs, they can be passed in\n directly as keyword arguments.\n\n Returns:\n The chain output.\n\n Example:\n .. code-block:: python\n\n # Suppose we have a single-input chain that takes a 'question' string:\n chain.run(\"What's the temperature in Boise, Idaho?\")\n # -> \"The temperature in Boise is...\"\n\n # Suppose we have a multi-input chain that takes a 'question' string\n # and 'context' string:\n question = \"What's the temperature in Boise, Idaho?\"\n context = \"Weather report for Boise, Idaho on 07/03/23...\"\n chain.run(question=question, context=context)\n # -> \"The temperature in Boise is...\"\n \"\"\"\n # Run at start to make sure this is possible/defined\n _output_key = self._run_output_key\n\n if args and not kwargs:\n if len(args) != 1:\n raise ValueError(\"`run` supports only one positional argument.\")\n return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[\n _output_key\n ]\n\n if kwargs and not args:\n return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[\n _output_key\n ]\n\n if not kwargs and not args:\n raise ValueError(\n \"`run` supported with either positional arguments or keyword arguments,\"\n \" but none were provided.\"\n )\n else:\n raise ValueError(\n f\"`run` supported with either positional arguments or keyword arguments\"\n f\" but not both. Got args: {args} and kwargs: {kwargs}.\"\n )\n\n async def arun(\n self,\n *args: Any,\n callbacks: Callbacks = None,\n tags: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Convenience method for executing chain.\n\n The main difference between this method and `Chain.__call__` is that this\n method expects inputs to be passed directly in as positional arguments or\n keyword arguments, whereas `Chain.__call__` expects a single input dictionary\n with all the inputs\n\n\n Args:\n *args: If the chain expects a single input, it can be passed in as the\n sole positional argument.\n callbacks: Callbacks to use for this chain run. These will be called in\n addition to callbacks passed to the chain during construction, but only\n these runtime callbacks will propagate to calls to other objects.\n tags: List of string tags to pass to all callbacks. 
These will be passed in\n addition to tags passed to the chain during construction, but only\n these runtime tags will propagate to calls to other objects.\n **kwargs: If the chain expects multiple inputs, they can be passed in\n directly as keyword arguments.\n\n Returns:\n The chain output.\n\n Example:\n .. code-block:: python\n\n # Suppose we have a single-input chain that takes a 'question' string:\n await chain.arun(\"What's the temperature in Boise, Idaho?\")\n # -> \"The temperature in Boise is...\"\n\n # Suppose we have a multi-input chain that takes a 'question' string\n # and 'context' string:\n question = \"What's the temperature in Boise, Idaho?\"\n context = \"Weather report for Boise, Idaho on 07/03/23...\"\n await chain.arun(question=question, context=context)\n # -> \"The temperature in Boise is...\"\n \"\"\"\n if len(self.output_keys) != 1:\n raise ValueError(\n f\"`run` not supported when there is not exactly \"\n f\"one output key. Got {self.output_keys}.\"\n )\n elif args and not kwargs:\n if len(args) != 1:\n raise ValueError(\"`run` supports only one positional argument.\")\n return (\n await self.acall(\n args[0], callbacks=callbacks, tags=tags, metadata=metadata\n )\n )[self.output_keys[0]]\n\n if kwargs and not args:\n return (\n await self.acall(\n kwargs, callbacks=callbacks, tags=tags, metadata=metadata\n )\n )[self.output_keys[0]]\n\n raise ValueError(\n f\"`run` supported with either positional arguments or keyword arguments\"\n f\" but not both. Got args: {args} and kwargs: {kwargs}.\"\n )\n\n def dict(self, **kwargs: Any) -> Dict:\n \"\"\"Dictionary representation of chain.\n\n Expects `Chain._chain_type` property to be implemented and for memory to be\n null.\n\n Args:\n **kwargs: Keyword arguments passed to default `pydantic.BaseModel.dict`\n method.\n\n Returns:\n A dictionary representation of the chain.\n\n Example:\n .. code-block:: python\n\n chain.dict(exclude_unset=True)\n # -> {\"_type\": \"foo\", \"verbose\": False, ...}\n \"\"\"\n if self.memory is not None:\n raise ValueError(\"Saving of memory is not yet supported.\")\n _dict = super().dict(**kwargs)\n _dict[\"_type\"] = self._chain_type\n return _dict\n\n def save(self, file_path: Union[Path, str]) -> None:\n \"\"\"Save the chain.\n\n Expects `Chain._chain_type` property to be implemented and for memory to be\n null.\n\n Args:\n file_path: Path to file to save the chain to.\n\n Example:\n .. 
code-block:: python\n\n chain.save(file_path=\"path/chain.yaml\")\n \"\"\"\n # Convert file to Path object.\n if isinstance(file_path, str):\n save_path = Path(file_path)\n else:\n save_path = file_path\n\n directory_path = save_path.parent\n directory_path.mkdir(parents=True, exist_ok=True)\n\n # Fetch dictionary to save\n chain_dict = self.dict()\n\n if save_path.suffix == \".json\":\n with open(file_path, \"w\") as f:\n json.dump(chain_dict, f, indent=4)\n elif save_path.suffix == \".yaml\":\n with open(file_path, \"w\") as f:\n yaml.dump(chain_dict, f, default_flow_style=False)\n else:\n raise ValueError(f\"{save_path} must be json or yaml\")\n\n def apply(\n self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None\n ) -> List[Dict[str, str]]:\n \"\"\"Call the chain on all inputs in the list.\"\"\"\n return [self(inputs, callbacks=callbacks) for inputs in input_list]" }, { "identifier": "JOINNER_REPLAN", "path": "src/llm_compiler/constants.py", "snippet": "JOINNER_REPLAN = \"Replan\"" }, { "identifier": "Planner", "path": "src/llm_compiler/planner.py", "snippet": "class Planner:\n def __init__(\n self,\n llm: BaseChatModel,\n example_prompt: str,\n example_prompt_replan: str,\n tools: Sequence[Union[Tool, StructuredTool]],\n stop: Optional[list[str]],\n ):\n self.llm = llm\n # different system prompt is needed when replanning\n # since they have different guidelines, and also examples provided by the user\n self.system_prompt = generate_llm_compiler_prompt(\n tools=tools,\n example_prompt=example_prompt,\n is_replan=False,\n )\n self.system_prompt_replan = generate_llm_compiler_prompt(\n tools=tools,\n example_prompt=example_prompt_replan,\n is_replan=True,\n )\n self.tools = tools\n self.output_parser = LLMCompilerPlanParser(tools=tools)\n self.stop = stop\n\n async def run_llm(\n self,\n inputs: dict[str, Any],\n is_replan: bool = False,\n callbacks: Callbacks = None,\n ) -> str:\n \"\"\"Run the LLM.\"\"\"\n if is_replan:\n system_prompt = self.system_prompt_replan\n assert \"context\" in inputs, \"If replanning, context must be provided\"\n human_prompt = f\"Question: {inputs['input']}\\n{inputs['context']}\\n\"\n else:\n system_prompt = self.system_prompt\n human_prompt = f\"Question: {inputs['input']}\"\n\n messages = [\n SystemMessage(content=system_prompt),\n HumanMessage(content=human_prompt),\n ]\n\n llm_response = await self.llm._call_async(\n messages,\n callbacks=callbacks,\n stop=self.stop,\n )\n log(\"LLMCompiler planner response: \\n\", llm_response.content, block=True)\n\n return llm_response.content\n\n async def plan(\n self, inputs: dict, is_replan: bool, callbacks: Callbacks = None, **kwargs: Any\n ):\n llm_response = await self.run_llm(\n inputs=inputs, is_replan=is_replan, callbacks=callbacks\n )\n llm_response = llm_response + \"\\n\"\n return self.output_parser.parse(llm_response)\n\n async def aplan(\n self,\n inputs: dict,\n task_queue: asyncio.Queue[Optional[str]],\n is_replan: bool,\n callbacks: Callbacks = None,\n **kwargs: Any,\n ) -> Plan:\n \"\"\"Given input, asynchronously decide what to do.\"\"\"\n all_callbacks = [\n LLMCompilerCallback(\n queue=task_queue,\n tools=self.tools,\n )\n ]\n if callbacks:\n all_callbacks.extend(callbacks)\n await self.run_llm(inputs=inputs, is_replan=is_replan, callbacks=all_callbacks)" }, { "identifier": "Task", "path": "src/llm_compiler/task_fetching_unit.py", "snippet": "class Task:\n idx: int\n name: str\n tool: Callable\n args: Collection[Any]\n dependencies: Collection[int]\n stringify_rule: 
Optional[Callable] = None\n thought: Optional[str] = None\n observation: Optional[str] = None\n is_join: bool = False\n\n async def __call__(self) -> Any:\n log(\"running task\")\n x = await self.tool(*self.args)\n log(\"done task\")\n return x\n\n def get_though_action_observation(\n self, include_action=True, include_thought=True, include_action_idx=False\n ) -> str:\n thought_action_observation = \"\"\n if self.thought and include_thought:\n thought_action_observation = f\"Thought: {self.thought}\\n\"\n if include_action:\n idx = f\"{self.idx}. \" if include_action_idx else \"\"\n if self.stringify_rule:\n # If the user has specified a custom stringify rule for the\n # function argument, use it\n thought_action_observation += f\"{idx}{self.stringify_rule(self.args)}\\n\"\n else:\n # Otherwise, we have a default stringify rule\n thought_action_observation += (\n f\"{idx}{self.name}\"\n f\"{_default_stringify_rule_for_arguments(self.args)}\\n\"\n )\n if self.observation is not None:\n thought_action_observation += f\"Observation: {self.observation}\\n\"\n return thought_action_observation" }, { "identifier": "TaskFetchingUnit", "path": "src/llm_compiler/task_fetching_unit.py", "snippet": "class TaskFetchingUnit:\n tasks: Dict[str, Task]\n tasks_done: Dict[str, asyncio.Event]\n remaining_tasks: set[str]\n\n def __init__(self):\n self.tasks = {}\n self.tasks_done = {}\n self.remaining_tasks = set()\n\n def set_tasks(self, tasks: dict[str, Any]):\n self.tasks.update(tasks)\n self.tasks_done.update({task_idx: asyncio.Event() for task_idx in tasks})\n self.remaining_tasks.update(set(tasks.keys()))\n\n def _all_tasks_done(self):\n return all(self.tasks_done[d].is_set() for d in self.tasks_done)\n\n def _get_all_executable_tasks(self):\n return [\n task_name\n for task_name in self.remaining_tasks\n if all(\n self.tasks_done[d].is_set() for d in self.tasks[task_name].dependencies\n )\n ]\n\n def _preprocess_args(self, task: Task):\n \"\"\"Replace dependency placeholders, i.e. 
${1}, in task.args with the actual observation.\"\"\"\n args = []\n for arg in task.args:\n arg = _replace_arg_mask_with_real_value(arg, task.dependencies, self.tasks)\n args.append(arg)\n task.args = args\n\n async def _run_task(self, task: Task):\n self._preprocess_args(task)\n if not task.is_join:\n observation = await task()\n task.observation = observation\n self.tasks_done[task.idx].set()\n\n async def schedule(self):\n \"\"\"Run all tasks in self.tasks in parallel, respecting dependencies.\"\"\"\n # run until all tasks are done\n while not self._all_tasks_done():\n # Find tasks with no dependencies or with all dependencies met\n executable_tasks = self._get_all_executable_tasks()\n\n for task_name in executable_tasks:\n asyncio.create_task(self._run_task(self.tasks[task_name]))\n self.remaining_tasks.remove(task_name)\n\n await asyncio.sleep(SCHEDULING_INTERVAL)\n\n async def aschedule(self, task_queue: asyncio.Queue[Optional[Task]], func):\n \"\"\"Asynchronously listen to task_queue and schedule tasks as they arrive.\"\"\"\n no_more_tasks = False # Flag to check if all tasks are received\n\n while True:\n if not no_more_tasks:\n # Wait for a new task to be added to the queue\n task = await task_queue.get()\n\n # Check for sentinel value indicating end of tasks\n if task is None:\n no_more_tasks = True\n else:\n # Parse and set the new tasks\n self.set_tasks({task.idx: task})\n\n # Schedule and run executable tasks\n executable_tasks = self._get_all_executable_tasks()\n\n if executable_tasks:\n for task_name in executable_tasks:\n asyncio.create_task(self._run_task(self.tasks[task_name]))\n self.remaining_tasks.remove(task_name)\n elif no_more_tasks and self._all_tasks_done():\n # Exit the loop if no more tasks are expected and all tasks are done\n break\n else:\n # If no executable tasks are found, sleep for the SCHEDULING_INTERVAL\n await asyncio.sleep(SCHEDULING_INTERVAL)" }, { "identifier": "StructuredTool", "path": "src/tools/base.py", "snippet": "class StructuredTool(BaseTool):\n \"\"\"Tool that can operate on any number of inputs.\"\"\"\n\n description: str = \"\"\n args_schema: Type[BaseModel] = Field(..., description=\"The tool schema.\")\n \"\"\"The input arguments' schema.\"\"\"\n func: Optional[Callable[..., Any]]\n \"\"\"The function to run when the tool is called.\"\"\"\n coroutine: Optional[Callable[..., Awaitable[Any]]] = None\n \"\"\"The asynchronous version of the function.\"\"\"\n stringify_rule: Optional[Callable[..., str]] = None\n\n # --- Runnable ---\n\n async def ainvoke(\n self,\n input: Union[str, Dict],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Any:\n if not self.coroutine:\n # If the tool does not implement async, fall back to default implementation\n return await asyncio.get_running_loop().run_in_executor(\n None, partial(self.invoke, input, config, **kwargs)\n )\n\n return super().ainvoke(input, config, **kwargs)\n\n # --- Tool ---\n\n @property\n def args(self) -> dict:\n \"\"\"The tool's input arguments.\"\"\"\n return self.args_schema.schema()[\"properties\"]\n\n def _run(\n self,\n *args: Any,\n run_manager: Optional[CallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Use the tool.\"\"\"\n if self.func:\n new_argument_supported = signature(self.func).parameters.get(\"callbacks\")\n return (\n self.func(\n *args,\n callbacks=run_manager.get_child() if run_manager else None,\n **kwargs,\n )\n if new_argument_supported\n else self.func(*args, **kwargs)\n )\n raise NotImplementedError(\"Tool does not support 
sync\")\n\n async def _arun(\n self,\n *args: Any,\n run_manager: Optional[AsyncCallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> str:\n \"\"\"Use the tool asynchronously.\"\"\"\n if self.coroutine:\n new_argument_supported = signature(self.coroutine).parameters.get(\n \"callbacks\"\n )\n return (\n await self.coroutine(\n *args,\n callbacks=run_manager.get_child() if run_manager else None,\n **kwargs,\n )\n if new_argument_supported\n else await self.coroutine(*args, **kwargs)\n )\n return await asyncio.get_running_loop().run_in_executor(\n None,\n self._run,\n partial(self._run, run_manager=run_manager, **kwargs),\n *args,\n )\n\n @classmethod\n def from_function(\n cls,\n func: Optional[Callable] = None,\n coroutine: Optional[Callable[..., Awaitable[Any]]] = None,\n name: Optional[str] = None,\n description: Optional[str] = None,\n return_direct: bool = False,\n args_schema: Optional[Type[BaseModel]] = None,\n infer_schema: bool = True,\n **kwargs: Any,\n ) -> StructuredTool:\n \"\"\"Create tool from a given function.\n\n A classmethod that helps to create a tool from a function.\n\n Args:\n func: The function from which to create a tool\n coroutine: The async function from which to create a tool\n name: The name of the tool. Defaults to the function name\n description: The description of the tool. Defaults to the function docstring\n return_direct: Whether to return the result directly or as a callback\n args_schema: The schema of the tool's input arguments\n infer_schema: Whether to infer the schema from the function's signature\n **kwargs: Additional arguments to pass to the tool\n\n Returns:\n The tool\n\n Examples:\n\n .. code-block:: python\n\n def add(a: int, b: int) -> int:\n \\\"\\\"\\\"Add two numbers\\\"\\\"\\\"\n return a + b\n tool = StructuredTool.from_function(add)\n tool.run(1, 2) # 3\n \"\"\"\n\n if func is not None:\n source_function = func\n elif coroutine is not None:\n source_function = coroutine\n else:\n raise ValueError(\"Function and/or coroutine must be provided\")\n name = name or source_function.__name__\n description = description or source_function.__doc__\n if description is None:\n raise ValueError(\n \"Function must have a docstring if description not provided.\"\n )\n\n # Description example:\n # search_api(query: str) - Searches the API for the query.\n sig = signature(source_function)\n description = f\"{name}{sig} - {description.strip()}\"\n _args_schema = args_schema\n if _args_schema is None and infer_schema:\n _args_schema = create_schema_from_function(f\"{name}Schema\", source_function)\n return cls(\n name=name,\n func=func,\n coroutine=coroutine,\n args_schema=_args_schema,\n description=description,\n return_direct=return_direct,\n **kwargs,\n )" }, { "identifier": "Tool", "path": "src/tools/base.py", "snippet": "class Tool(BaseTool):\n \"\"\"Tool that takes in function or coroutine directly.\"\"\"\n\n description: str = \"\"\n func: Optional[Callable[..., str]]\n \"\"\"The function to run when the tool is called.\"\"\"\n coroutine: Optional[Callable[..., Awaitable[str]]] = None\n \"\"\"The asynchronous version of the function.\"\"\"\n stringify_rule: Optional[Callable[..., str]] = None\n\n # --- Runnable ---\n\n async def ainvoke(\n self,\n input: Union[str, Dict],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Any:\n if not self.coroutine:\n # If the tool does not implement async, fall back to default implementation\n return await asyncio.get_running_loop().run_in_executor(\n None, partial(self.invoke, input, 
config, **kwargs)\n )\n\n return super().ainvoke(input, config, **kwargs)\n\n # --- Tool ---\n\n @property\n def args(self) -> dict:\n \"\"\"The tool's input arguments.\"\"\"\n if self.args_schema is not None:\n return self.args_schema.schema()[\"properties\"]\n # For backwards compatibility, if the function signature is ambiguous,\n # assume it takes a single string input.\n return {\"tool_input\": {\"type\": \"string\"}}\n\n def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]:\n \"\"\"Convert tool input to pydantic model.\"\"\"\n args, kwargs = super()._to_args_and_kwargs(tool_input)\n # For backwards compatibility. The tool must be run with a single input\n all_args = list(args) + list(kwargs.values())\n if len(all_args) != 1:\n raise ToolException(\n f\"Too many arguments to single-input tool {self.name}.\"\n f\" Args: {all_args}\"\n )\n return tuple(all_args), {}\n\n def _run(\n self,\n *args: Any,\n run_manager: Optional[CallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Use the tool.\"\"\"\n if self.func:\n new_argument_supported = signature(self.func).parameters.get(\"callbacks\")\n return (\n self.func(\n *args,\n callbacks=run_manager.get_child() if run_manager else None,\n **kwargs,\n )\n if new_argument_supported\n else self.func(*args, **kwargs)\n )\n raise NotImplementedError(\"Tool does not support sync\")\n\n async def _arun(\n self,\n *args: Any,\n run_manager: Optional[AsyncCallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> Any:\n \"\"\"Use the tool asynchronously.\"\"\"\n if self.coroutine:\n new_argument_supported = signature(self.coroutine).parameters.get(\n \"callbacks\"\n )\n return (\n await self.coroutine(\n *args,\n callbacks=run_manager.get_child() if run_manager else None,\n **kwargs,\n )\n if new_argument_supported\n else await self.coroutine(*args, **kwargs)\n )\n else:\n return await asyncio.get_running_loop().run_in_executor(\n None, partial(self._run, run_manager=run_manager, **kwargs), *args\n )\n\n # TODO: this is for backwards compatibility, remove in future\n def __init__(\n self, name: str, func: Optional[Callable], description: str, **kwargs: Any\n ) -> None:\n \"\"\"Initialize tool.\"\"\"\n super(Tool, self).__init__(\n name=name, func=func, description=description, **kwargs\n )\n\n @classmethod\n def from_function(\n cls,\n func: Optional[Callable],\n name: str, # We keep these required to support backwards compatibility\n description: str,\n return_direct: bool = False,\n args_schema: Optional[Type[BaseModel]] = None,\n coroutine: Optional[\n Callable[..., Awaitable[Any]]\n ] = None, # This is last for compatibility, but should be after func\n **kwargs: Any,\n ) -> Tool:\n \"\"\"Initialize tool from a function.\"\"\"\n if func is None and coroutine is None:\n raise ValueError(\"Function and/or coroutine must be provided\")\n return cls(\n name=name,\n func=func,\n coroutine=coroutine,\n description=description,\n return_direct=return_direct,\n args_schema=args_schema,\n **kwargs,\n )" }, { "identifier": "log", "path": "src/utils/logger_utils.py", "snippet": "def log(self, latency: float, answer: str, label: str, key: str) -> None:\n self._latency_dict[key].append(latency)\n self._answer_dict[key].append(answer)\n self._label_dict[key].append(label)" } ]
import asyncio
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union, cast
from langchain.callbacks.manager import (
    AsyncCallbackManagerForChainRun,
    CallbackManagerForChainRun,
)
from langchain.llms import BaseLLM
from langchain.prompts.base import StringPromptValue
from src.callbacks.callbacks import AsyncStatsCallbackHandler
from src.chains.chain import Chain
from src.llm_compiler.constants import JOINNER_REPLAN
from src.llm_compiler.planner import Planner
from src.llm_compiler.task_fetching_unit import Task, TaskFetchingUnit
from src.tools.base import StructuredTool, Tool
from src.utils.logger_utils import log
12,299
return stats def reset_all_stats(self): if self.planner_callback: self.planner_callback.reset() if self.executor_callback: self.executor_callback.reset() @property def input_keys(self) -> List[str]: return [self.input_key] @property def output_keys(self) -> List[str]: return [self.output_key] # TODO(sk): move all join related functions to a separate class def _parse_joinner_output(self, raw_answer: str) -> str: """We expect the joinner output format to be: ``` Thought: xxx Action: Finish/Replan(yyy) ``` Returns: thought (xxx) answer (yyy) is_replan (True/False) """ thought, answer, is_replan = "", "", False # default values raw_answers = raw_answer.split("\n") for ans in raw_answers: if ans.startswith("Action:"): answer = ans[ans.find("(") + 1 : ans.find(")")] is_replan = JOINNER_REPLAN in ans elif ans.startswith("Thought:"): thought = ans.split("Thought:")[1].strip() return thought, answer, is_replan def _generate_context_for_replanner( self, tasks: Mapping[int, Task], joinner_thought: str ) -> str: """Formatted like this: ``` 1. action 1 Observation: xxx 2. action 2 Observation: yyy ... Thought: joinner_thought ``` """ previous_plan_and_observations = "\n".join( [ task.get_though_action_observation( include_action=True, include_action_idx=True ) for task in tasks.values() if not task.is_join ] ) joinner_thought = f"Thought: {joinner_thought}" context = "\n\n".join([previous_plan_and_observations, joinner_thought]) return context def _format_contexts(self, contexts: Sequence[str]) -> str: """contexts is a list of context each context is formatted as the description of _generate_context_for_replanner """ formatted_contexts = "" for context in contexts: formatted_contexts += f"Previous Plan:\n\n{context}\n\n" formatted_contexts += "Current Plan:\n\n" return formatted_contexts async def join( self, input_query: str, agent_scratchpad: str, is_final: bool ) -> str: if is_final: joinner_prompt = self.joinner_prompt_final else: joinner_prompt = self.joinner_prompt prompt = ( f"{joinner_prompt}\n" # Instructions and examples f"Question: {input_query}\n\n" # User input query f"{agent_scratchpad}\n" # T-A-O # "---\n" ) log("Joining prompt:\n", prompt, block=True) response = await self.agent.arun( prompt, callbacks=[self.executor_callback] if self.benchmark else None ) raw_answer = cast(str, response.generations[0][0].message.content) log("Question: \n", input_query, block=True) log("Raw Answer: \n", raw_answer, block=True) thought, answer, is_replan = self._parse_joinner_output(raw_answer) if is_final: # If final, we don't need to replan is_replan = False return thought, answer, is_replan def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ): raise NotImplementedError("LLMCompiler is async only.") async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: contexts = [] joinner_thought = "" agent_scratchpad = "" for i in range(self.max_replans): is_first_iter = i == 0 is_final_iter = i == self.max_replans - 1
class LLMCompilerAgent: """Self defined agent for LLM Compiler.""" def __init__(self, llm: BaseLLM) -> None: self.llm = llm async def arun(self, prompt: str, callbacks=None) -> str: return await self.llm.agenerate_prompt( prompts=[StringPromptValue(text=prompt)], stop=None, callbacks=callbacks, ) class LLMCompiler(Chain, extra="allow"): """LLMCompuler Engine.""" """The step container to use.""" input_key: str = "input" output_key: str = "output" def __init__( self, tools: Sequence[Union[Tool, StructuredTool]], planner_llm: BaseLLM, planner_example_prompt: str, planner_example_prompt_replan: Optional[str], planner_stop: Optional[list[str]], planner_stream: bool, agent_llm: BaseLLM, joinner_prompt: str, joinner_prompt_final: Optional[str], max_replans: int, benchmark: bool, **kwargs, ) -> None: """ Args: tools: List of tools to use. max_replans: Maximum number of replans to do. benchmark: Whether to collect benchmark stats. Planner Args: planner_llm: LLM to use for planning. planner_example_prompt: Example prompt for planning. planner_example_prompt_replan: Example prompt for replanning. Assign this if you want to use different example prompt for replanning. If not assigned, default to `planner_example_prompt`. planner_stop: Stop tokens for planning. planner_stream: Whether to stream the planning. Agent Args: agent_llm: LLM to use for agent. joinner_prompt: Prompt to use for joinner. joinner_prompt_final: Prompt to use for joinner at the final replanning iter. If not assigned, default to `joinner_prompt`. """ super().__init__(**kwargs) if not planner_example_prompt_replan: log( "Replan example prompt not specified, using the same prompt as the planner." ) planner_example_prompt_replan = planner_example_prompt self.planner = Planner( llm=planner_llm, example_prompt=planner_example_prompt, example_prompt_replan=planner_example_prompt_replan, tools=tools, stop=planner_stop, ) self.agent = LLMCompilerAgent(agent_llm) self.joinner_prompt = joinner_prompt self.joinner_prompt_final = joinner_prompt_final or joinner_prompt self.planner_stream = planner_stream self.max_replans = max_replans # callbacks self.benchmark = benchmark if benchmark: self.planner_callback = AsyncStatsCallbackHandler(stream=planner_stream) self.executor_callback = AsyncStatsCallbackHandler(stream=False) else: self.planner_callback = None self.executor_callback = None def get_all_stats(self): stats = {} if self.benchmark: stats["planner"] = self.planner_callback.get_stats() stats["executor"] = self.executor_callback.get_stats() stats["total"] = { k: v + stats["executor"][k] for k, v in stats["planner"].items() } return stats def reset_all_stats(self): if self.planner_callback: self.planner_callback.reset() if self.executor_callback: self.executor_callback.reset() @property def input_keys(self) -> List[str]: return [self.input_key] @property def output_keys(self) -> List[str]: return [self.output_key] # TODO(sk): move all join related functions to a separate class def _parse_joinner_output(self, raw_answer: str) -> str: """We expect the joinner output format to be: ``` Thought: xxx Action: Finish/Replan(yyy) ``` Returns: thought (xxx) answer (yyy) is_replan (True/False) """ thought, answer, is_replan = "", "", False # default values raw_answers = raw_answer.split("\n") for ans in raw_answers: if ans.startswith("Action:"): answer = ans[ans.find("(") + 1 : ans.find(")")] is_replan = JOINNER_REPLAN in ans elif ans.startswith("Thought:"): thought = ans.split("Thought:")[1].strip() return thought, answer, is_replan def 
_generate_context_for_replanner( self, tasks: Mapping[int, Task], joinner_thought: str ) -> str: """Formatted like this: ``` 1. action 1 Observation: xxx 2. action 2 Observation: yyy ... Thought: joinner_thought ``` """ previous_plan_and_observations = "\n".join( [ task.get_though_action_observation( include_action=True, include_action_idx=True ) for task in tasks.values() if not task.is_join ] ) joinner_thought = f"Thought: {joinner_thought}" context = "\n\n".join([previous_plan_and_observations, joinner_thought]) return context def _format_contexts(self, contexts: Sequence[str]) -> str: """contexts is a list of context each context is formatted as the description of _generate_context_for_replanner """ formatted_contexts = "" for context in contexts: formatted_contexts += f"Previous Plan:\n\n{context}\n\n" formatted_contexts += "Current Plan:\n\n" return formatted_contexts async def join( self, input_query: str, agent_scratchpad: str, is_final: bool ) -> str: if is_final: joinner_prompt = self.joinner_prompt_final else: joinner_prompt = self.joinner_prompt prompt = ( f"{joinner_prompt}\n" # Instructions and examples f"Question: {input_query}\n\n" # User input query f"{agent_scratchpad}\n" # T-A-O # "---\n" ) log("Joining prompt:\n", prompt, block=True) response = await self.agent.arun( prompt, callbacks=[self.executor_callback] if self.benchmark else None ) raw_answer = cast(str, response.generations[0][0].message.content) log("Question: \n", input_query, block=True) log("Raw Answer: \n", raw_answer, block=True) thought, answer, is_replan = self._parse_joinner_output(raw_answer) if is_final: # If final, we don't need to replan is_replan = False return thought, answer, is_replan def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ): raise NotImplementedError("LLMCompiler is async only.") async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, Any]: contexts = [] joinner_thought = "" agent_scratchpad = "" for i in range(self.max_replans): is_first_iter = i == 0 is_final_iter = i == self.max_replans - 1
task_fetching_unit = TaskFetchingUnit()
5
2023-12-06 21:12:54+00:00
16k
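The gold snippet for this record is the TaskFetchingUnit, whose schedule/aschedule methods launch a task as soon as every one of its dependencies has completed, so independent tasks run concurrently. The sketch below is a simplified, standalone analogue of that dependency-gated loop, not the repository's implementation: the plan contents, the helper names schedule and run, and the 0.05 s polling interval are illustrative assumptions.

# Simplified analogue of the dependency-gated scheduling loop in the
# TaskFetchingUnit snippet above; all names and timings here are assumptions.
import asyncio


async def schedule(tasks: dict) -> dict:
    done = {name: asyncio.Event() for name in tasks}
    results: dict = {}
    remaining = set(tasks)
    pending = []

    async def run(name: str) -> None:
        # Run the task coroutine, record its observation, and mark it done.
        results[name] = await tasks[name]["coro"]()
        done[name].set()

    while remaining or not all(event.is_set() for event in done.values()):
        # A task becomes runnable once every one of its dependencies has finished.
        runnable = [
            name for name in remaining
            if all(done[dep].is_set() for dep in tasks[name]["deps"])
        ]
        for name in runnable:
            pending.append(asyncio.create_task(run(name)))
            remaining.discard(name)
        await asyncio.sleep(0.05)  # stand-in for the repo's SCHEDULING_INTERVAL
    return results


async def main() -> None:
    async def search() -> str:
        await asyncio.sleep(0.2)
        return "search result"

    async def summarize() -> str:
        await asyncio.sleep(0.1)
        return "summary"

    plan = {
        "1": {"coro": search, "deps": []},
        "2": {"coro": summarize, "deps": ["1"]},  # runs only after task "1"
    }
    print(await schedule(plan))


asyncio.run(main())

The repository's version additionally streams tasks in from the planner through an asyncio.Queue (aschedule) and rewrites ${idx} placeholders in a task's arguments with the observations of earlier tasks before executing it.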
bytedance/ImageDream
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with 
mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n mesh = trimesh.load(mesh_path)\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n 
desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 
3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n 
self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, 
density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = (density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / 
self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def 
__init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def 
v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n 
assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
from dataclasses import dataclass, field

from threestudio.models.geometry.base import (
    BaseExplicitGeometry,
    BaseGeometry,
    contract_to_unisphere,
)
from threestudio.models.geometry.implicit_sdf import ImplicitSDF
from threestudio.models.geometry.implicit_volume import ImplicitVolume
from threestudio.models.isosurface import MarchingTetrahedraHelper
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import scale_tensor
from threestudio.utils.typing import *
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
14,365
                    nn.Parameter(
                        torch.zeros_like(self.isosurface_helper.grid_vertices)
                    ),
                )
            else:
                self.deformation = None
        else:
            self.register_buffer(
                "sdf",
                torch.zeros(
                    (self.isosurface_helper.grid_vertices.shape[0], 1),
                    dtype=torch.float32,
                ),
            )
            if self.cfg.isosurface_deformable_grid:
                self.register_buffer(
                    "deformation",
                    torch.zeros_like(self.isosurface_helper.grid_vertices),
                )
            else:
                self.deformation = None

        if not self.cfg.geometry_only:
            self.encoding = get_encoding(
                self.cfg.n_input_dims, self.cfg.pos_encoding_config
            )
            self.feature_network = get_mlp(
                self.encoding.n_output_dims,
                self.cfg.n_feature_dims,
                self.cfg.mlp_network_config,
            )

        self.mesh: Optional[Mesh] = None

    def initialize_shape(self) -> None:
        raise NotImplementedError

    def isosurface(self) -> Mesh:
        # return cached mesh if fix_geometry is True to save computation
        if self.cfg.fix_geometry and self.mesh is not None:
            return self.mesh
        mesh = self.isosurface_helper(self.sdf, self.deformation)
        mesh.v_pos = scale_tensor(
            mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox
        )
        if self.cfg.isosurface_remove_outliers:
            mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold)
        self.mesh = mesh
        return mesh

    def forward(
        self, points: Float[Tensor, "*N Di"], output_normal: bool = False
    ) -> Dict[str, Float[Tensor, "..."]]:
        if self.cfg.geometry_only:
            return {}
        assert (
            output_normal == False
        ), f"Normal output is not supported for {self.__class__.__name__}"
        points_unscaled = points  # points in the original scale
        points = contract_to_unisphere(points, self.bbox)  # points normalized to (0, 1)
        enc = self.encoding(points.view(-1, self.cfg.n_input_dims))
        features = self.feature_network(enc).view(
            *points.shape[:-1], self.cfg.n_feature_dims
        )
        return {"features": features}

    @staticmethod
    @torch.no_grad()
    def create_from(
        other: BaseGeometry,
        cfg: Optional[Union[dict, DictConfig]] = None,
        copy_net: bool = True,
        **kwargs,
    ) -> "TetrahedraSDFGrid":
        if isinstance(other, TetrahedraSDFGrid):
            instance = TetrahedraSDFGrid(cfg, **kwargs)
            assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution
            instance.isosurface_bbox = other.isosurface_bbox.clone()
            instance.sdf.data = other.sdf.data.clone()
            if (
                instance.cfg.isosurface_deformable_grid
                and other.cfg.isosurface_deformable_grid
            ):
                assert (
                    instance.deformation is not None and other.deformation is not None
                )
                instance.deformation.data = other.deformation.data.clone()
            if (
                not instance.cfg.geometry_only
                and not other.cfg.geometry_only
                and copy_net
            ):
                instance.encoding.load_state_dict(other.encoding.state_dict())
                instance.feature_network.load_state_dict(
                    other.feature_network.state_dict()
                )
            return instance
        elif isinstance(other, ImplicitVolume):
            instance = TetrahedraSDFGrid(cfg, **kwargs)
            if other.cfg.isosurface_method != "mt":
                other.cfg.isosurface_method = "mt"
                threestudio.warn(
                    f"Override isosurface_method of the source geometry to 'mt'"
                )
            if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution:
                other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution
                threestudio.warn(
                    f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}"
                )
            mesh = other.isosurface()
            instance.isosurface_bbox = mesh.extras["bbox"]
            instance.sdf.data = (
                mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1)
            )
            if not instance.cfg.geometry_only and copy_net:
                instance.encoding.load_state_dict(other.encoding.state_dict())
                instance.feature_network.load_state_dict(
                    other.feature_network.state_dict()
                )
            return instance
@threestudio.register("tetrahedra-sdf-grid")
class TetrahedraSDFGrid(BaseExplicitGeometry):
    @dataclass
    class Config(BaseExplicitGeometry.Config):
        isosurface_resolution: int = 128
        isosurface_deformable_grid: bool = True
        isosurface_remove_outliers: bool = False
        isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01

        n_input_dims: int = 3
        n_feature_dims: int = 3
        pos_encoding_config: dict = field(
            default_factory=lambda: {
                "otype": "HashGrid",
                "n_levels": 16,
                "n_features_per_level": 2,
                "log2_hashmap_size": 19,
                "base_resolution": 16,
                "per_level_scale": 1.447269237440378,
            }
        )
        mlp_network_config: dict = field(
            default_factory=lambda: {
                "otype": "VanillaMLP",
                "activation": "ReLU",
                "output_activation": "none",
                "n_neurons": 64,
                "n_hidden_layers": 1,
            }
        )
        shape_init: Optional[str] = None
        shape_init_params: Optional[Any] = None
        force_shape_init: bool = False
        geometry_only: bool = False
        fix_geometry: bool = False

    cfg: Config

    def configure(self) -> None:
        super().configure()

        # this should be saved to state_dict, register as buffer
        self.isosurface_bbox: Float[Tensor, "2 3"]
        self.register_buffer("isosurface_bbox", self.bbox.clone())

        self.isosurface_helper = MarchingTetrahedraHelper(
            self.cfg.isosurface_resolution,
            f"load/tets/{self.cfg.isosurface_resolution}_tets.npz",
        )

        self.sdf: Float[Tensor, "Nv 1"]
        self.deformation: Optional[Float[Tensor, "Nv 3"]]

        if not self.cfg.fix_geometry:
            self.register_parameter(
                "sdf",
                nn.Parameter(
                    torch.zeros(
                        (self.isosurface_helper.grid_vertices.shape[0], 1),
                        dtype=torch.float32,
                    )
                ),
            )
            if self.cfg.isosurface_deformable_grid:
                self.register_parameter(
                    "deformation",
                    nn.Parameter(
                        torch.zeros_like(self.isosurface_helper.grid_vertices)
                    ),
                )
            else:
                self.deformation = None
        else:
            self.register_buffer(
                "sdf",
                torch.zeros(
                    (self.isosurface_helper.grid_vertices.shape[0], 1),
                    dtype=torch.float32,
                ),
            )
            if self.cfg.isosurface_deformable_grid:
                self.register_buffer(
                    "deformation",
                    torch.zeros_like(self.isosurface_helper.grid_vertices),
                )
            else:
                self.deformation = None

        if not self.cfg.geometry_only:
            self.encoding = get_encoding(
                self.cfg.n_input_dims, self.cfg.pos_encoding_config
            )
            self.feature_network = get_mlp(
                self.encoding.n_output_dims,
                self.cfg.n_feature_dims,
                self.cfg.mlp_network_config,
            )

        self.mesh: Optional[Mesh] = None

    def initialize_shape(self) -> None:
        raise NotImplementedError

    def isosurface(self) -> Mesh:
        # return cached mesh if fix_geometry is True to save computation
        if self.cfg.fix_geometry and self.mesh is not None:
            return self.mesh
        mesh = self.isosurface_helper(self.sdf, self.deformation)
        mesh.v_pos = scale_tensor(
            mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox
        )
        if self.cfg.isosurface_remove_outliers:
            mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold)
        self.mesh = mesh
        return mesh

    def forward(
        self, points: Float[Tensor, "*N Di"], output_normal: bool = False
    ) -> Dict[str, Float[Tensor, "..."]]:
        if self.cfg.geometry_only:
            return {}
        assert (
            output_normal == False
        ), f"Normal output is not supported for {self.__class__.__name__}"
        points_unscaled = points  # points in the original scale
        points = contract_to_unisphere(points, self.bbox)  # points normalized to (0, 1)
        enc = self.encoding(points.view(-1, self.cfg.n_input_dims))
        features = self.feature_network(enc).view(
            *points.shape[:-1], self.cfg.n_feature_dims
        )
        return {"features": features}

    @staticmethod
    @torch.no_grad()
    def create_from(
        other: BaseGeometry,
        cfg: Optional[Union[dict, DictConfig]] = None,
        copy_net: bool = True,
        **kwargs,
    ) -> "TetrahedraSDFGrid":
        if isinstance(other, TetrahedraSDFGrid):
            instance = TetrahedraSDFGrid(cfg, **kwargs)
            assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution
            instance.isosurface_bbox = other.isosurface_bbox.clone()
            instance.sdf.data = other.sdf.data.clone()
            if (
                instance.cfg.isosurface_deformable_grid
                and other.cfg.isosurface_deformable_grid
            ):
                assert (
                    instance.deformation is not None and other.deformation is not None
                )
                instance.deformation.data = other.deformation.data.clone()
            if (
                not instance.cfg.geometry_only
                and not other.cfg.geometry_only
                and copy_net
            ):
                instance.encoding.load_state_dict(other.encoding.state_dict())
                instance.feature_network.load_state_dict(
                    other.feature_network.state_dict()
                )
            return instance
        elif isinstance(other, ImplicitVolume):
            instance = TetrahedraSDFGrid(cfg, **kwargs)
            if other.cfg.isosurface_method != "mt":
                other.cfg.isosurface_method = "mt"
                threestudio.warn(
                    f"Override isosurface_method of the source geometry to 'mt'"
                )
            if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution:
                other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution
                threestudio.warn(
                    f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}"
                )
            mesh = other.isosurface()
            instance.isosurface_bbox = mesh.extras["bbox"]
            instance.sdf.data = (
                mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1)
            )
            if not instance.cfg.geometry_only and copy_net:
                instance.encoding.load_state_dict(other.encoding.state_dict())
                instance.feature_network.load_state_dict(
                    other.feature_network.state_dict()
                )
            return instance
elif isinstance(other, ImplicitSDF):
3
2023-12-13 21:09:37+00:00
16k
allenai/unified-io-2
t5x/models_test.py
[ { "identifier": "decoding", "path": "t5x/decoding.py", "snippet": "NEG_INF = np.array(-1.0e7)\nMIN_TEMPERATURE = np.array(1e-4)\nclass DecodingState:\nclass SamplingLoopState:\nclass BeamState:\ndef _is_tracer(value: Any):\ndef temperature_sample(\n inputs: jnp.ndarray,\n cache: Mapping[str, jnp.ndarray],\n tokens_to_logits: Callable[[DecodingState],\n Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]],\n eos_id: int,\n decode_rng: Optional[jnp.ndarray] = None,\n num_decodes: int = 1,\n temperature: Union[float, jnp.ndarray] = 1.0,\n topk: int = 1,\n topp: float = 0.0,\n cache_offset: int = 0,\n initial_index: Optional[jnp.ndarray] = None,\n max_decode_steps: Optional[Union[int, jnp.ndarray]] = None,\n max_decode_steps_hard_limit: Optional[int] = None,\n rescale_log_probs: bool = True,\n state_callback_fn: Optional[Callable[[SamplingLoopState],\n SamplingLoopState]] = None,\n logit_callback_fn: Optional[Callable[[jnp.ndarray, SamplingLoopState],\n jnp.ndarray]] = None\n) -> Tuple[jnp.ndarray, jnp.ndarray]:\ndef _temperature_sample_single_trial(\n inputs: jnp.ndarray,\n cache: Mapping[str, jnp.ndarray],\n tokens_to_logits: Callable[[DecodingState],\n Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]],\n eos_id: int,\n prng_key: jnp.ndarray,\n temperature: Union[float, jnp.ndarray] = 1.0,\n topk: int = 20,\n topp: Union[float, jnp.ndarray] = 0.0,\n initial_index: Optional[jnp.ndarray] = None,\n max_decode_steps: Optional[Union[int, jnp.ndarray]] = None,\n rescale_log_probs: bool = True,\n state_callback_fn: Optional[Callable[[SamplingLoopState],\n SamplingLoopState]] = None,\n logit_callback_fn: Optional[Callable[[jnp.ndarray, SamplingLoopState],\n jnp.ndarray]] = None\n) -> jnp.ndarray:\n def sampling_loop_cond_fn(state: SamplingLoopState) -> bool:\n def sampling_loop_body_fn(state: SamplingLoopState) -> SamplingLoopState:\n def sample_logits_with_nonzero_temperature(logits):\n def sample_logits_with_zero_temperature(logits):\ndef brevity_penalty(alpha: float, length: int) -> jnp.ndarray:\ndef cache_map(fn, cache, apply_to_index: bool = False):\ndef add_beam_dim(x: jnp.ndarray,\n beam_size: int,\n offset: int = 0) -> jnp.ndarray:\ndef flatten_beam_dim(x: jnp.ndarray, offset: int = 0) -> jnp.ndarray:\ndef unflatten_beam_dim(x: jnp.ndarray,\n batch_size: int,\n beam_size: int,\n offset: int = 0) -> jnp.ndarray:\ndef flat_batch_beam_expand(x: jnp.ndarray,\n beam_size: int,\n offset: int = 0) -> jnp.ndarray:\ndef cache_gather_beams(nested: PyTreeDef,\n beam_indices: jnp.ndarray,\n batch_size: int,\n old_beam_size: int,\n new_beam_size: int,\n one_hot: bool = True,\n offset: int = 0) -> jnp.ndarray:\n def gather_fn(x):\n def gather_fn(x):\n def gather_fn(x):\n def gather_fn(x):\ndef gather_beams(nested: PyTreeDef,\n beam_indices: jnp.ndarray,\n batch_size: int,\n old_beam_size: int,\n new_beam_size: int,\n one_hot: bool = True) -> jnp.ndarray:\n def gather_fn(x):\n def gather_fn(x):\ndef top_k_two_stage(x, k):\ndef gather_topk_beams(nested: PyTreeDef, score_or_log_prob: jnp.ndarray,\n batch_size: int, new_beam_size: int) -> jnp.ndarray:\ndef beam_init(batch_size: int,\n beam_size: int,\n max_decode_len: int,\n cache: Mapping[str, jnp.ndarray],\n offset: int = 0) -> BeamState:\ndef beam_search(inputs: jnp.ndarray,\n cache: Mapping[str, jnp.ndarray],\n tokens_to_logits: Callable[[DecodingState],\n Tuple[jnp.ndarray,\n Mapping[str, jnp.ndarray]]],\n eos_id: int,\n num_decodes: int = 4,\n alpha: float = 0.6,\n max_decode_len: Optional[int] = None,\n decode_rng: Optional[jnp.ndarray] = None,\n 
cache_offset: int = 0) -> Tuple[jnp.ndarray, jnp.ndarray]:\n def beam_search_loop_cond_fn(state: BeamState) -> bool:\n def beam_search_loop_body_fn(state: BeamState) -> BeamState:" }, { "identifier": "models", "path": "t5x/models.py", "snippet": "class TokensIdsToLogitsCallable(typing_extensions.Protocol):\nclass DecodeFnCallable(typing_extensions.Protocol):\nclass BaseModel(abc.ABC):\nclass BaseTransformerModel(BaseModel):\nclass EncoderDecoderModel(BaseTransformerModel):\nclass DecoderOnlyModel(BaseTransformerModel):\n def __call__(\n self, decoding_state: decoding.DecodingState\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def __call__(self, *, inputs: jnp.ndarray, cache: Mapping[str, jnp.ndarray],\n tokens_to_logits: TokensIdsToLogitsCallable, eos_id: int,\n num_decodes: int, decode_rng: Optional[jax.random.KeyArray],\n cache_offset: int, **kwargs) -> Tuple[jnp.ndarray, jnp.ndarray]:\n def __init__(self, optimizer_def: optimizers.OptimizerDefType):\n def loss_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray],\n ) -> Tuple[jnp.ndarray, MetricsMap]:\n def eval_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n ) -> Tuple[jnp.ndarray, MetricsMap]:\n def predict_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None) -> jnp.ndarray:\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def score_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False) -> jnp.ndarray:\n def get_initial_variables(\n self,\n rng: jax.random.KeyArray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> flax_scope.FrozenVariableDict:\n def __init__(\n self,\n module: nn.Module,\n input_vocabulary: seqio.Vocabulary,\n output_vocabulary: seqio.Vocabulary,\n optimizer_def: optimizers.OptimizerDefType,\n decode_fn: Optional[DecodeFnCallable] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[Union[\n float, int, str, losses.SpecialLossNormalizingFactor]] = None,\n ):\n def input_vocabulary(self):\n def output_vocabulary(self):\n def decode_fn(self):\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray] = None) -> jnp.ndarray:\n def loss_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray],\n ) -> Tuple[jnp.ndarray, MetricsMap]:\n def _compute_metrics(\n self,\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n mask: jnp.ndarray,\n loss: jnp.ndarray,\n z_loss: Optional[jnp.ndarray] = None,\n segment_ids: Optional[Mapping[str, jnp.ndarray]] = None,\n ) -> MetricsMap:\n def __init__(\n self,\n module: nn.Module,\n input_vocabulary: seqio.Vocabulary,\n output_vocabulary: seqio.Vocabulary,\n optimizer_def: optimizers.OptimizerDefType,\n decode_fn: DecodeFnCallable = decoding.beam_search,\n feature_converter_cls: Optional[Callable[...,\n seqio.FeatureConverter]] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[float] = None,\n ):\n def get_initial_variables(\n self,\n rng: jax.random.KeyArray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> 
flax_scope.FrozenVariableDict:\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray] = None,\n mutable: flax_scope.CollectionFilter = False,\n other_variables: Optional[PyTreeDef] = None,\n ) -> Union[jnp.ndarray, Tuple[jnp.ndarray, flax_scope.FrozenVariableDict]]:\n def _compute_logits_from_slice(\n self, decoding_state: decoding.DecodingState, params: PyTreeDef,\n encoded_inputs: jnp.ndarray, raw_inputs: jnp.ndarray,\n max_decode_length: int) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None,\n decoder_params: Optional[MutableMapping[str, Any]] = None,\n return_all_decodes: bool = False,\n num_decodes: int = 1,\n prompt_with_targets: bool = False\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def score_batch(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False,\n ) -> Union[jnp.ndarray, Tuple[jnp.ndarray, Mapping[str, Any]]]:\n def __init__(\n self,\n module: nn.Module,\n vocabulary: seqio.Vocabulary,\n optimizer_def: optimizers.OptimizerDefType,\n decode_fn: DecodeFnCallable = decoding.temperature_sample,\n inputs_bidirectional_attention: bool = False,\n feature_converter_cls: Optional[Callable[...,\n seqio.FeatureConverter]] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[float] = None,\n ):\n def get_initial_variables(\n self,\n rng: jax.random.KeyArray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> flax_scope.FrozenVariableDict:\n def _get_decoder_causal_attention(self, batch):\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray] = None,\n mutable: flax_scope.CollectionFilter = False) -> jnp.ndarray:\n def _compute_logits_from_slice(\n self,\n decoding_state: decoding.DecodingState,\n params: PyTreeDef,\n max_decode_length: int,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def score_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False) -> jnp.ndarray:\n def _compute_kv_cache(\n self,\n params: PyTreeDef,\n inputs: jnp.ndarray,\n inputs_lengths: jnp.ndarray,\n decoder_causal_attention: jnp.ndarray,\n ) -> PyTreeDef:\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None,\n *,\n return_all_decodes: bool = False,\n num_decodes: int = 1,\n decoder_params: Optional[MutableMapping[str, Any]] = None,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\ndef remove_prefix(sequence: jnp.ndarray,\n prefix_length: jnp.ndarray) -> jnp.ndarray:\ndef compute_weighted_accuracy(\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n weights: Optional[jnp.ndarray] = None) -> Tuple[jnp.ndarray, jnp.ndarray]:\ndef compute_metrics(logits: jnp.ndarray, targets: jnp.ndarray,\n weights: jnp.ndarray, loss: jnp.ndarray,\n weight_sum: jnp.ndarray,\n additional_metrics: MetricsMap) -> MetricsMap:\ndef compute_base_metrics(\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n mask: jnp.ndarray,\n loss: jnp.ndarray,\n z_loss: Optional[jnp.ndarray] = None,\n segment_ids: Optional[Mapping[str, jnp.ndarray]] = None,\n) -> MetricsMap:\ndef get_input_vocabulary(model: BaseTransformerModel) -> seqio.Vocabulary:\ndef 
get_output_vocabulary(model: BaseTransformerModel) -> seqio.Vocabulary:\n FEATURE_CONVERTER_CLS: Callable[..., seqio.FeatureConverter]\n FEATURE_CONVERTER_CLS = seqio.EncDecFeatureConverter\n FEATURE_CONVERTER_CLS = seqio.DecoderFeatureConverter" }, { "identifier": "partitioning", "path": "t5x/partitioning.py", "snippet": "class AxisNames(tuple):\nclass LocalChunkInfo:\nclass LocalChunker:\nclass DataLayout:\nclass BasePartitioner(metaclass=abc.ABCMeta):\nclass PjittedFnWithContext(PartitionedCallable):\nclass BasePjitPartitioner(BasePartitioner):\nclass PjitPartitioner(BasePjitPartitioner):\n def __new__(cls, *names):\n def __repr__(self):\ndef pjit(\n fun: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = (),\n backend: Optional[str] = None):\ndef pjit_with_cpu_fallback(\n fun: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = (),\n backend: Optional[str] = None):\ndef with_sharding_constraint(x, axis_resources):\ndef bounds_from_last_device(\n last_device: jax.lib.xla_client.Device) -> HardwareMesh:\ndef get_coords(device: jax.lib.xla_client.Device) -> HardwareMesh:\ndef global_mesh_defined():\ndef get_mesh(model_parallel_submesh: HardwareMesh,\n input_devices: Sequence[JaxDevice] = (),\n input_local_devices: Sequence[JaxDevice] = (),\n tile_by_host_if_needed: bool = True,\n backend: Optional[str] = None) -> Mesh:\n def dh_dd_mh_md(g: int, m: int, l: int) -> Tuple[int, int, int, int]:\ndef get_cpu_mesh() -> Mesh:\ndef get_gpu_mesh(num_partitions: int) -> Mesh:\ndef default_mesh(num_partitions: int,\n model_parallel_submesh: Optional[HardwareMesh] = None,\n backend: Optional[str] = None) -> Mesh:\n def __init__(self, global_mesh: Mesh):\n def get_local_chunk_info(\n self, global_shape: Tuple[int, ...],\n mesh_axes: Sequence[Optional[str]]) -> LocalChunkInfo:\ndef standard_logical_axis_rules(\n activation_partitioning_dims: int = 1,\n parameter_partitioning_dims: int = 1,\n additional_rules: Optional[LogicalAxisRules] = None) -> LogicalAxisRules:\ndef _id_fn(x, ix):\n def __init__(self,\n num_partitions: Optional[int] = None,\n model_parallel_submesh: Optional[HardwareMesh] = None,\n params_on_devices: bool = True,\n backend: Optional[str] = None):\n def mesh(self) -> Mesh:\n def data_partition_spec(self) -> PartitionSpec:\n def get_data_layout(self,\n batch_size: Optional[int] = None,\n host_index: Optional[int] = None) -> DataLayout:\n def get_local_chunk_info(\n self, global_shape: Tuple[int, ...],\n mesh_axes: Sequence[Optional[str]]) -> LocalChunkInfo:\n def params_on_devices(self):\n def move_params_to_devices(self, train_state: TrainState,\n train_state_axes: TrainState) -> TrainState:\n def _local_chunker(self):\n def get_logical_axes(self, train_state: TrainState) -> TrainState:\n def get_mesh_axes(self, train_state: TrainState) -> TrainState:\n def partition(\n self,\n fn: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = ()\n ) -> PartitionedCallable:\n def compile(self, partitioned_fn: PartitionedCallable,\n *args) -> CompiledPartitionedCallable:\n def __init__(self,\n pjitted_fn,\n partition_mesh: Mesh,\n logical_axis_rules: flax_partitioning.LogicalRules = ()):\n def __call__(self, 
*args):\n def lower(self, *args):\n def _local_chunker(self) -> LocalChunker:\n def mesh(self) -> Mesh:\n def partition(\n self,\n fn: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = ()\n ) -> PjittedFnWithContext:\n def compile(self, partitioned_fn: PjittedFnWithContext,\n *args) -> CompiledPartitionedCallable:\n def __init__(self,\n num_partitions: Optional[int] = None,\n model_parallel_submesh: Optional[HardwareMesh] = None,\n params_on_devices: bool = True,\n backend: Optional[str] = None,\n logical_axis_rules: Optional[LogicalAxisRules] = None,\n use_cpu_pjit: Optional[bool] = False):\n def partition(\n self,\n fn: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = ()\n ) -> PjittedFnWithContext:\n def logical_axis_rules(self):\n def get_logical_axes(self, train_state: TrainState) -> TrainState:\n def get_mesh_axes(self, train_state: TrainState) -> TrainState:\n def _logical_to_mesh_axes(param_name, logical_axes):" }, { "identifier": "test_utils", "path": "t5x/test_utils.py", "snippet": "class CpuDevice:\nclass GpuDevice:\nclass TpuDevice:\n class DummyVocab:\ndef coords_to_idx(coords: Tuple[int, ...], bounds: Tuple[int, ...]) -> int:\ndef make_devices(nx: int,\n ny: int,\n nz: int,\n nc: int = 2,\n host_layout: Tuple[int, ...] = (2, 2, 1, 2),\n kind='TPU v3'):\ndef get_t5_test_model(**config_overrides) -> models.EncoderDecoderModel:\ndef with_mesh(named_shape: MeshSpec) -> Generator[None, None, None]:\ndef create_global_mesh(mesh_shape, axis_names):\ndef get_fake_vocab():\ndef get_fake_tokenized_dataset(*_, split='validation', **__):\ndef assert_equal(a, b):\ndef assert_same(tree_a, tree_b):\ndef get_train_state_from_variables(variables,\n optimizer_def=adafactor.Adafactor(0.0)):\n_FAKE_TOKENIZED_DATASET = {\n 'train': [\n {\n 'inputs': (3, 13, 7, 14, 15, 9, 4, 16),\n 'inputs_pretokenized': 'complete: this',\n 'targets': (3, 8, 6, 3, 5, 10),\n 'targets_pretokenized': 'is a test'\n },\n {\n 'inputs': (3, 13, 7, 14, 15, 9, 4, 16),\n 'inputs_pretokenized': 'complete: that',\n 'targets': (17, 5, 6, 3, 5, 10),\n 'targets_pretokenized': 'was a test'\n },\n {\n 'inputs': (3, 13, 7, 14, 15, 9, 4, 16),\n 'inputs_pretokenized': 'complete: those',\n 'targets': (17, 4, 23, 4, 10, 6),\n 'targets_pretokenized': 'were tests'\n },\n ],\n # Notice that we repeat consecutively each examples 4 times,\n # this needed for tests like infer_tests to validate determinism.\n 'validation': [{\n 'inputs': (3, 13, 7, 14, 15, 9, 4, 16),\n 'inputs_pretokenized': 'complete: this',\n 'targets': (3, 8, 6, 3, 5, 3, 25, 5),\n 'targets_pretokenized': 'is a validation',\n }] * 4 + [{\n 'inputs': (3, 13, 7, 14, 15, 9, 4, 17),\n 'inputs_pretokenized': 'complete: that',\n 'targets': (17, 5, 6, 3, 5, 22, 7, 24),\n 'targets_pretokenized': 'was another validation',\n }] * 4\n}" }, { "identifier": "trainer", "path": "t5x/trainer.py", "snippet": "def _merge_metrics(a, b):\ndef merge_metrics(a, b):\n def result(self) -> Mapping[str, Array]:\n def result(self) -> Mapping[str, clu.values.Value]:\n def result(self) -> float:\n def __call__(\n self,\n step: jnp.ndarray,\n ) -> jnp.ndarray:\n def __call__(self, metrics: MetricMapType, duration: float,\n num_steps: int) -> Mapping[str, jnp.ndarray]:\n def __call__(\n self, train_state: train_state_lib.TrainState,\n batch: 
BatchType) -> Tuple[train_state_lib.TrainState, MetricMapType]:\n def __call__(self, train_state: train_state_lib.TrainState,\n batch: jnp.ndarray) -> MetricMapType:\n def compute_metrics(\n self, gradients: ModelWeights,\n old_train_state: train_state_lib.TrainState,\n new_train_state: train_state_lib.TrainState) -> MutableMetricMapType:\n def _make_rms_metrics(name, tree):\n def _make_max_metrics(name, tree):\n def compute_metrics(\n self, gradients: ModelWeights,\n old_train_state: train_state_lib.TrainState,\n new_train_state: train_state_lib.TrainState) -> MutableMetricMapType:\n def __init__(self):\n def close(self):\n def __del__(self):\n def _get_completion_future(self, block_on: PyTreeDef = ()) -> TimeFuture:\n def _get_completion_time():\n def start(self, block_on: PyTreeDef = ()):\n def stop(self, block_on: PyTreeDef = ()) -> TimeFuture:\n def __init__(self, name: str, summary_dir: Optional[str] = None, log_to_wandb=False):\n def __del__(self):\n def close(self):\n def summary_writer(self) -> metric_writers.MetricWriter:\n def write_scalar(self, key: str, val: metric_writers.interface.Scalar,\n step: int):\n def write_scalars(self, step: int,\n scalars: Mapping[str, metric_writers.interface.Scalar]):\n def start_duration_timer(self, block_on: PyTreeDef = ()):\n def write_metrics_summary(self, metrics: MetricMapType, step: int,\n num_steps: int) -> MetricValueMapFuture:\n def _summarize_and_write():\n def _ensure_not_on_device(x):\n def flush(self):\n def __init__(self, model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n eval_names: Sequence[str], summary_dir: Optional[str],\n train_state_axes: Any, rng: Rng,\n use_wandb=False, packing_strategy=None, log_weights=None):\n def __enter__(self):\n def __exit__(self, exc_type, exc_value, traceback):\n def close(self):\n def _get_step_rng(self, step: int) -> Rng:\n def train_state(self):\n def train_state(self, train_state: PyTreeDef):\n def _weight_metric_fn(self):\n def _get_weight_metrics_fn(_params):\n def train(self,\n batch_iter: Union[Iterator[BatchType],\n clu.data.dataset_iterator.DatasetIterator],\n num_steps: int,\n start_step: Optional[int] = None) -> ArrayMapFuture:\n def compile_train(self, batch: ElementSpec) -> None:\n def eval(\n self, batch_iters: Mapping[str,\n Iterator[BatchType]], pbar_nsteps=None) -> Mapping[str, Array]:\n def compile_eval(self, batches: Mapping[str, BatchType]) -> None:\n def _partitioned_train_step(self) -> PartitionedTrainCallable:\n def _partitioned_eval_step(self) -> PartitionedEvalCallable:\ndef accumulate_grads_microbatched(\n model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n batch: BatchType,\n dropout_rng: Rng,\n num_microbatches: Optional[int],\n data_partition_spec: PartitionSpec = PartitionSpec(\"data\"),\n loss_fn_args=None\n) -> Tuple[train_state_lib.TrainState, MutableMetricMapType,\n def get_microbatch(batch: BatchType, idx: int) -> Mapping[str, jnp.ndarray]:\n def metrics_and_grad(loop_cnt, dropout_rng, flax_mutables=None):\n def per_microbatch_train_step(\n loop_cnt: int, state: Tuple[jnp.ndarray, jnp.ndarray,\n Mapping[str, jnp.ndarray],\n Optional[FlaxMutables]]\n ) -> Tuple[jnp.ndarray, jnp.ndarray, Mapping[str, jnp.ndarray],\ndef apply_grads(\n train_state: train_state_lib.TrainState,\n grad_accum: ModelWeights,\n metrics: MutableMetricMapType,\n learning_rate: jnp.ndarray,\n weight_metrics_computer: Optional[WeightMetricsComputer],\n other_state_variables: Optional[Mapping[str, Any]] = None\n) 
-> Tuple[train_state_lib.TrainState, MetricMapType]:\ndef eval_step(model: models.BaseModel, train_state: train_state_lib.TrainState,\n batch: jnp.ndarray) -> MetricMapType:\ndef train_with_lr(\n train_state: train_state_lib.TrainState,\n batch: BatchType,\n learning_rate: jnp.ndarray,\n dropout_rng: Rng,\n model: models.BaseModel,\n num_microbatches: Optional[int],\n weight_metrics_computer: Optional[WeightMetricsComputer] = None,\n data_partition_spec: PartitionSpec = PartitionSpec(\"data\"),\n loss_fn_args=None\n):\n def __call__(self, model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n eval_names: Sequence[str], summary_dir: Optional[str],\n train_state_axes: Any, rng: Rng) -> BaseTrainer:\n def __init__(self,\n model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n eval_names: Sequence[str],\n summary_dir: Optional[str],\n train_state_axes: Any,\n rng: Rng,\n learning_rate_fn: LearningRateCallable,\n num_microbatches: Optional[int],\n weight_metrics_computer: Optional[WeightMetricsComputer] = None,\n use_wandb=True,\n packing_strategy=None,\n log_weights=False\n ):\n def _partitioned_train_step(self) -> PartitionedTrainCallable:\n def train_step(train_state: train_state_lib.TrainState, batch: BatchType, static_args=None):\n def _partitioned_eval_step(self) -> PartitionedEvalCallable:\ndef _warn_action_not_run(action, task, metric):\n def run(self, train_state: train_state_lib.TrainState,\n metrics_by_task: Mapping[str, MetricValueMapType]) -> bool:\n def __init__(self,\n metric: Tuple[str, str],\n mode: str,\n patience: int = 3,\n atol: float = 0.,\n rtol: float = 0.):\n def _compare_fn(self, current, previous):\n def run(self, train_state: train_state_lib.TrainState,\n metrics_by_task: Mapping[str, MetricValueMapType]) -> bool:\n def __init__(self, task: str, metric: str = \"loss\"):\n def run(self, train_state: train_state_lib.TrainState,\n metrics_by_task: Mapping[str, MetricValueMapType]) -> bool:\nclass ArrayMapFuture(typing_extensions.Protocol):\nclass MetricValueMapFuture(typing_extensions.Protocol):\nclass TimeFuture(typing_extensions.Protocol):\nclass LearningRateCallable(typing_extensions.Protocol):\nclass SummarizeMetricsCallable(typing_extensions.Protocol):\nclass PartitionedTrainCallable(typing_extensions.Protocol):\nclass PartitionedEvalCallable(typing_extensions.Protocol):\nclass GradNormComputer(object):\nclass WeightMetricsComputer(object):\nclass _AsyncTimer(object):\nclass MetricsManager(object):\nclass PreemptionError(Exception):\nclass BaseTrainer(abc.ABC):\nclass BaseTrainerConstructor(Protocol):\nclass Trainer(BaseTrainer):\nclass ActionMode(enum.Enum):\nclass BaseAction(abc.ABC):\nclass EarlyStoppingAction(BaseAction):\nclass TerminateOnNanAction(BaseAction):\n _WEIGHT_METRICS = [\n \"weight_rms\", \"weight_gradient_rms\", \"weight_update_rms\", \"weight_max\"\n ]\n TRAIN = 1\n TRAIN_EVAL = 2\n INFER_EVAL = 3" }, { "identifier": "utils", "path": "t5x/utils.py", "snippet": "class EvaluatorConstructor(typing_extensions.Protocol):\nclass SaveCheckpointConfig:\nclass RestoreCheckpointConfig:\nclass CheckpointConfig:\nclass LegacyCheckpointer(orbax.checkpoint.Checkpointer):\nclass LegacyCheckpointManager(orbax.checkpoint.CheckpointManager):\nclass DatasetConfig:\nclass GDADatasetIterator(clu.data.dataset_iterator.DatasetIterator):\nclass InitFnCallable(typing_extensions.Protocol):\nclass LearningRateCallable(typing_extensions.Protocol):\nclass 
TrainStateInitializer:\nclass InferStepWithRngCallable(typing_extensions.Protocol):\nclass InferStepWithoutRngCallable(typing_extensions.Protocol):\nclass InferFnCallable(typing_extensions.Protocol):\nclass GetDatasetCallable(typing_extensions.Protocol):\nclass GetEvalDatasetCallable(typing_extensions.Protocol):\nclass _RegexMap(collections.abc.Mapping):\n def __call__(\n self,\n mixture_or_task_name: str,\n feature_converter: seqio.FeatureConverter,\n eval_split: str,\n use_cached: bool,\n seed: Optional[int],\n sequence_length: Optional[Mapping[str, int]],\n log_dir: Optional[str],\n use_memory_cache: bool,\n ) -> seqio.Evaluator:\n def __post_init__(self):\n def __post_init__(self):\n def __init__(self,\n *,\n save_checkpointer: Optional[checkpoints.Checkpointer] = None,\n restore_checkpointer: checkpoints.Checkpointer,\n strict: Optional[bool] = False):\n async def async_save(self, path: str, item: Any):\n async def async_restore(self, path: str, item: Optional[Any] = None) -> Any:\n def save(self,\n path: str,\n item: train_state_lib.TrainState,\n state_transformation_fns: Sequence[\n checkpoints.SaveStateTransformationFn] = (),\n *,\n concurrent_gb: int = 128):\n def restore(self,\n path: str,\n item: Optional[train_state_lib.TrainState],\n state_transformation_fns: Sequence[\n checkpoints.RestoreStateTransformationFn] = (),\n fallback_state: Optional[Mapping[str, Any]] = None,\n lazy_parameters: bool = False) -> train_state_lib.TrainState:\n def __init__(self,\n *,\n save_cfg: Optional[SaveCheckpointConfig] = None,\n restore_cfg: RestoreCheckpointConfig,\n train_state_shape: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n ds_iter: Optional[\n Union[tf.data.Iterator,\n clu.data.dataset_iterator.DatasetIterator]] = None,\n model_dir: Optional[str] = None,\n use_gda: Optional[bool] = True):\n def save(self,\n train_state: train_state_lib.TrainState,\n state_transformation_fns: Sequence[\n checkpoints.SaveStateTransformationFn] = ()):\n def restore(\n self,\n paths: Sequence[str],\n restore_cfg: RestoreCheckpointConfig,\n fallback_state: Optional[Mapping[str, Any]] = None\n ) -> Union[train_state_lib.TrainState, Sequence[train_state_lib.TrainState]]:\ndef _get_index_mappings(device_to_idxs):\ndef _create_gda(partitioner: partitioning.BasePartitioner,\n global_shapes: PyTreeDef, host_arrays: PyTreeDef) -> PyTreeDef:\n def _put_to_devices(x, global_shape):\n def _gda(dbs, global_shape):\n def __init__(self, iterator: clu.data.dataset_iterator.DatasetIterator,\n partitioner: partitioning.BasePartitioner,\n global_shapes: PyTreeDef):\n def __next__(self):\n def reset(self):\n def element_spec(self):\n def save(self, filename):\n def restore(self, filename):\n def iterator(self):\ndef sync_global_devices(name: str) -> None:\ndef multihost_assert_equal(input_tree, fail_message: str = ''):\ndef _hardware_uniform(\n rng_key: Array,\n shape: Shape,\n dtype: jnp.dtype = np.float32,\n minval: Array = np.float32(0),\n maxval: Array = np.float32(1)\n) -> Array:\ndef _hardware_bernoulli(\n rng_key: Array, p: np.ndarray = np.float32(0.5),\n shape: Shape = ()) -> Array:\ndef set_hardware_rng_ops():\ndef get_zeros_batch_like_spec(\n batch_spec: Mapping[str,\n jax.ShapeDtypeStruct]) -> Mapping[str, jnp.ndarray]:\ndef get_zeros_batch_like_dataset(dataset: tf.data.Dataset,\n batch_size=None) -> Mapping[str, jnp.ndarray]:\n def __call__(\n self, rng: Array, input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str,\n DType]]) -> flax_scope.FrozenVariableDict:\n def 
__call__(self, step: jnp.ndarray) -> jnp.ndarray:\ndef create_learning_rate_scheduler(\n factors: str = 'constant * linear_warmup * rsqrt_decay',\n base_learning_rate: float = 0.5,\n warmup_steps: int = 1000,\n decay_factor: float = 0.5,\n steps_per_decay: int = 20000,\n steps_per_cycle: int = 100000,\n step_offset: int = 0,\n min_learning_rate: float = 1e-8) -> LearningRateCallable:\n def step_fn(step: jnp.ndarray) -> jnp.ndarray:\ndef steps(prefix, config, data_size=None, batch_size=None, default=ValueError):\ndef create_vision_learning_rate_scheduler(\n total_steps, batch_size=None, data_size=None,\n base=1.0, decay_type=\"stair\",\n scale_with_batchsize=False, **kw):\n def step_fn(step):\ndef get_first_valid_restore_config_and_paths(\n restore_cfgs: Sequence[RestoreCheckpointConfig]\n) -> Tuple[Optional[RestoreCheckpointConfig], Sequence[str]]:\ndef get_fallback_state(restore_cfg: RestoreCheckpointConfig,\n init_fn: Callable[[jnp.ndarray], Mapping[str, Any]],\n init_rng: jnp.ndarray) -> Optional[Mapping[str, Any]]:\n def __init__(self,\n optimizer_def: Optional[optimizers.OptimizerDefType],\n init_fn: InitFnCallable,\n input_shapes: Mapping[str, Array],\n partitioner: partitioning.BasePartitioner,\n model=None,\n input_types: Optional[Mapping[str, DType]] = None):\n def initialize_train_state(rng: Array):\n def from_scratch(self, init_rng: Array) -> train_state_lib.TrainState:\n def from_checkpoints(\n self,\n restore_cfgs: Sequence[RestoreCheckpointConfig],\n ds_iter: Optional[tf.data.Iterator] = None,\n init_rng: Optional[jnp.ndarray] = None,\n ) -> Iterable[train_state_lib.TrainState]:\n def _restore_path(path, cfg):\n def from_checkpoint(\n self,\n ckpt_cfgs: Sequence[RestoreCheckpointConfig],\n *,\n ds_iter: Optional[tf.data.Iterator] = None,\n init_rng: Optional[jnp.ndarray] = None\n ) -> Optional[train_state_lib.TrainState]:\n def from_checkpoint_or_scratch(\n self,\n ckpt_cfgs: Sequence[RestoreCheckpointConfig],\n *,\n init_rng: Array,\n ds_iter: Optional[tf.data.Iterator] = None) -> train_state_lib.TrainState:\ndef log_model_info(log_file: Optional[str],\n full_train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner):\n def _log_info_and_write_to_file(writer, format_str, *args):\n def _log_variable(name: str, arr: Optional[np.ndarray],\n logical_axes: Optional[partitioning.AxisNames],\n mesh_axes: Optional[partitioning.PartitionSpec]):\n def __call__(self,\n params: Mapping[str, Any],\n batch: Mapping[str, jnp.ndarray],\n rng: jnp.ndarray = None) -> PyTreeDef:\n def __call__(self, params: Mapping[str, Any],\n batch: Mapping[str, jnp.ndarray]) -> PyTreeDef:\n def __call__(\n self,\n ds: tf.data.Dataset,\n train_state: train_state_lib.TrainState,\n rng: Optional[jnp.ndarray] = None\n ) -> Union[_InferFnResult, _InferFnWithAuxResult]:\ndef _remove_padding(all_inferences, all_indices):\ndef get_infer_fn(infer_step: InferStepCallable, batch_size: int,\n train_state_axes: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner, \n pbar=False) -> InferFnCallable:\n def infer_step_with_indices(params, batch, rng, indices):\n def infer_fn(ds: tf.data.Dataset,\n train_state: train_state_lib.TrainState,\n rng: Optional[jnp.ndarray] = None):\n def _copy_to_host_async(x):\ndef import_module(module: str):\ndef get_vocabulary(\n cfg: DatasetConfig) -> Tuple[seqio.Vocabulary, seqio.Vocabulary]:\ndef verify_matching_vocabs(cfg: DatasetConfig, model: Any):\ndef get_dataset(cfg: DatasetConfig,\n shard_id: int,\n num_shards: int,\n 
feature_converter_cls: Callable[..., seqio.FeatureConverter],\n num_epochs: Optional[int] = None,\n continue_from_last_checkpoint: bool = False,\n batching_fn=None) -> tf.data.Dataset:\ndef get_dataset_inner(cfg: DatasetConfig,\n shard_info: seqio.ShardInfo,\n feature_converter_cls: Callable[...,\n seqio.FeatureConverter],\n seed: Optional[int] = None,\n num_epochs: Optional[int] = None,\n batching_fn=None\n ):\n def __call__(\n self,\n cfg: DatasetConfig,\n shard_id: int,\n num_shards: int,\n feature_converter_cls: Callable[..., seqio.FeatureConverter],\n num_epochs: Optional[int] = None,\n continue_from_last_checkpoint: bool = True\n ) -> Union[clu.data.dataset_iterator.DatasetIterator, tf.data.Dataset]:\n def __call__(\n self, cfg: DatasetConfig, shard_id: int, num_shards: int, eval_steps: int,\n feature_converter_cls: Callable[..., seqio.FeatureConverter]\n ) -> Mapping[str, tf.data.Dataset]:\ndef get_training_eval_datasets(\n cfg: DatasetConfig,\n shard_id: int,\n num_shards: int,\n eval_steps: int,\n feature_converter_cls: Callable[..., seqio.FeatureConverter],\n deterministic: bool = False,\n model_dir: Optional[str] = None,\n start_step: int = 0,\n) -> Mapping[str, tf.data.Dataset]:\n def _repeat_shard_batch_take_cache(ds: tf.data.Dataset):\ndef round_vocab_size_to_multiple(vocabulary: seqio.Vocabulary,\n divisor: int = 128):\ndef flatten_dict_string_keys(x):\ndef flatten_lists(lsts: Iterable[Iterable]) -> Sequence:\n def __init__(self, kvs: Sequence[Tuple[str, Any]]):\n def __getitem__(self, key: str) -> Any:\n def __len__(self) -> int:\n def __iter__(self) -> Iterable[Tuple[re.Pattern, Any]]:\ndef override_params_axes_names(\n model_variables: flax_scope.FrozenVariableDict,\n params_axes_names_override: Sequence[Tuple[str, Tuple[str, ...]]] = ()\n) -> flax_scope.FrozenVariableDict:\ndef get_local_data(x):" } ]
import functools import flax import jax import jax.numpy as jnp import numpy as np import t5.data.tasks # pylint:disable=unused-import import tensorflow as tf from unittest import mock from absl import logging from absl.testing import absltest from absl.testing import parameterized from flax import traverse_util from t5x import decoding from t5x import models from t5x import partitioning from t5x import test_utils from t5x import trainer as trainer_lib from t5x import utils
12,627
@parameterized.named_parameters( dict(testcase_name='no_force_decoding', prompt_with_targets=False), dict(testcase_name='force_decoding', prompt_with_targets=True), ) def test_prompt_with_targets(self, prompt_with_targets): batch_size, encoder_len, max_decode_len, emb_dim = 2, 3, 4, 5 batch = { 'encoder_input_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_input_tokens': np.full([batch_size, max_decode_len], 2, dtype=np.int32) } # These dummy logits represent the probability distribution where all the # probability mass is in one item (i.e., degenerate distribution). For # batch element 0, it is vocabulary index 3. # We test `_predict_step` to avoid having to define a task and its # vocabulary. dummy_logits = jnp.expand_dims( jnp.array([[-1e7, -1e7, -1e7, 0, -1e7], [-1e7, -1e7, -1e7, -1e7, 0]]), axis=1) mock_decode_fn = mock.Mock() mock_decode_fn.return_value = (np.full([batch_size, max_decode_len, 1], 3, dtype=np.int32), np.full([batch_size, 1], 1.0, dtype=np.float32)) class MockModule: def __init__(self): self.dtype = jnp.float32 def apply(self, *args, method=None, **kwargs): del args, kwargs if method is None: # use for module.`__call__` return (dummy_logits, {'cache': {}}) else: return method() def encode(self): return jnp.zeros((batch_size, encoder_len, emb_dim)) def decode(self): return (dummy_logits, {'cache': {}}) def mock_init(self): self.module = MockModule() self.module.scan_layers = False self._input_vocabulary = mock.Mock(eos_id=1) self._output_vocabulary = mock.Mock(eos_id=1) self._decode_fn = mock_decode_fn with mock.patch.object( models.EncoderDecoderModel, '__init__', new=mock_init): model = models.EncoderDecoderModel() model.predict_batch_with_aux({}, batch, prompt_with_targets=prompt_with_targets) if prompt_with_targets: expected_inputs = batch['decoder_input_tokens'] else: expected_inputs = np.zeros([batch_size, max_decode_len], dtype=np.int32) assert mock_decode_fn.call_count == 1 # Look at the kwargs call list for inputs, assert_called_with doesn't # work well with np.array comparison. np.testing.assert_array_equal(mock_decode_fn.mock_calls[0][2]['inputs'], expected_inputs) def test_predict_batch_loop_and_caches_are_equal(self): vocab_size = 50 lengths = np.array([[2], [3]]) batch_size, beam_size, encoder_len, max_decode_len = 2, 2, 3, 7 batch = { 'encoder_input_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_target_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_input_tokens': np.concatenate( [ np.expand_dims( np.concatenate( [[0], np.arange(9, 9 + lengths[0][0], dtype=np.int32), np.zeros((max_decode_len - lengths[0][0] - 1), dtype=np.int32)]), axis=0), # First element np.expand_dims( np.concatenate( [[0], np.arange(3, 3 + lengths[1][0], dtype=np.int32), np.zeros((max_decode_len - lengths[1][0] - 1), dtype=np.int32)]), axis=0) # Second element ], axis=0), } model = test_utils.get_t5_test_model(vocab_size=50) module = model.module params = module.init( jax.random.PRNGKey(0), jnp.ones((batch_size, encoder_len)), jnp.ones((batch_size, max_decode_len)), jnp.ones((batch_size, max_decode_len)), enable_dropout=False)['params'] def mock_init(self): self.module = module # Set the EOS token to be larger then the vocabulary size. This forces the # model to decode all the way to `max_decode_length`, allowing us to test # behavior when one element reaches the end before the others. self._output_vocabulary = mock.Mock(eos_id=vocab_size + 12)
# Copyright 2022 The T5X Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for t5x.models.""" # Parse absl flags test_srcdir and test_tmpdir. jax.config.parse_flags_with_absl() PartitionSpec = partitioning.PartitionSpec class ModelsTest(parameterized.TestCase): def test_remove_prefix(self): sequences = np.array([[1, 2, 3, 4, 5, 6, 7, 0], [6, 7, 8, 9, 10, 11, 0, 0]]) prefix_lengths = np.array([2, 4]) expected = [[3, 4, 5, 6, 7, 0, 0, 0], [10, 11, 0, 0, 0, 0, 0, 0]] remove_prefix = jax.jit(models.remove_prefix) actual = remove_prefix(sequences, prefix_lengths) np.testing.assert_array_equal(actual, expected) def test_remove_prefix_zero_len_prefix(self): sequences = np.array([[1, 2, 3, 4, 5, 6, 7, 0], [6, 7, 8, 9, 10, 11, 0, 0]]) prefix_lengths = np.array([0, 0]) remove_prefix = jax.jit(models.remove_prefix) actual = remove_prefix(sequences, prefix_lengths) # The expected output is the original sequences. np.testing.assert_array_equal(actual, sequences) BATCH_SIZE, ENCODER_LEN, MAX_DECODE_LEN, EMBED_DIM = 2, 3, 4, 5 class EncoderDecoderModelTest(parameterized.TestCase): @parameterized.named_parameters( dict( testcase_name='no_types', shapes={ 'encoder_input_tokens': [1, 512], 'decoder_input_tokens': [1, 62] }, types=None), dict( testcase_name='int32', shapes={ 'encoder_input_tokens': [1, 512], 'decoder_input_tokens': [1, 62] }, types={ 'encoder_input_tokens': jnp.int32, 'decoder_input_tokens': jnp.int32 }), dict( testcase_name='float32', shapes={ 'encoder_input_tokens': [1, 512], 'decoder_input_tokens': [1, 62], 'encoder_positions': [1, 512], 'decoder_positions': [1, 62], }, types={ 'encoder_input_tokens': jnp.int32, 'decoder_input_tokens': jnp.int32, 'encoder_positions': jnp.int32, 'decoder_positions': jnp.int32 }), dict( testcase_name='float32_segment_ids', shapes={ 'encoder_input_tokens': [1, 512], 'decoder_input_tokens': [1, 62], 'encoder_segment_ids': [1, 512], 'decoder_segment_ids': [1, 62], }, types={ 'encoder_input_tokens': jnp.int32, 'decoder_input_tokens': jnp.int32, 'encoder_segment_ids': jnp.int32, 'decoder_segment_ids': jnp.int32 }), ) def test_get_initial_variables_shapes_and_types(self, shapes, types): mock_transformer = mock.Mock() mock_transformer.init.return_value = {'params': {}} mock_optimizer_def = mock.Mock() rng = mock.Mock() def mock_init(self): self.module = mock_transformer self.optimizer_def = mock_optimizer_def with mock.patch.object( models.EncoderDecoderModel, '__init__', new=mock_init): model = models.EncoderDecoderModel() model.get_initial_variables(rng, shapes, types) if types is None: encoder_input = jnp.ones( shapes['encoder_input_tokens'], dtype=jnp.float32) decoder_input = jnp.ones( shapes['decoder_input_tokens'], dtype=jnp.float32) else: encoder_input = jnp.ones( shapes['encoder_input_tokens'], dtype=types['encoder_input_tokens']) decoder_input = jnp.ones( shapes['decoder_input_tokens'], dtype=types['decoder_input_tokens']) # Using `.assert_called_once_with` doesn't work because the simple # comparison it does for the array arguments fail (truth value of 
an array # is ambiguous). called_with = mock_transformer.init.call_args self.assertEqual(called_with[0][0], rng) np.testing.assert_allclose(called_with[0][1], encoder_input) np.testing.assert_allclose(called_with[0][2], decoder_input) np.testing.assert_allclose(called_with[0][3], decoder_input) if 'encoder_positions' in shapes: encoder_positions = jnp.ones( shapes['encoder_positions'], dtype=types['encoder_positions']) np.testing.assert_allclose(called_with[1]['encoder_positions'], encoder_positions) else: self.assertIsNone(called_with[1]['encoder_positions']) if 'decoder_positions' in shapes: decoder_positions = jnp.ones( shapes['decoder_positions'], dtype=types['decoder_positions']) np.testing.assert_allclose(called_with[1]['decoder_positions'], decoder_positions) else: self.assertIsNone(called_with[1]['decoder_positions']) if 'encoder_segment_ids' in shapes: encoder_positions = jnp.ones( shapes['encoder_segment_ids'], dtype=types['encoder_segment_ids']) np.testing.assert_allclose(called_with[1]['encoder_segment_ids'], encoder_positions) else: self.assertIsNone(called_with[1]['encoder_segment_ids']) if 'decoder_segment_ids' in shapes: decoder_segment_ids = jnp.ones( shapes['decoder_segment_ids'], dtype=types['decoder_segment_ids']) np.testing.assert_allclose(called_with[1]['decoder_segment_ids'], decoder_segment_ids) else: self.assertIsNone(called_with[1]['decoder_segment_ids']) self.assertFalse(called_with[1]['decode']) self.assertFalse(called_with[1]['enable_dropout']) @parameterized.named_parameters( dict(testcase_name='no_force_decoding', prompt_with_targets=False), dict(testcase_name='force_decoding', prompt_with_targets=True), ) def test_prompt_with_targets(self, prompt_with_targets): batch_size, encoder_len, max_decode_len, emb_dim = 2, 3, 4, 5 batch = { 'encoder_input_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_input_tokens': np.full([batch_size, max_decode_len], 2, dtype=np.int32) } # These dummy logits represent the probability distribution where all the # probability mass is in one item (i.e., degenerate distribution). For # batch element 0, it is vocabulary index 3. # We test `_predict_step` to avoid having to define a task and its # vocabulary. 
dummy_logits = jnp.expand_dims( jnp.array([[-1e7, -1e7, -1e7, 0, -1e7], [-1e7, -1e7, -1e7, -1e7, 0]]), axis=1) mock_decode_fn = mock.Mock() mock_decode_fn.return_value = (np.full([batch_size, max_decode_len, 1], 3, dtype=np.int32), np.full([batch_size, 1], 1.0, dtype=np.float32)) class MockModule: def __init__(self): self.dtype = jnp.float32 def apply(self, *args, method=None, **kwargs): del args, kwargs if method is None: # use for module.`__call__` return (dummy_logits, {'cache': {}}) else: return method() def encode(self): return jnp.zeros((batch_size, encoder_len, emb_dim)) def decode(self): return (dummy_logits, {'cache': {}}) def mock_init(self): self.module = MockModule() self.module.scan_layers = False self._input_vocabulary = mock.Mock(eos_id=1) self._output_vocabulary = mock.Mock(eos_id=1) self._decode_fn = mock_decode_fn with mock.patch.object( models.EncoderDecoderModel, '__init__', new=mock_init): model = models.EncoderDecoderModel() model.predict_batch_with_aux({}, batch, prompt_with_targets=prompt_with_targets) if prompt_with_targets: expected_inputs = batch['decoder_input_tokens'] else: expected_inputs = np.zeros([batch_size, max_decode_len], dtype=np.int32) assert mock_decode_fn.call_count == 1 # Look at the kwargs call list for inputs, assert_called_with doesn't # work well with np.array comparison. np.testing.assert_array_equal(mock_decode_fn.mock_calls[0][2]['inputs'], expected_inputs) def test_predict_batch_loop_and_caches_are_equal(self): vocab_size = 50 lengths = np.array([[2], [3]]) batch_size, beam_size, encoder_len, max_decode_len = 2, 2, 3, 7 batch = { 'encoder_input_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_target_tokens': np.zeros((batch_size, encoder_len), dtype=np.int32), 'decoder_input_tokens': np.concatenate( [ np.expand_dims( np.concatenate( [[0], np.arange(9, 9 + lengths[0][0], dtype=np.int32), np.zeros((max_decode_len - lengths[0][0] - 1), dtype=np.int32)]), axis=0), # First element np.expand_dims( np.concatenate( [[0], np.arange(3, 3 + lengths[1][0], dtype=np.int32), np.zeros((max_decode_len - lengths[1][0] - 1), dtype=np.int32)]), axis=0) # Second element ], axis=0), } model = test_utils.get_t5_test_model(vocab_size=50) module = model.module params = module.init( jax.random.PRNGKey(0), jnp.ones((batch_size, encoder_len)), jnp.ones((batch_size, max_decode_len)), jnp.ones((batch_size, max_decode_len)), enable_dropout=False)['params'] def mock_init(self): self.module = module # Set the EOS token to be larger then the vocabulary size. This forces the # model to decode all the way to `max_decode_length`, allowing us to test # behavior when one element reaches the end before the others. self._output_vocabulary = mock.Mock(eos_id=vocab_size + 12)
self._decode_fn = decoding.beam_search
0
2023-12-12 20:23:33+00:00
16k
zju3dv/EasyVolcap
tests/headless_opengl_tests.py
[ { "identifier": "eglContextManager", "path": "easyvolcap/utils/egl_utils.py", "snippet": "class eglContextManager:\n # Manages the creation and destruction of an EGL context\n # Will resize if the size of the window changes\n # Will also manage gl.Viewport to render different parts of the screen\n # Only resize the underlying egl ctx when exceeding current size\n def __init__(self, W=1920, H=1080) -> None:\n self.H, self.W = H, W\n self.max_H, self.max_W = H, W # always create at first\n self.eglctx = create_opengl_context()\n self.create_fbo_with_rbos(W, H)\n self.resize(W, H) # maybe create new framebuffer\n\n def create_fbo_with_rbos(self, W: int, H: int):\n if hasattr(self, 'fbo'):\n gl.glDeleteFramebuffers(1, [self.fbo])\n gl.glDeleteRenderbuffers(6, [self.rbo0, self.rbo1, self.rbo2, self.rbo3, self.rbo4, self.rbo_dpt])\n\n # Add new buffer\n self.fbo = gl.glGenFramebuffers(1)\n self.rbo0, self.rbo1, self.rbo2, self.rbo3, self.rbo4, self.rbo_dpt = gl.glGenRenderbuffers(6)\n gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.rbo0)\n gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_RGBA8, W, H)\n gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.rbo1)\n gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_RGBA8, W, H)\n gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.rbo2)\n gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_RGBA8, W, H)\n gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.rbo3)\n gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_RGBA8, W, H)\n gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.rbo4)\n gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_RGBA8, W, H)\n gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.rbo_dpt)\n gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_DEPTH_COMPONENT, W, H)\n\n gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo)\n gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_RENDERBUFFER, self.rbo0)\n gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_RENDERBUFFER, self.rbo1)\n gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_RENDERBUFFER, self.rbo2)\n gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_RENDERBUFFER, self.rbo3)\n gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_RENDERBUFFER, self.rbo4)\n gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_RENDERBUFFER, self.rbo_dpt)\n gl.glDrawBuffers(5, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2, gl.GL_COLOR_ATTACHMENT3, gl.GL_COLOR_ATTACHMENT4])\n\n gl.glViewport(0, 0, W, H) # wtf\n gl.glScissor(0, 0, W, H) # wtf # NOTE: Need to redefine scissor size\n\n def resize(self, W=1920, H=1080):\n self.H, self.W = H, W\n if self.H > self.max_H or self.W > self.max_W:\n self.max_H, self.max_W = max(int(self.H * 1.0), self.max_H), max(int(self.W * 1.0), self.max_W)\n self.create_fbo_with_rbos(self.max_W, self.max_H)\n gl.glViewport(0, 0, self.W, self.H)" }, { "identifier": "Quad", "path": "easyvolcap/utils/gl_utils.py", "snippet": "class Quad(Mesh):\n # A shared texture for CUDA (pytorch) and OpenGL\n # Could be rendererd to screen using blitting or just drawing a quad\n def __init__(self, H: int = 256, W: int = 256, use_cudagl: bool = True, compose: bool = False, compose_power: float = 1.0): # the texture to blip\n self.use_cudagl = use_cudagl\n self.vert_sizes = [3] # only position\n self.vert_gl_types = [gl.GL_FLOAT] # only position\n self.render_type = Mesh.RenderType.STRIPS # remove side effects of settings 
_type\n self.max_verts, self.max_faces = 0, 0\n self.verts = torch.as_tensor([[-1., -1., 0.5],\n [1., -1., 0.5],\n [-1., 1., 0.5],\n [1., 1., 0.5],])\n self.update_gl_buffers()\n self.compile_shaders()\n\n self.max_H, self.max_W = H, W\n self.H, self.W = H, W\n self.compose = compose\n self.compose_power = compose_power\n self.init_texture()\n\n @property\n def n_faces_bytes(self): return 0\n\n def use_gl_program(self, program: shaders.ShaderProgram):\n super().use_gl_program(program)\n self.uniforms.tex = gl.glGetUniformLocation(program, 'tex')\n gl.glUseProgram(self.quad_program) # use a different program\n gl.glUniform1i(self.uniforms.tex, 0)\n\n def compile_shaders(self):\n try:\n self.quad_program = shaders.compileProgram(\n shaders.compileShader(load_shader_source('quad.vert'), gl.GL_VERTEX_SHADER),\n shaders.compileShader(load_shader_source('quad.frag'), gl.GL_FRAGMENT_SHADER)\n )\n except Exception as e:\n print(str(e).encode('utf-8').decode('unicode_escape'))\n raise e\n\n def resize_textures(self, H: int, W: int): # analogy to update_gl_buffers\n self.H, self.W = H, W\n if self.H > self.max_H or self.W > self.max_W: # max got updated\n self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W)\n self.init_texture()\n\n def init_texture(self):\n if hasattr(self, 'cu_tex'):\n from cuda import cudart\n CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_tex))\n\n if hasattr(self, 'fbo'):\n gl.glDeleteFramebuffers(1, [self.fbo])\n gl.glDeleteTextures(1, [self.tex])\n\n # Init the texture to be blit onto the screen\n self.tex = gl.glGenTextures(1)\n gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)\n gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA8, self.max_W, self.max_H, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ctypes.c_void_p(0))\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)\n\n # Init the framebuffer object if explicit blitting is used (slower than drawing quad)\n self.fbo = gl.glGenFramebuffers(1)\n old_fbo = gl.glGetIntegerv(gl.GL_FRAMEBUFFER_BINDING)\n gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo)\n gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.tex, 0)\n gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, old_fbo)\n\n if self.use_cudagl:\n from cuda import cudart\n if self.compose:\n # Both reading and writing of this resource is required\n flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone\n else:\n flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard\n self.cu_tex = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.tex, gl.GL_TEXTURE_2D, flags))\n\n def copy_to_texture(self, image: torch.Tensor, x: int = 0, y: int = 0, w: int = 0, h: int = 0):\n assert self.use_cudagl, \"Need to enable cuda-opengl interop to copy from device to device, check creation of this Quad\"\n w = w or self.W\n h = h or self.H\n if image.shape[-1] == 3:\n image = torch.cat([image, image.new_ones(image.shape[:-1] + (1,)) * 255], dim=-1) # add alpha channel\n\n from cuda import cudart\n kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice\n CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream))\n cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(self.cu_tex, 0, 0))\n\n if self.compose:\n \"\"\"\n Blit current framebuffer to this texture (self.tex)\n Read content of this texture 
into a cuda buffer\n Perform alpha blending based on the frame's alpha channel\n Copy the blended image back into the texture (self.tex)\n \"\"\"\n old = gl.glGetInteger(gl.GL_DRAW_FRAMEBUFFER_BINDING)\n gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, self.fbo) # read buffer defaults to 0\n gl.glBlitFramebuffer(x, y, w, h,\n x, y, w, h,\n gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) # now self.tex contains the content of the already rendered frame\n gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, old)\n\n buffer = torch.empty_like(image)\n CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(buffer.data_ptr(), # dst\n w * 4 * buffer.element_size(), # dpitch\n cu_tex_arr, # src\n x * 4 * image.element_size(), # wOffset\n y, # hOffset\n w * 4 * buffer.element_size(), # width Width of matrix transfer (columns in bytes)\n h, # height\n kind, # kind\n torch.cuda.current_stream().cuda_stream)) # stream\n\n # cv2.imwrite('image.png', image.flip(0).detach().cpu().numpy()[..., [2,1,0,3]])\n alpha = image[..., -1:] / 255\n image[..., :-1] = buffer[..., :-1] * (1 - alpha ** self.compose_power) + image[..., :-1] * alpha # storing float into int\n image[..., -1:] = buffer[..., -1:] + image[..., -1:]\n image = image.clip(0, 255)\n\n CHECK_CUDART_ERROR(cudart.cudaMemcpy2DToArrayAsync(cu_tex_arr,\n x * 4 * image.element_size(),\n y,\n image.data_ptr(),\n w * 4 * image.element_size(), # differently sized\n w * 4 * image.element_size(), # rgba, should do a composition first\n h,\n kind,\n torch.cuda.current_stream().cuda_stream))\n CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream))\n\n def upload_to_texture(self, ptr: np.ndarray):\n H, W = ptr.shape[:2]\n H, W = min(self.H, H), min(self.W, W)\n gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)\n gl.glTexSubImage2D(gl.GL_TEXTURE_2D, 0, 0, 0, W, H, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ptr[:H, :W]) # to gpu, might slow down?\n\n @property\n def verts_data(self): # a heavy copy operation\n verts = self.verts.ravel().detach().cpu().numpy() # MARK: Maybe sync\n verts = np.asarray(verts, dtype=np.float32, order='C')\n return verts\n\n def render(self, camera: Camera = None):\n self.draw() # no uploading needed\n\n def draw(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0):\n \"\"\"\n Upload the texture instead of the camera\n This respects the OpenGL convension of lower left corners\n \"\"\"\n w = w or self.W\n h = h or self.H\n _, _, W, H = gl.glGetIntegerv(gl.GL_VIEWPORT)\n gl.glViewport(x, y, w, h)\n gl.glScissor(x, y, w, h) # only render in this small region of the viewport\n\n gl.glUseProgram(self.quad_program) # use a different program\n gl.glActiveTexture(gl.GL_TEXTURE0)\n gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)\n\n gl.glBindVertexArray(self.vao)\n gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))\n gl.glBindVertexArray(0)\n\n # Some house keepings\n gl.glViewport(0, 0, W, H)\n gl.glScissor(0, 0, W, H)\n\n def blit(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0):\n \"\"\"\n This respects the OpenGL convension of lower left corners\n \"\"\"\n w = w or self.W\n h = h or self.H\n old = gl.glGetInteger(gl.GL_READ_FRAMEBUFFER_BINDING)\n gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, self.fbo) # write buffer defaults to 0\n gl.glBlitFramebuffer(x, y, x + w, y + h, # the height is flipped\n x, y, x + w, y + h, # the height is flipped\n gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)\n gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, old)" }, { "identifier": "Mesh", "path": "easyvolcap/utils/gl_utils.py", "snippet": 
"class Mesh:\n class RenderType(Enum):\n POINTS = 1\n LINES = 2\n TRIS = 3\n QUADS = 4 # TODO: Support quad loading\n STRIPS = 5\n\n # Helper class to render a mesh on opengl\n # This implementation should only be used for debug visualization\n # Since no differentiable mechanism will be added\n # We recommend using nvdiffrast and pytorch3d's point renderer directly if you will to optimize these structures directly\n\n def __init__(self,\n verts: torch.Tensor = torch.tensor([[0, 0, 0], [0, 1, 0], [0, 0, 1]]), # need to call update after update\n faces: torch.Tensor = torch.tensor([[0, 1, 2]]), # need to call update after update\n colors: torch.Tensor = None,\n normals: torch.Tensor = None,\n scalars: dotdict[str, torch.Tensor] = dotdict(),\n render_type: RenderType = RenderType.TRIS,\n\n # Misc info\n name: str = 'mesh',\n filename: str = '',\n visible: bool = True,\n\n # Render options\n shade_flat: bool = False, # smooth shading\n point_radius: float = 0.015,\n render_normal: bool = False,\n\n # Storage options\n store_device: str = 'cpu',\n compute_device: str = 'cuda',\n vert_sizes=[3, 3, 3], # pos + color + norm\n\n # Init options\n est_normal_thresh: int = 100000,\n\n # Ignore unused input\n **kwargs,\n ) -> None:\n super().__init__()\n self.name = name\n self.visible = visible\n self.render_type = render_type\n\n self.shade_flat = shade_flat\n self.point_radius = point_radius\n self.render_normal = render_normal\n\n self.store_device = store_device\n self.compute_device = compute_device\n self.vert_sizes = vert_sizes\n\n self.est_normal_thresh = est_normal_thresh\n\n # Uniform and program\n self.compile_shaders()\n self.uniforms = dotdict() # uniform values\n\n # Before initialization\n self.max_verts = 0\n self.max_faces = 0\n\n # OpenGL data\n if filename: self.load_from_file(filename)\n else: self.load_from_data(verts, faces, colors, normals, scalars)\n\n def compile_shaders(self):\n try:\n self.mesh_program = shaders.compileProgram(\n shaders.compileShader(load_shader_source('mesh.vert'), gl.GL_VERTEX_SHADER),\n shaders.compileShader(load_shader_source('mesh.frag'), gl.GL_FRAGMENT_SHADER)\n )\n self.point_program = shaders.compileProgram(\n shaders.compileShader(load_shader_source('point.vert'), gl.GL_VERTEX_SHADER),\n shaders.compileShader(load_shader_source('point.frag'), gl.GL_FRAGMENT_SHADER)\n )\n except Exception as e:\n print(str(e).encode('utf-8').decode('unicode_escape'))\n raise e\n\n @property\n def n_verts_bytes(self):\n return len(self.verts) * self.vert_size * self.verts.element_size()\n\n @property\n def n_faces_bytes(self):\n return len(self.faces) * self.face_size * self.faces.element_size()\n\n @property\n def verts_data(self): # a heavy copy operation\n verts = torch.cat([self.verts, self.colors, self.normals], dim=-1).ravel().numpy() # MARK: Maybe sync\n verts = np.asarray(verts, dtype=np.float32, order='C')\n return verts\n\n @property\n def faces_data(self): # a heavy copy operation\n faces = self.faces.ravel().numpy() # N, 3\n faces = np.asarray(faces, dtype=np.uint32, order='C')\n return faces\n\n @property\n def face_size(self):\n return self.render_type.value\n\n @property\n def vert_size(self):\n return sum(self.vert_sizes)\n\n def load_from_file(self, filename: str = 'assets/meshes/bunny.ply'):\n verts, faces, colors, normals, scalars = self.load_data_from_file(filename)\n self.load_from_data(verts, faces, colors, normals, scalars)\n\n def load_data_from_file(self, filename: str = 'assets/meshes/bunny.ply'):\n self.name = os.path.split(filename)[-1]\n 
verts, faces, colors, normals, scalars = None, None, None, None, None\n verts, faces = load_mesh(filename, device=self.store_device)\n if not len(faces):\n verts, colors, normals, scalars = load_pts(filename)\n self.render_type = Mesh.RenderType.POINTS\n else:\n self.render_type = Mesh.RenderType(faces.shape[-1]) # use value\n return verts, faces, colors, normals, scalars\n\n def load_from_data(self, verts: torch.Tensor, faces: torch.Tensor, colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict()):\n # Data type conversion\n verts = torch.as_tensor(verts) # convert to tensor if input is of other types\n if verts.dtype == torch.float32:\n pass # supports this for now\n elif verts.dtype == torch.float16:\n pass # supports this for now\n else:\n verts = verts.type(torch.float) # convert to float32 if input is of higher precision\n gl_dtype = gl.GL_FLOAT if verts.dtype == torch.float else gl.GL_HALF_FLOAT\n self.vert_gl_types = [gl_dtype] * len(self.vert_sizes)\n\n # Prepare main mesh data: vertices and faces\n self.verts = torch.as_tensor(verts, device=self.store_device)\n self.faces = torch.as_tensor(faces, device=self.store_device, dtype=torch.int32) # NOTE: No uint32 support\n\n # Prepare colors and normals\n if colors is not None:\n self.colors = torch.as_tensor(colors, device=self.store_device, dtype=self.verts.dtype)\n else:\n bounds = get_bounds(self.verts[None])[0]\n self.colors = (self.verts - bounds[0]) / (bounds[1] - bounds[0])\n if normals is not None:\n self.normals = torch.as_tensor(normals, device=self.store_device, dtype=self.verts.dtype)\n else:\n self.estimate_vertex_normals()\n\n # Prepare other scalars\n if scalars is not None:\n for k, v in scalars.items():\n setattr(self, k, torch.as_tensor(v, device=self.store_device, dtype=self.verts.dtype)) # is this ok?\n\n # Prepare OpenGL related buffer\n self.update_gl_buffers()\n\n def estimate_vertex_normals(self):\n def est_pcd_norms():\n if self.verts.dtype == torch.half:\n self.normals = self.verts\n else:\n from pytorch3d.structures import Pointclouds, Meshes\n pcd = Pointclouds([self.verts]).to(self.compute_device)\n self.normals = pcd.estimate_normals()[0].cpu().to(self.verts.dtype) # no batch dim\n\n def est_tri_norms():\n if self.verts.dtype == torch.half:\n self.normals = self.verts\n else:\n from pytorch3d.structures import Pointclouds, Meshes\n mesh = Meshes([self.verts], [self.faces]).to(self.compute_device)\n self.normals = mesh.verts_normals_packed().cpu().to(self.verts.dtype) # no batch dim\n\n if not len(self.verts) > self.est_normal_thresh:\n if self.render_type == Mesh.RenderType.TRIS: est_tri_norms()\n elif self.render_type == Mesh.RenderType.POINTS: est_pcd_norms()\n else:\n # log(yellow(f'Unsupported mesh type: {self.render_type} for normal estimation, skipping'))\n self.normals = self.verts\n else:\n # log(yellow(f'Number of points for mesh too large: {len(self.verts)} > {self.est_normal_thresh}, skipping normal estimation'))\n self.normals = self.verts\n\n def offscreen_render(self, eglctx: \"eglContextManager\", camera: Camera):\n eglctx.resize(camera.W, camera.H)\n self.render(camera)\n\n def render(self, camera: Camera):\n if not self.visible: return\n\n # For point rendering\n if self.render_type == Mesh.RenderType.POINTS:\n gl.glUseProgram(self.point_program)\n self.use_gl_program(self.point_program)\n else:\n gl.glUseProgram(self.mesh_program)\n self.use_gl_program(self.mesh_program)\n\n self.upload_gl_uniforms(camera)\n 
gl.glBindVertexArray(self.vao)\n\n if self.render_type == Mesh.RenderType.POINTS:\n gl.glDrawArrays(gl.GL_POINTS, 0, len(self.verts)) # number of vertices\n elif self.render_type == Mesh.RenderType.LINES:\n gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)\n gl.glDrawElements(gl.GL_LINES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices\n elif self.render_type == Mesh.RenderType.TRIS:\n gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)\n gl.glDrawElements(gl.GL_TRIANGLES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices\n elif self.render_type == Mesh.RenderType.QUADS:\n gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)\n gl.glDrawElements(gl.GL_QUADS, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices\n elif self.render_type == Mesh.RenderType.STRIPS:\n gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))\n else:\n raise NotImplementedError\n\n gl.glBindVertexArray(0)\n\n def use_gl_program(self, program: shaders.ShaderProgram):\n use_gl_program(program)\n self.uniforms.shade_flat = gl.glGetUniformLocation(program, \"shade_flat\")\n self.uniforms.point_radius = gl.glGetUniformLocation(program, \"point_radius\")\n self.uniforms.render_normal = gl.glGetUniformLocation(program, \"render_normal\")\n self.uniforms.H = gl.glGetUniformLocation(program, \"H\")\n self.uniforms.W = gl.glGetUniformLocation(program, \"W\")\n self.uniforms.n = gl.glGetUniformLocation(program, \"n\")\n self.uniforms.f = gl.glGetUniformLocation(program, \"f\")\n self.uniforms.P = gl.glGetUniformLocation(program, \"P\")\n self.uniforms.K = gl.glGetUniformLocation(program, \"K\")\n self.uniforms.V = gl.glGetUniformLocation(program, \"V\")\n self.uniforms.M = gl.glGetUniformLocation(program, \"M\")\n\n def upload_gl_uniforms(self, camera: Camera):\n K = camera.gl_ixt # hold the reference\n V = camera.gl_ext # hold the reference\n M = glm.identity(mat4)\n P = K * V * M\n\n gl.glUniform1i(self.uniforms.shade_flat, self.shade_flat)\n gl.glUniform1f(self.uniforms.point_radius, self.point_radius)\n gl.glUniform1i(self.uniforms.render_normal, self.render_normal)\n gl.glUniform1i(self.uniforms.H, camera.H) # o2w\n gl.glUniform1i(self.uniforms.W, camera.W) # o2w\n gl.glUniform1f(self.uniforms.n, camera.n) # o2w\n gl.glUniform1f(self.uniforms.f, camera.f) # o2w\n gl.glUniformMatrix4fv(self.uniforms.P, 1, gl.GL_FALSE, glm.value_ptr(P)) # o2clip\n gl.glUniformMatrix4fv(self.uniforms.K, 1, gl.GL_FALSE, glm.value_ptr(K)) # c2clip\n gl.glUniformMatrix4fv(self.uniforms.V, 1, gl.GL_FALSE, glm.value_ptr(V)) # w2c\n gl.glUniformMatrix4fv(self.uniforms.M, 1, gl.GL_FALSE, glm.value_ptr(M)) # o2w\n\n def update_gl_buffers(self):\n # Might be overwritten\n self.resize_buffers(len(self.verts) if hasattr(self, 'verts') else 0,\n len(self.faces) if hasattr(self, 'faces') else 0) # maybe repeated\n\n if hasattr(self, 'verts'):\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)\n gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, self.n_verts_bytes, self.verts_data) # hold the reference\n if hasattr(self, 'faces'):\n gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)\n gl.glBufferSubData(gl.GL_ELEMENT_ARRAY_BUFFER, 0, self.n_faces_bytes, self.faces_data)\n\n def resize_buffers(self, v: int = 0, f: int = 0):\n if v > self.max_verts or f > self.max_faces:\n if v > self.max_verts: self.max_verts = v\n if f > self.max_faces: self.max_faces = f\n self.init_gl_buffers(v, f)\n\n def init_gl_buffers(self, v: int = 0, 
f: int = 0):\n # This will only init the corresponding buffer object\n n_verts_bytes = v * self.vert_size * self.verts.element_size() if v > 0 else self.n_verts_bytes\n n_faces_bytes = f * self.face_size * self.faces.element_size() if f > 0 else self.n_faces_bytes\n\n # Housekeeping\n if hasattr(self, 'vao'):\n gl.glDeleteVertexArrays(1, [self.vao])\n gl.glDeleteBuffers(2, [self.vbo, self.ebo])\n\n self.vao = gl.glGenVertexArrays(1)\n self.vbo = gl.glGenBuffers(1)\n self.ebo = gl.glGenBuffers(1)\n\n gl.glBindVertexArray(self.vao)\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)\n gl.glBufferData(gl.GL_ARRAY_BUFFER, n_verts_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW) # NOTE: Using pointers here won't work\n\n # https://stackoverflow.com/questions/67195932/pyopengl-cannot-render-any-vao\n cumsum = 0\n for i, (s, t) in enumerate(zip(self.vert_sizes, self.vert_gl_types)):\n gl.glVertexAttribPointer(i, s, t, gl.GL_FALSE, self.vert_size * self.verts.element_size(), ctypes.c_void_p(cumsum * self.verts.element_size())) # we use 32 bit float\n gl.glEnableVertexAttribArray(i)\n cumsum += s\n\n if n_faces_bytes > 0:\n # Some implementation has no faces, we dangerously ignore ebo here, assuming they will never be used\n gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)\n gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, n_faces_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW)\n gl.glBindVertexArray(0)\n\n def render_imgui(self):\n pass" }, { "identifier": "Camera", "path": "easyvolcap/utils/viewer_utils.py", "snippet": "class Camera:\n # Helper class to manage camera parameters\n def __init__(self,\n H: int = 512,\n W: int = 512,\n K: torch.Tensor = torch.tensor([[512.0, 0.0, 256], [0.0, 512.0, 256.0], [0.0, 0.0, 1.0]]), # intrinsics\n R: torch.Tensor = torch.tensor([[-1.0, 0.0, 0.0,], [0.0, 0.0, -1.0,], [0.0, -1.0, 0.0,]]), # extrinsics\n T: torch.Tensor = torch.tensor([[0.0], [0.0], [-3.0],]), # extrinsics\n n: float = 0.002, # bounds limit\n f: float = 100, # bounds limit\n t: float = 0.0, # temporal dimension (implemented as a float instead of int)\n v: float = 0.0, # view dimension (implemented as a float instead of int)\n bounds: torch.Tensor = torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]]), # bounding box\n\n # camera update hyperparameters\n origin: torch.Tensor = torch.tensor([0.0, 0.0, 0.0]),\n world_up: torch.Tensor = torch.tensor([0.0, 0.0, 1.0]),\n movement_speed: float = 1.0, # gui movement speed\n\n batch: dotdict = None, # will ignore all other inputs\n string: str = None, # will ignore all other inputs\n **kwargs,\n ) -> None:\n\n # Batch (network input parameters)\n if string is None:\n if batch is None:\n batch = dotdict()\n batch.H, batch.W, batch.K, batch.R, batch.T, batch.n, batch.f, batch.t, batch.v, batch.bounds = H, W, K, R, T, n, f, t, v, bounds\n self.from_batch(batch)\n \n # Other configurables\n self.origin = vec3(*origin)\n self.world_up = vec3(*world_up)\n self.movement_speed = movement_speed\n # self.front = self.front # will trigger an update\n else:\n self.from_string(string)\n\n # Internal states to facilitate camera position change\n self.is_dragging = False # rotation\n self.about_origin = False # about origin rotation\n self.is_panning = False # translation\n self.lock_fx_fy = True\n\n @property\n def w2p(self):\n ixt = mat4(self.ixt)\n ixt[3, 3] = 0\n ixt[2, 3] = 1\n return ixt @ self.ext # w2c -> c2p = w2p\n\n @property\n def V(self): return self.c2w\n\n @property\n def ixt(self): return self.K\n\n @property\n def gl_ext(self):\n gl_c2w = self.c2w\n gl_c2w[0] 
*= 1 # flip x\n gl_c2w[1] *= -1 # flip y\n gl_c2w[2] *= -1 # flip z\n gl_ext = glm.affineInverse(gl_c2w)\n return gl_ext # use original opencv ext since we've taken care of the intrinsics in gl_ixt\n\n @property\n def gl_ixt(self):\n # Construct opengl camera matrix with projection & clipping\n # https://fruty.io/2019/08/29/augmented-reality-with-opencv-and-opengl-the-tricky-projection-matrix/\n # https://gist.github.com/davegreenwood/3a32d779f81f08dce32f3bb423672191\n # fmt: off\n gl_ixt = mat4(\n 2 * self.fx / self.W, 0, 0, 0,\n 2 * self.s / self.W, 2 * self.fy / self.H, 0, 0,\n 1 - 2 * (self.cx / self.W), 2 * (self.cy / self.H) - 1, (self.f + self.n) / (self.n - self.f), -1,\n 0, 0, 2 * self.f * self.n / (self.n - self.f), 0,\n )\n # fmt: on\n\n return gl_ixt\n\n @property\n def ext(self): return self.w2c\n\n @property\n def w2c(self):\n w2c = mat4(self.R)\n w2c[3] = vec4(*self.T, 1.0)\n return w2c\n\n @property\n def c2w(self):\n return glm.affineInverse(self.w2c)\n\n @property\n def right(self) -> vec3: return vec3(self.R[0, 0], self.R[1, 0], self.R[2, 0]) # c2w R, 0 -> 3,\n\n @property\n def down(self) -> vec3: return vec3(self.R[0, 1], self.R[1, 1], self.R[2, 1]) # c2w R, 1 -> 3,\n\n @property\n def front(self) -> vec3: return vec3(self.R[0, 2], self.R[1, 2], self.R[2, 2]) # c2w R, 2 -> 3,\n\n @front.setter\n def front(self, v: vec3):\n front = v # the last row of R\n self.R[0, 2], self.R[1, 2], self.R[2, 2] = front.x, front.y, front.z\n right = glm.normalize(glm.cross(self.front, self.world_up)) # right\n self.R[0, 0], self.R[1, 0], self.R[2, 0] = right.x, right.y, right.z\n down = glm.cross(self.front, self.right) # down\n self.R[0, 1], self.R[1, 1], self.R[2, 1] = down.x, down.y, down.z\n\n @property\n def center(self): return -glm.transpose(self.R) @ self.T # 3,\n\n @center.setter\n def center(self, v: vec3):\n self.T = -self.R @ v # 3, 1\n\n @property\n def s(self): return self.K[1, 0]\n\n @s.setter\n def s(self, s): self.K[1, 0] = s\n\n @property\n def fx(self): return self.K[0, 0]\n\n @fx.setter\n def fx(self, v: float):\n v = min(v, 1e5)\n v = max(v, 1e-3)\n if self.lock_fx_fy:\n self.K[1, 1] = v / self.K[0, 0] * self.K[1, 1]\n self.K[0, 0] = v\n\n @property\n def fy(self): return self.K[1, 1]\n\n @fy.setter\n def fy(self, v: float):\n if self.lock_fx_fy:\n self.K[0, 0] = v / self.K[1, 1] * self.K[0, 0]\n self.K[1, 1] = v\n\n @property\n def cx(self): return self.K[2, 0]\n\n @cx.setter\n def cx(self, v: float):\n self.K[2, 0] = v\n\n @property\n def cy(self): return self.K[2, 1]\n\n @cy.setter\n def cy(self, v: float):\n self.K[2, 1] = v\n\n def begin_dragging(self,\n x: float, y: float,\n is_panning: bool,\n about_origin: bool,\n ):\n self.is_dragging = True\n self.is_panning = is_panning\n self.about_origin = about_origin\n self.drag_start = vec2([x, y])\n\n # Record internal states # ? 
Will this make a copy?\n self.drag_start_front = self.front # a recording\n self.drag_start_down = self.down\n self.drag_start_right = self.right\n self.drag_start_center = self.center\n self.drag_start_origin = self.origin\n self.drag_start_world_up = self.world_up\n\n # Need to find the max or min delta y to align with world_up\n dot = glm.dot(self.world_up, self.drag_start_front)\n self.drag_ymin = -np.arccos(-dot) + 0.01 # drag up, look down\n self.drag_ymax = np.pi + self.drag_ymin - 0.02 # remove the 0.01 of drag_ymin\n\n def end_dragging(self):\n self.is_dragging = False\n\n def update_dragging(self, x: float, y: float):\n if not self.is_dragging:\n return\n\n current = vec2(x, y)\n delta = current - self.drag_start\n delta /= max(self.H, self.W)\n delta *= -1\n\n if self.is_panning:\n delta *= self.movement_speed\n center_delta = delta[0] * self.drag_start_right + delta[1] * self.drag_start_down\n self.center = self.drag_start_center + center_delta\n if self.about_origin:\n self.origin = self.drag_start_origin + center_delta\n else:\n m = mat4(1.0)\n m = glm.rotate(m, delta.x % 2 * np.pi, self.world_up)\n m = glm.rotate(m, np.clip(delta.y, self.drag_ymin, self.drag_ymax), self.drag_start_right)\n self.front = m @ self.drag_start_front # might overshoot\n\n if self.about_origin:\n self.center = -m @ (self.origin - self.drag_start_center) + self.origin\n\n def move(self, x_offset: float, y_offset: float):\n speed_factor = 1e-1\n movement = y_offset * speed_factor\n movement = movement * self.front * self.movement_speed\n self.center += movement\n\n if self.is_dragging:\n self.drag_start_center += movement\n\n def to_batch(self):\n meta = dotdict()\n meta.H = torch.as_tensor(self.H)\n meta.W = torch.as_tensor(self.W)\n meta.K = torch.as_tensor(self.K.to_list()).mT\n meta.R = torch.as_tensor(self.R.to_list()).mT\n meta.T = torch.as_tensor(self.T.to_list())[..., None]\n meta.n = torch.as_tensor(self.n)\n meta.f = torch.as_tensor(self.f)\n meta.t = torch.as_tensor(self.t)\n meta.v = torch.as_tensor(self.v)\n meta.bounds = torch.as_tensor(self.bounds.to_list()) # no transpose for bounds\n\n # GUI related elements\n meta.movement_speed = torch.as_tensor(self.movement_speed)\n meta.origin = torch.as_tensor(self.origin.to_list())\n meta.world_up = torch.as_tensor(self.world_up.to_list())\n\n batch = dotdict()\n batch.update(meta)\n batch.meta.update(meta)\n return batch\n\n def to_easymocap(self):\n batch = self.to_batch()\n camera = to_numpy(batch)\n return camera\n\n def from_easymocap(self, camera: dict):\n batch = to_tensor(camera)\n self.from_batch(batch)\n return self\n\n def to_string(self) -> str:\n batch = to_list(self.to_batch().meta)\n return json.dumps(batch)\n\n def from_string(self, string: str):\n batch = to_tensor(dotdict(json.loads(string)), ignore_list=True)\n self.from_batch(batch)\n\n def from_batch(self, batch: dotdict):\n H, W, K, R, T, n, f, t, v, bounds = batch.H, batch.W, batch.K, batch.R, batch.T, batch.n, batch.f, batch.t, batch.v, batch.bounds\n\n # Batch (network input parameters)\n self.H = int(H)\n self.W = int(W)\n self.K = mat3(*K.mT.ravel())\n self.R = mat3(*R.mT.ravel())\n self.T = vec3(*T.ravel()) # 3,\n self.n = float(n)\n self.f = float(f)\n self.t = float(t)\n self.v = float(v)\n self.bounds = mat2x3(*bounds.ravel()) # 2, 3\n\n if 'movement_speed' in batch: self.movement_speed = float(batch.movement_speed)\n if 'origin' in batch: self.origin = vec3(*batch.origin.ravel()) # 3,\n if 'world_up' in batch: self.world_up = vec3(*batch.world_up.ravel()) # 3,\n 
return self\n\n def custom_pose(self, R: torch.Tensor, T: torch.Tensor, K: torch.Tensor):\n # self.K = mat3(*K.mT.ravel())\n self.R = mat3(*R.mT.ravel())\n self.T = vec3(*T.ravel())" }, { "identifier": "save_image", "path": "easyvolcap/utils/data_utils.py", "snippet": "def save_image(img_path: str, img: np.ndarray, jpeg_quality=75, png_compression=9, save_dtype=np.uint8):\n if isinstance(img, torch.Tensor): img = img.detach().cpu().numpy()\n if img.ndim == 4: img = np.concatenate(img, axis=0)\n if img.shape[0] < img.shape[-1] and (img.shape[0] == 3 or img.shape[0] == 4): img = np.transpose(img, (1, 2, 0))\n if np.issubdtype(img.dtype, np.integer):\n img = img / np.iinfo(img.dtype).max # to float\n if img.shape[-1] >= 3:\n if not img.flags['WRITEABLE']:\n img = img.copy() # avoid assignment only inputs\n img[..., :3] = img[..., [2, 1, 0]]\n if os.path.dirname(img_path):\n os.makedirs(os.path.dirname(img_path), exist_ok=True)\n if img_path.endswith('.png'):\n max = np.iinfo(save_dtype).max\n img = (img * max).clip(0, max).astype(save_dtype)\n elif img_path.endswith('.jpg'):\n img = img[..., :3] # only color\n img = (img * 255).clip(0, 255).astype(np.uint8)\n elif img_path.endswith('.hdr'):\n img = img[..., :3] # only color\n elif img_path.endswith('.exr'):\n # ... https://github.com/opencv/opencv/issues/21326\n os.environ[\"OPENCV_IO_ENABLE_OPENEXR\"] = \"1\"\n else:\n # should we try to discard alpha channel here?\n # exr could store alpha channel\n pass # no transformation for other unspecified file formats\n return cv2.imwrite(img_path, img, [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality, cv2.IMWRITE_PNG_COMPRESSION, png_compression])" }, { "identifier": "common_opengl_options", "path": "easyvolcap/utils/gl_utils.py", "snippet": "def common_opengl_options():\n # Use program point size\n gl.glEnable(gl.GL_PROGRAM_POINT_SIZE)\n\n # Performs face culling\n gl.glEnable(gl.GL_CULL_FACE)\n gl.glCullFace(gl.GL_BACK)\n\n # Performs alpha trans testing\n gl.glEnable(gl.GL_ALPHA_TEST)\n\n # Performs z-buffer testing\n gl.glEnable(gl.GL_DEPTH_TEST)\n # gl.glDepthMask(gl.GL_TRUE)\n gl.glDepthFunc(gl.GL_LEQUAL)\n # gl.glDepthRange(-1.0, 1.0)\n gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\n\n # Enable some masking tests\n gl.glEnable(gl.GL_SCISSOR_TEST)\n\n # Enable this to correctly render points\n # https://community.khronos.org/t/gl-point-sprite-gone-in-3-2/59310\n gl.glEnable(gl.GL_POINT_SPRITE) # MARK: ONLY SPRITE IS WORKING FOR NOW\n # gl.glEnable(gl.GL_POINT_SMOOTH) # MARK: ONLY SPRITE IS WORKING FOR NOW\n\n # # Configure how we store the pixels in memory for our subsequent reading of the FBO to store the rendering into memory.\n # # The second argument specifies that our pixels will be in bytes.\n # gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)" }, { "identifier": "linearize_depth", "path": "easyvolcap/utils/gl_utils.py", "snippet": "def linearize_depth(d, n: float, f: float):\n # 0-1 -> -1,1\n # ndc -> view\n return (2.0 * n * f) / (f + n - (d * 2 - 1) * (f - n))" }, { "identifier": "my_tests", "path": "easyvolcap/utils/test_utils.py", "snippet": "@catch_throw\ndef my_tests(globals: dict = globals(), prefix: str = 'test'):\n # extract testing functions\n tests = {name: func for name, func in globals.items() if name.startswith(prefix)}\n # run tests\n pbar = tqdm(total=len(tests))\n for name, func in tests.items():\n pbar.desc = name\n pbar.refresh()\n\n func()\n log(f'{name}: {green(\"OK\")}')\n\n pbar.update(n=1)\n pbar.refresh()" } ]
from easyvolcap.utils.egl_utils import eglContextManager # must be imported before OpenGL.GL from os.path import join, dirname from easyvolcap.utils.console_utils import * from easyvolcap.utils.gl_utils import Quad, Mesh from easyvolcap.utils.viewer_utils import Camera from easyvolcap.utils.data_utils import save_image from easyvolcap.utils.gl_utils import common_opengl_options, linearize_depth from easyvolcap.utils.test_utils import my_tests import OpenGL.GL as gl import os import cv2 import torch import numpy as np
11,678
from __future__ import absolute_import, division, print_function
# fmt: off
# fmt: on
WIDTH, HEIGHT = 512, 512
eglctx = eglContextManager(HEIGHT, WIDTH) # will create a new context
from __future__ import absolute_import, division, print_function # fmt: off # fmt: on WIDTH, HEIGHT = 512, 512 eglctx = eglContextManager(HEIGHT, WIDTH) # will create a new context
common_opengl_options() # common init
5
2023-12-07 08:53:42+00:00
16k
alibaba/animate-anything
utils/lora_handler.py
[ { "identifier": "UNet3DConditionModel", "path": "models/unet_3d_condition_mask.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n r\"\"\"\n UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep\n and returns sample shaped output.\n\n This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library\n implements for all the models (such as downloading or saving, etc.)\n\n Parameters:\n sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):\n Height and width of input/output sample.\n in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.\n out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.\n down_block_types (`Tuple[str]`, *optional*, defaults to `(\"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"DownBlock2D\")`):\n The tuple of downsample blocks to use.\n up_block_types (`Tuple[str]`, *optional*, defaults to `(\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\",)`):\n The tuple of upsample blocks to use.\n block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):\n The tuple of output channels for each block.\n layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.\n downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.\n mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.\n act_fn (`str`, *optional*, defaults to `\"silu\"`): The activation function to use.\n norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.\n If `None`, it will skip the normalization and activation layers in post-processing\n norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.\n cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.\n attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.\n \"\"\"\n\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n up_block_types: Tuple[str] = (\"UpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\"),\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1024,\n attention_head_dim: Union[int, Tuple[int]] = 64,\n motion_mask = False,\n motion_strength = False,\n ):\n super().__init__()\n self.motion_mask = motion_mask\n self.motion_strength = motion_strength\n print(f\"motion mask {self.motion_mask}, motion_strength {self.motion_strength}\")\n self.sample_size = sample_size\n self.gradient_checkpointing = False\n # Check inputs\n if len(down_block_types) != len(up_block_types):\n raise ValueError(\n f\"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. 
`up_block_types`: {up_block_types}.\"\n )\n\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_out_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n self.conv_in2 = nn.Conv2d(\n 5, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n self.time_proj = Timesteps(block_out_channels[0], True, 0)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n cond_proj_dim=block_out_channels[0],\n )\n\n self.motion_proj = Timesteps(block_out_channels[0], True, 0)\n self.motion_embedding = nn.Sequential(\n nn.Linear(timestep_input_dim, time_embed_dim), nn.SiLU(),\n nn.Linear(time_embed_dim, time_embed_dim))\n nn.init.zeros_(self.motion_embedding[-1].weight)\n nn.init.zeros_(self.motion_embedding[-1].bias)\n\n self.transformer_in = TransformerTemporalModel(\n num_attention_heads=8,\n attention_head_dim=attention_head_dim,\n in_channels=block_out_channels[0],\n num_layers=1,\n )\n\n # class embedding\n self.down_blocks = nn.ModuleList([])\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=False,\n )\n self.down_blocks.append(down_block)\n\n # mid\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=False,\n )\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, 
len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=False,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n if norm_num_groups is not None:\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n else:\n self.conv_norm_out = None\n self.conv_act = None\n\n conv_out_padding = (conv_out_kernel - 1) // 2\n self.conv_out = nn.Conv2d(\n block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding\n )\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, value=False):\n self.gradient_checkpointing = value\n self.mid_block.gradient_checkpointing = value\n for module in self.down_blocks + self.up_blocks:\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value \n \n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n condition_latent: torch.Tensor,\n mask: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n motion = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, num_frames, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet3DConditionOutput`] instead of a plain tuple.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n\n Returns:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n sample = torch.cat([condition_latent, sample], dim=2)\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n num_frames = sample.shape[2]\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n if self.motion_strength and motion is not None:\n timestep_cond = self.motion_proj(motion).to(dtype=self.dtype)\n emb = self.time_embedding(t_emb, timestep_cond)\n #emb += self.motion_embedding(m_emb)\n else:\n emb = self.time_embedding(t_emb, timestep_cond)\n emb = emb.repeat_interleave(repeats=num_frames, dim=0)\n encoder_hidden_states = encoder_hidden_states.repeat_interleave(repeats=num_frames, dim=0)\n\n # 2. pre-process\n if self.motion_mask and mask is not None:\n mask = repeat(mask , 'b 1 1 h w -> (t b) 1 f h w', t=sample.shape[0]//mask.shape[0], f=sample.shape[2])\n sample = torch.cat([mask, sample], dim=1)\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in2(sample)\n else:\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in(sample)\n\n if num_frames > 1:\n if self.gradient_checkpointing:\n sample = transformer_g_c(self.transformer_in, sample, num_frames)\n else:\n sample = self.transformer_in(sample, num_frames=num_frames).sample\n\n # 3. 
down\n down_block_res_samples = (sample,)\n for i, downsample_block in enumerate(self.down_blocks):\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames)\n \n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n\n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n num_frames=num_frames,\n )\n\n # 6. 
post-process\n if self.conv_norm_out:\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n\n sample = self.conv_out(sample)\n\n # reshape to (batch, channel, framerate, width, height)\n sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4)\n sample = sample[:,:,1:]\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)" }, { "identifier": "convert_unet_state_dict", "path": "utils/convert_diffusers_to_original_ms_text_to_video.py", "snippet": "def convert_unet_state_dict(unet_state_dict, strict_mapping=False):\n print ('Converting the UNET')\n # buyer beware: this is a *brittle* function,\n # and correct output requires that all of these pieces interact in\n # the exact order in which I have arranged them.\n mapping = {k: k for k in unet_state_dict.keys()}\n\n for sd_name, hf_name in unet_conversion_map:\n if strict_mapping:\n if hf_name in mapping:\n mapping[hf_name] = sd_name\n else:\n mapping[hf_name] = sd_name\n for k, v in mapping.items():\n if \"resnets\" in k:\n for sd_part, hf_part in unet_conversion_map_resnet:\n v = v.replace(hf_part, sd_part)\n mapping[k] = v\n # elif \"temp_convs\" in k:\n # for sd_part, hf_part in unet_conversion_map_resnet:\n # v = v.replace(hf_part, sd_part)\n # mapping[k] = v\n for k, v in mapping.items():\n for sd_part, hf_part in unet_conversion_map_layer:\n v = v.replace(hf_part, sd_part)\n mapping[k] = v\n \n\n # there must be a pattern, but I don't want to bother atm\n do_not_unsqueeze = [f'output_blocks.{i}.1.proj_out.weight' for i in range(3, 12)] + [f'output_blocks.{i}.1.proj_in.weight' for i in range(3, 12)] + ['middle_block.1.proj_in.weight', 'middle_block.1.proj_out.weight'] + [f'input_blocks.{i}.1.proj_out.weight' for i in [1, 2, 4, 5, 7, 8]] + [f'input_blocks.{i}.1.proj_in.weight' for i in [1, 2, 4, 5, 7, 8]]\n print (do_not_unsqueeze)\n\n new_state_dict = {v: (unet_state_dict[k].unsqueeze(-1) if ('proj_' in k and ('bias' not in k) and (k not in do_not_unsqueeze)) else unet_state_dict[k]) for k, v in mapping.items()}\n # HACK: idk why the hell it does not work with list comprehension\n for k, v in new_state_dict.items():\n has_k = False\n for n in do_not_unsqueeze:\n if k == n:\n has_k = True\n\n if has_k:\n v = v.squeeze(-1)\n new_state_dict[k] = v\n\n return new_state_dict" }, { "identifier": "convert_text_enc_state_dict_v20", "path": "utils/convert_diffusers_to_original_ms_text_to_video.py", "snippet": "def convert_text_enc_state_dict_v20(text_enc_dict):\n #print ('Converting the text encoder')\n new_state_dict = {}\n capture_qkv_weight = {}\n capture_qkv_bias = {}\n for k, v in text_enc_dict.items():\n if (\n k.endswith(\".self_attn.q_proj.weight\")\n or k.endswith(\".self_attn.k_proj.weight\")\n or k.endswith(\".self_attn.v_proj.weight\")\n ):\n k_pre = k[: -len(\".q_proj.weight\")]\n k_code = k[-len(\"q_proj.weight\")]\n if k_pre not in capture_qkv_weight:\n capture_qkv_weight[k_pre] = [None, None, None]\n capture_qkv_weight[k_pre][code2idx[k_code]] = v\n continue\n\n if (\n k.endswith(\".self_attn.q_proj.bias\")\n or k.endswith(\".self_attn.k_proj.bias\")\n or k.endswith(\".self_attn.v_proj.bias\")\n ):\n k_pre = k[: -len(\".q_proj.bias\")]\n k_code = k[-len(\"q_proj.bias\")]\n if k_pre not in capture_qkv_bias:\n capture_qkv_bias[k_pre] = [None, None, None]\n capture_qkv_bias[k_pre][code2idx[k_code]] = v\n continue\n\n relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)\n new_state_dict[relabelled_key] = v\n\n for 
k_pre, tensors in capture_qkv_weight.items():\n if None in tensors:\n raise Exception(\"CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing\")\n relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)\n new_state_dict[relabelled_key + \".in_proj_weight\"] = torch.cat(tensors)\n\n for k_pre, tensors in capture_qkv_bias.items():\n if None in tensors:\n raise Exception(\"CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing\")\n relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)\n new_state_dict[relabelled_key + \".in_proj_bias\"] = torch.cat(tensors)\n\n return new_state_dict" }, { "identifier": "extract_lora_ups_down", "path": "utils/lora.py", "snippet": "def extract_lora_ups_down(model, target_replace_module=DEFAULT_TARGET_REPLACE):\n\n loras = []\n\n for _m, _n, _child_module in _find_modules(\n model,\n target_replace_module,\n search_class=[LoraInjectedLinear, LoraInjectedConv2d, LoraInjectedConv3d],\n ):\n loras.append((_child_module.lora_up, _child_module.lora_down))\n\n if len(loras) == 0:\n raise ValueError(\"No lora injected.\")\n\n return loras" }, { "identifier": "inject_trainable_lora_extended", "path": "utils/lora.py", "snippet": "def inject_trainable_lora_extended(\n model: nn.Module,\n target_replace_module: Set[str] = UNET_EXTENDED_TARGET_REPLACE,\n r: int = 4,\n loras=None, # path to lora .pt\n):\n \"\"\"\n inject lora into model, and returns lora parameter groups.\n \"\"\"\n\n require_grad_params = []\n names = []\n\n if loras != None:\n loras = torch.load(loras)\n\n for _module, name, _child_module in _find_modules(\n model, target_replace_module, search_class=[nn.Linear, nn.Conv2d, nn.Conv3d]\n ):\n if _child_module.__class__ == nn.Linear:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedLinear(\n _child_module.in_features,\n _child_module.out_features,\n _child_module.bias is not None,\n r=r,\n )\n _tmp.linear.weight = weight\n if bias is not None:\n _tmp.linear.bias = bias\n elif _child_module.__class__ == nn.Conv2d:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedConv2d(\n _child_module.in_channels,\n _child_module.out_channels,\n _child_module.kernel_size,\n _child_module.stride,\n _child_module.padding,\n _child_module.dilation,\n _child_module.groups,\n _child_module.bias is not None,\n r=r,\n )\n\n _tmp.conv.weight = weight\n if bias is not None:\n _tmp.conv.bias = bias\n\n elif _child_module.__class__ == nn.Conv3d:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedConv3d(\n _child_module.in_channels,\n _child_module.out_channels,\n bias=_child_module.bias is not None,\n kernel_size=_child_module.kernel_size,\n padding=_child_module.padding,\n r=r,\n )\n\n _tmp.conv.weight = weight\n if bias is not None:\n _tmp.conv.bias = bias \n # switch the module\n _tmp.to(_child_module.weight.device).to(_child_module.weight.dtype)\n if bias is not None:\n _tmp.to(_child_module.bias.device).to(_child_module.bias.dtype)\n \n _module._modules[name] = _tmp\n require_grad_params.append(_module._modules[name].lora_up.parameters())\n require_grad_params.append(_module._modules[name].lora_down.parameters())\n\n if loras != None:\n _module._modules[name].lora_up.weight = loras.pop(0)\n _module._modules[name].lora_down.weight = loras.pop(0)\n\n _module._modules[name].lora_up.weight.requires_grad = True\n _module._modules[name].lora_down.weight.requires_grad = True\n 
names.append(name)\n\n return require_grad_params, names" }, { "identifier": "save_lora_weight", "path": "utils/lora.py", "snippet": "def save_lora_weight(\n model,\n path=\"./lora.pt\",\n target_replace_module=DEFAULT_TARGET_REPLACE,\n): \n weights = []\n for _up, _down in extract_lora_ups_down(\n model, target_replace_module=target_replace_module\n ):\n weights.append(_up.weight.to(\"cpu\").to(torch.float32))\n weights.append(_down.weight.to(\"cpu\").to(torch.float32))\n\n torch.save(weights, path)" }, { "identifier": "train_patch_pipe", "path": "utils/lora.py", "snippet": "def train_patch_pipe(pipe, patch_unet, patch_text):\n if patch_unet:\n print(\"LoRA : Patching Unet\")\n collapse_lora(pipe.unet)\n monkeypatch_remove_lora(pipe.unet)\n\n if patch_text:\n print(\"LoRA : Patching text encoder\")\n\n collapse_lora(pipe.text_encoder)\n monkeypatch_remove_lora(pipe.text_encoder)" }, { "identifier": "monkeypatch_or_replace_lora", "path": "utils/lora.py", "snippet": "def monkeypatch_or_replace_lora(\n model,\n loras,\n target_replace_module=DEFAULT_TARGET_REPLACE,\n r: Union[int, List[int]] = 4,\n):\n for _module, name, _child_module in _find_modules(\n model, target_replace_module, search_class=[nn.Linear, LoraInjectedLinear]\n ):\n _source = (\n _child_module.linear\n if isinstance(_child_module, LoraInjectedLinear)\n else _child_module\n )\n\n weight = _source.weight\n bias = _source.bias\n _tmp = LoraInjectedLinear(\n _source.in_features,\n _source.out_features,\n _source.bias is not None,\n r=r.pop(0) if isinstance(r, list) else r,\n )\n _tmp.linear.weight = weight\n\n if bias is not None:\n _tmp.linear.bias = bias\n\n # switch the module\n _module._modules[name] = _tmp\n\n up_weight = loras.pop(0)\n down_weight = loras.pop(0)\n\n _module._modules[name].lora_up.weight = nn.Parameter(\n up_weight.type(weight.dtype)\n )\n _module._modules[name].lora_down.weight = nn.Parameter(\n down_weight.type(weight.dtype)\n )\n\n _module._modules[name].to(weight.device)" }, { "identifier": "monkeypatch_or_replace_lora_extended", "path": "utils/lora.py", "snippet": "def monkeypatch_or_replace_lora_extended(\n model,\n loras,\n target_replace_module=DEFAULT_TARGET_REPLACE,\n r: Union[int, List[int]] = 4,\n):\n for _module, name, _child_module in _find_modules(\n model,\n target_replace_module,\n search_class=[\n nn.Linear, \n nn.Conv2d, \n nn.Conv3d,\n LoraInjectedLinear, \n LoraInjectedConv2d, \n LoraInjectedConv3d,\n ],\n ):\n\n if (_child_module.__class__ == nn.Linear) or (\n _child_module.__class__ == LoraInjectedLinear\n ):\n if len(loras[0].shape) != 2:\n continue\n\n _source = (\n _child_module.linear\n if isinstance(_child_module, LoraInjectedLinear)\n else _child_module\n )\n\n weight = _source.weight\n bias = _source.bias\n _tmp = LoraInjectedLinear(\n _source.in_features,\n _source.out_features,\n _source.bias is not None,\n r=r.pop(0) if isinstance(r, list) else r,\n )\n _tmp.linear.weight = weight\n\n if bias is not None:\n _tmp.linear.bias = bias\n\n elif (_child_module.__class__ == nn.Conv2d) or (\n _child_module.__class__ == LoraInjectedConv2d\n ):\n if len(loras[0].shape) != 4:\n continue\n _source = (\n _child_module.conv\n if isinstance(_child_module, LoraInjectedConv2d)\n else _child_module\n )\n\n weight = _source.weight\n bias = _source.bias\n _tmp = LoraInjectedConv2d(\n _source.in_channels,\n _source.out_channels,\n _source.kernel_size,\n _source.stride,\n _source.padding,\n _source.dilation,\n _source.groups,\n _source.bias is not None,\n r=r.pop(0) if isinstance(r, list) else 
r,\n )\n\n _tmp.conv.weight = weight\n\n if bias is not None:\n _tmp.conv.bias = bias\n\n elif _child_module.__class__ == nn.Conv3d or(\n _child_module.__class__ == LoraInjectedConv3d\n ):\n\n if len(loras[0].shape) != 5:\n continue\n\n _source = (\n _child_module.conv\n if isinstance(_child_module, LoraInjectedConv3d)\n else _child_module\n )\n\n weight = _source.weight\n bias = _source.bias\n _tmp = LoraInjectedConv3d(\n _source.in_channels,\n _source.out_channels,\n bias=_source.bias is not None,\n kernel_size=_source.kernel_size,\n padding=_source.padding,\n r=r.pop(0) if isinstance(r, list) else r,\n )\n\n _tmp.conv.weight = weight\n\n if bias is not None:\n _tmp.conv.bias = bias\n\n # switch the module\n _module._modules[name] = _tmp\n\n up_weight = loras.pop(0)\n down_weight = loras.pop(0)\n\n _module._modules[name].lora_up.weight = nn.Parameter(\n up_weight.type(weight.dtype)\n )\n _module._modules[name].lora_down.weight = nn.Parameter(\n down_weight.type(weight.dtype)\n )\n\n _module._modules[name].to(weight.device)" }, { "identifier": "activate_lora_train", "path": "stable_lora/lora.py", "snippet": "def activate_lora_train(model, bias):\n def unfreeze():\n print(model.__class__.__name__ + \" LoRA set for training.\")\n return loralb.mark_only_lora_as_trainable(model, bias=bias)\n\n return unfreeze" }, { "identifier": "add_lora_to", "path": "stable_lora/lora.py", "snippet": "def add_lora_to(\n model, \n target_module=UNET_REPLACE, \n search_class=[torch.nn.Linear], \n r=32, \n dropout=0,\n lora_bias='none'\n):\n for module, name, child_module in find_modules(\n model, \n ancestor_class=target_module, \n search_class=search_class\n ):\n bias = hasattr(child_module, \"bias\")\n \n # Check if child module of the model has bias.\n if bias:\n if child_module.bias is None:\n bias = False\n\n # Check if the child module of the model is type Linear or Conv2d.\n if isinstance(child_module, torch.nn.Linear):\n l = create_lora_linear(child_module, r, dropout, bias=bias)\n\n if isinstance(child_module, torch.nn.Conv2d):\n l = create_lora_conv(child_module, r, dropout, bias=bias)\n\n if isinstance(child_module, torch.nn.Conv3d):\n l = create_lora_conv3d(child_module, r, dropout, bias=bias)\n\n if isinstance(child_module, torch.nn.Embedding):\n l = create_lora_emb(child_module, r)\n \n # If the model has bias and we wish to add it, use the child_modules in place\n if bias:\n l.bias = child_module.bias\n \n # Assign the frozen weight of model's Linear or Conv2d to the LoRA model.\n l.weight = child_module.weight\n\n # Replace the new LoRA model with the model's Linear or Conv2d module.\n module._modules[name] = l\n \n\n # Unfreeze only the newly added LoRA weights, but keep the model frozen.\n return activate_lora_train(model, lora_bias)" }, { "identifier": "save_lora", "path": "stable_lora/lora.py", "snippet": "def save_lora(\n unet=None, \n text_encoder=None, \n save_text_weights=False,\n output_dir=\"output\",\n lora_filename=\"lora.safetensors\",\n lora_bias='none', \n save_for_webui=True,\n only_webui=False,\n metadata=None,\n unet_dict_converter=None,\n text_dict_converter=None\n ):\n\n if not only_webui:\n # Create directory for the full LoRA weights.\n trainable_weights_dir = f\"{output_dir}/full_weights\"\n lora_out_file_full_weight = f\"{trainable_weights_dir}/{lora_filename}\"\n os.makedirs(trainable_weights_dir, exist_ok=True)\n\n ext = '.safetensors'\n # Create LoRA out filename.\n lora_out_file = f\"{output_dir}/webui_{lora_filename}{ext}\"\n\n if not only_webui:\n 
save_path_full_weights = lora_out_file_full_weight + ext\n\n save_path = lora_out_file\n\n if not only_webui:\n for i, model in enumerate([unet, text_encoder]):\n if save_text_weights and i == 1:\n non_webui_weights = save_path_full_weights.replace(ext, f\"_text_encoder{ext}\")\n\n else:\n non_webui_weights = save_path_full_weights.replace(ext, f\"_unet{ext}\")\n\n # Load only the LoRAs from the state dict.\n lora_dict = loralb.lora_state_dict(model, bias=lora_bias)\n \n # Save the models as fp32. This ensures we can finetune again without having to upcast. \n save_file(lora_dict, non_webui_weights)\n \n if save_for_webui:\n # Convert the keys to compvis model and webui\n unet_lora_dict = loralb.lora_state_dict(unet, bias=lora_bias) \n lora_dict_fp16 = unet_dict_converter(unet_lora_dict, strict_mapping=True)\n \n if save_text_weights:\n text_encoder_dict = loralb.lora_state_dict(text_encoder, bias=lora_bias)\n lora_dict_text_fp16 = text_dict_converter(text_encoder_dict)\n \n # Update the Unet dict to include text keys.\n lora_dict_fp16.update(lora_dict_text_fp16)\n\n # Cast tensors to fp16. It's assumed we won't be finetuning these.\n for k, v in lora_dict_fp16.items():\n lora_dict_fp16[k] = v.to(dtype=torch.float16)\n\n save_file(\n lora_dict_fp16, \n save_path, \n metadata=metadata\n )" }, { "identifier": "load_lora", "path": "stable_lora/lora.py", "snippet": "def load_lora(model, lora_path: str):\n try:\n if os.path.exists(lora_path):\n lora_dict = load_file(lora_path)\n model.load_state_dict(lora_dict, strict=False)\n\n except Exception as e:\n print(f\"Could not load your lora file: {e}\")" }, { "identifier": "set_mode_group", "path": "stable_lora/lora.py", "snippet": "def set_mode_group(models, train):\n for model in models: \n set_mode(model, train)\n model.train(train)" } ]
import os import torch import uuid from logging import warnings from typing import Union from types import SimpleNamespace from models.unet_3d_condition_mask import UNet3DConditionModel from transformers import CLIPTextModel from utils.convert_diffusers_to_original_ms_text_to_video import convert_unet_state_dict, convert_text_enc_state_dict_v20 from .lora import ( extract_lora_ups_down, inject_trainable_lora_extended, save_lora_weight, train_patch_pipe, monkeypatch_or_replace_lora, monkeypatch_or_replace_lora_extended ) from stable_lora.lora import ( activate_lora_train, add_lora_to, save_lora, load_lora, set_mode_group )
11,099
FILE_BASENAMES = ['unet', 'text_encoder'] LORA_FILE_TYPES = ['.pt', '.safetensors'] CLONE_OF_SIMO_KEYS = ['model', 'loras', 'target_replace_module', 'r'] STABLE_LORA_KEYS = ['model', 'target_module', 'search_class', 'r', 'dropout', 'lora_bias'] lora_versions = dict( stable_lora = "stable_lora", cloneofsimo = "cloneofsimo" ) lora_func_types = dict( loader = "loader", injector = "injector" ) lora_args = dict( model = None, loras = None, target_replace_module = [], target_module = [], r = 4, search_class = [torch.nn.Linear], dropout = 0, lora_bias = 'none' ) LoraVersions = SimpleNamespace(**lora_versions) LoraFuncTypes = SimpleNamespace(**lora_func_types) LORA_VERSIONS = [LoraVersions.stable_lora, LoraVersions.cloneofsimo] LORA_FUNC_TYPES = [LoraFuncTypes.loader, LoraFuncTypes.injector] def filter_dict(_dict, keys=[]): if len(keys) == 0: assert "Keys cannot empty for filtering return dict." for k in keys: if k not in lora_args.keys(): assert f"{k} does not exist in available LoRA arguments" return {k: v for k, v in _dict.items() if k in keys} class LoraHandler(object): def __init__( self, version: LORA_VERSIONS = LoraVersions.cloneofsimo, use_unet_lora: bool = False, use_text_lora: bool = False, save_for_webui: bool = False, only_for_webui: bool = False, lora_bias: str = 'none', unet_replace_modules: list = ['UNet3DConditionModel'], text_encoder_replace_modules: list = ['CLIPEncoderLayer'] ): self.version = version self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader) self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector) self.lora_bias = lora_bias self.use_unet_lora = use_unet_lora self.use_text_lora = use_text_lora self.save_for_webui = save_for_webui self.only_for_webui = only_for_webui self.unet_replace_modules = unet_replace_modules self.text_encoder_replace_modules = text_encoder_replace_modules self.use_lora = any([use_text_lora, use_unet_lora]) if self.use_lora: print(f"Using LoRA Version: {self.version}") def is_cloneofsimo_lora(self): return self.version == LoraVersions.cloneofsimo def is_stable_lora(self): return self.version == LoraVersions.stable_lora def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader): if self.is_cloneofsimo_lora(): if func_type == LoraFuncTypes.loader: return monkeypatch_or_replace_lora_extended if func_type == LoraFuncTypes.injector: return inject_trainable_lora_extended if self.is_stable_lora(): if func_type == LoraFuncTypes.loader: return load_lora if func_type == LoraFuncTypes.injector:
FILE_BASENAMES = ['unet', 'text_encoder'] LORA_FILE_TYPES = ['.pt', '.safetensors'] CLONE_OF_SIMO_KEYS = ['model', 'loras', 'target_replace_module', 'r'] STABLE_LORA_KEYS = ['model', 'target_module', 'search_class', 'r', 'dropout', 'lora_bias'] lora_versions = dict( stable_lora = "stable_lora", cloneofsimo = "cloneofsimo" ) lora_func_types = dict( loader = "loader", injector = "injector" ) lora_args = dict( model = None, loras = None, target_replace_module = [], target_module = [], r = 4, search_class = [torch.nn.Linear], dropout = 0, lora_bias = 'none' ) LoraVersions = SimpleNamespace(**lora_versions) LoraFuncTypes = SimpleNamespace(**lora_func_types) LORA_VERSIONS = [LoraVersions.stable_lora, LoraVersions.cloneofsimo] LORA_FUNC_TYPES = [LoraFuncTypes.loader, LoraFuncTypes.injector] def filter_dict(_dict, keys=[]): if len(keys) == 0: assert "Keys cannot empty for filtering return dict." for k in keys: if k not in lora_args.keys(): assert f"{k} does not exist in available LoRA arguments" return {k: v for k, v in _dict.items() if k in keys} class LoraHandler(object): def __init__( self, version: LORA_VERSIONS = LoraVersions.cloneofsimo, use_unet_lora: bool = False, use_text_lora: bool = False, save_for_webui: bool = False, only_for_webui: bool = False, lora_bias: str = 'none', unet_replace_modules: list = ['UNet3DConditionModel'], text_encoder_replace_modules: list = ['CLIPEncoderLayer'] ): self.version = version self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader) self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector) self.lora_bias = lora_bias self.use_unet_lora = use_unet_lora self.use_text_lora = use_text_lora self.save_for_webui = save_for_webui self.only_for_webui = only_for_webui self.unet_replace_modules = unet_replace_modules self.text_encoder_replace_modules = text_encoder_replace_modules self.use_lora = any([use_text_lora, use_unet_lora]) if self.use_lora: print(f"Using LoRA Version: {self.version}") def is_cloneofsimo_lora(self): return self.version == LoraVersions.cloneofsimo def is_stable_lora(self): return self.version == LoraVersions.stable_lora def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader): if self.is_cloneofsimo_lora(): if func_type == LoraFuncTypes.loader: return monkeypatch_or_replace_lora_extended if func_type == LoraFuncTypes.injector: return inject_trainable_lora_extended if self.is_stable_lora(): if func_type == LoraFuncTypes.loader: return load_lora if func_type == LoraFuncTypes.injector:
return add_lora_to
10
2023-12-07 08:26:29+00:00
16k
octo-models/octo
scripts/finetune.py
[ { "identifier": "make_single_dataset", "path": "octo/data/dataset.py", "snippet": "def make_single_dataset(\n dataset_kwargs: dict,\n *,\n train: bool,\n traj_transform_kwargs: dict = {},\n frame_transform_kwargs: dict = {},\n) -> dl.DLataset:\n \"\"\"Creates a single dataset from kwargs. Returns a dataset of trajectories.\n\n Args:\n dataset_kwargs: kwargs passed to `make_dataset_from_rlds` that are dataset-specific.\n train: whether this is a training or validation dataset.\n traj_transform_kwargs: kwargs passed to 'apply_trajectory_transforms'.\n frame_transform_kwargs: kwargs passed to 'get_frame_transforms'.\n \"\"\"\n dataset, dataset_statistics = make_dataset_from_rlds(\n **dataset_kwargs,\n train=train,\n )\n dataset = apply_trajectory_transforms(dataset, **traj_transform_kwargs, train=train)\n dataset = apply_frame_transforms(dataset, **frame_transform_kwargs, train=train)\n\n # this seems to reduce memory usage without affecting speed\n dataset = dataset.with_ram_budget(1)\n\n # save for later\n dataset.dataset_statistics = dataset_statistics\n return dataset" }, { "identifier": "OctoModel", "path": "octo/model/octo_model.py", "snippet": "class OctoModel:\n \"\"\"Recommended way of interacting with Octo models.\n\n Usage for inference:\n\n >>> model = OctoModel.load_pretrained(checkpoint_dir)\n >>> tasks = model.create_tasks(texts=[\"go to the red room\"])\n >>> # or tasks = model.create_tasks(goals={\"image_primary\": goal_images})\n >>> actions = model.sample_actions(observations, tasks, rng=jax.random.PRNGKey(0))\n >>> # Note: these are normalized actions (processed to mean 0 and std 1). To get the raw actions,\n # un-normalize them using model.dataset_statistics\n\n Usage for finetuning:\n\n >>> model = OctoModel.load_pretrained(checkpoint_dir)\n >>> train_state = octo.utils.train_utils.TrainState.create(\n rng=jax.random.PRNGKey(0),\n model=model,\n tx=optax.adamw(...)\n )\n >>> # access params through train_state.model.params\n >>> train_state, metrics = your_update_function(train_state, batch)\n >>> # when it's time to save (note that this only saves the model parameters,\n >>> # not the full optimizer state)\n >>> train_state.model.save_pretrained(step, save_dir)\n\n Usage for pretraining:\n\n >>> model = OctoModel.from_config(\n config,\n example_batch,\n text_processor\n ) # initializes params\n >>> # Continue as in finetuning example\n\n See full usage examples in train.py and finetune.py.\n\n \"\"\"\n\n module: OctoModule = struct.field(pytree_node=False)\n text_processor: TextProcessor = struct.field(pytree_node=False)\n config: Config = struct.field(pytree_node=False)\n params: Params\n example_batch: Data\n dataset_statistics: Optional[Data]\n\n def create_tasks(\n self, goals: Optional[Data] = None, texts: Optional[Sequence[str]] = None\n ):\n \"\"\"Creates tasks dict from goals and texts.\n\n Args:\n goals: if not None, dict of arrays with shape (batch_size, *)\n texts: if not None, list of texts of length batch_size\n\n Omit images to run the language-conditioned model, and omit texts to run the\n goal-conditioned model.\n \"\"\"\n assert goals is not None or texts is not None\n tasks = {\"pad_mask_dict\": {}}\n if goals is not None:\n tasks.update(goals)\n tasks[\"pad_mask_dict\"].update(\n {k: np.ones(v.shape[:1], dtype=bool) for k, v in goals.items()}\n )\n else:\n batch_size = len(texts)\n tasks.update(\n {\n k: np.zeros((batch_size, *v.shape[1:]), dtype=v.dtype)\n for k, v in self.example_batch[\"task\"].items()\n if k not in (\"pad_mask_dict\", 
\"language_instruction\")\n }\n )\n tasks[\"pad_mask_dict\"].update(\n {\n k: np.zeros(batch_size, dtype=bool)\n for k in tasks.keys()\n if k != \"pad_mask_dict\"\n }\n )\n\n if texts is not None:\n assert self.text_processor is not None\n tasks[\"language_instruction\"] = texts\n tasks[\"pad_mask_dict\"][\"language_instruction\"] = np.ones(\n len(texts), dtype=bool\n )\n else:\n batch_size = jax.tree_leaves(goals)[0].shape[0]\n tasks[\"language_instruction\"] = [\"\"] * batch_size\n tasks[\"pad_mask_dict\"][\"language_instruction\"] = np.zeros(\n batch_size, dtype=bool\n )\n\n if self.text_processor is not None:\n tasks[\"language_instruction\"] = self.text_processor.encode(\n tasks[\"language_instruction\"]\n )\n else:\n del tasks[\"language_instruction\"]\n\n _verify_shapes(tasks, \"tasks\", self.example_batch[\"task\"], starting_dim=1)\n return tasks\n\n @partial(jax.jit, static_argnames=(\"train\",))\n def run_transformer(\n self, observations: Data, tasks: Data, pad_mask: ArrayLike, train: bool = False\n ):\n \"\"\"Runs the transformer, but does shape checking on the inputs.\n\n Args:\n observations: dictionary of arrays of shape (batch_size, window_size, *shape).\n Shape must be consistent with self.example_batch[\"observation\"]\n tasks: dict of tasks of shape (batch_size, *shape)\n Shape must be consistent with self.example_batch[\"task\"]\n pad_mask: (batch_size, window_size) Boolean mask that is False when the timestep corresponds to padding\n train: whether to run in train mode\n \"\"\"\n _verify_shapes(\n observations,\n \"observations\",\n self.example_batch[\"observation\"],\n starting_dim=2,\n )\n _verify_shapes(tasks, \"tasks\", self.example_batch[\"task\"], starting_dim=1)\n\n return self.module.apply(\n {\"params\": self.params},\n observations,\n tasks,\n pad_mask,\n train=train,\n method=\"octo_transformer\",\n )\n\n @partial(jax.jit, static_argnames=(\"train\", \"sample_shape\", \"argmax\"))\n def sample_actions(\n self,\n observations: Data,\n tasks: Data,\n pad_mask: Optional[ArrayLike] = None,\n train: bool = False,\n argmax: bool = False,\n sample_shape: Tuple[int, ...] = (),\n rng: Optional[PRNGKey] = None,\n temperature: float = 1.0,\n ):\n \"\"\"Samples actions from the model. See `action_heads.py` for more info.\n\n Args:\n observations: dictionary of arrays of shape (batch_size, window_size, *)\n tasks: dict of tasks of shape (batch_size, *)\n pad_mask: (batch_size, window_size) Boolean mask that is False when the timestep corresponds to padding\n train: whether to run in train mode\n ...see `action_heads.py` for the rest of the kwargs.\n Returns:\n actions: (*sample_shape, batch_size, pred_horizon, action_dim)\n \"\"\"\n if pad_mask is None:\n pad_mask = observations[\"pad_mask\"]\n\n transformer_outputs = self.run_transformer(\n observations, tasks, pad_mask, train=train\n )\n action_head: ActionHead = self.module.bind({\"params\": self.params}).heads[\n \"action\"\n ]\n return action_head.predict_action(\n transformer_outputs,\n train=train,\n argmax=argmax,\n sample_shape=sample_shape,\n rng=rng,\n temperature=temperature,\n )\n\n @classmethod\n def load_pretrained(\n cls,\n checkpoint_path: str,\n step: Optional[int] = None,\n ) -> \"OctoModel\":\n \"\"\"Loads a model from a checkpoint that was saved via `save_pretrained`.\n\n Args:\n checkpoint_path (str): A path to either a directory of checkpoints or a single checkpoint.\n step (int, optional): If multiple checkpoints are present, which one to load. 
Defaults to the latest.\n \"\"\"\n if checkpoint_path.startswith(\"hf://\"):\n if step:\n raise ValueError(\n \"You can't set config['pretrained_step'] when loading from HuggingFace.\"\n )\n checkpoint_path = _download_from_huggingface(\n checkpoint_path.removeprefix(\"hf://\")\n )\n\n # load config\n with tf.io.gfile.GFile(\n tf.io.gfile.join(checkpoint_path, \"config.json\"), \"r\"\n ) as f:\n config = json.load(f)\n\n # load example batch\n with tf.io.gfile.GFile(\n tf.io.gfile.join(checkpoint_path, \"example_batch.msgpack\"), \"rb\"\n ) as f:\n example_batch = flax.serialization.msgpack_restore(f.read())\n # shim for migrating from \"tasks\" to \"task\"\n if \"tasks\" in example_batch:\n example_batch[\"task\"] = example_batch.pop(\"tasks\")\n\n logging.debug(\n \"Model was trained with observations: %s\",\n flax.core.pretty_repr(\n jax.tree_map(jnp.shape, example_batch[\"observation\"])\n ),\n )\n logging.debug(\n \"Model was trained with tasks: %s\",\n flax.core.pretty_repr(jax.tree_map(jnp.shape, example_batch[\"task\"])),\n )\n\n # load dataset statistics\n with tf.io.gfile.GFile(\n tf.io.gfile.join(checkpoint_path, \"dataset_statistics.json\"), \"r\"\n ) as f:\n dataset_statistics = json.load(f)\n dataset_statistics = jax.tree_map(\n np.array, dataset_statistics, is_leaf=lambda x: not isinstance(x, dict)\n )\n\n # create model def (an OctoModule)\n module = OctoModule.create(**config[\"model\"])\n # infer params shape without actually doing any computation\n params_shape = jax.eval_shape(\n partial(module.init, train=False),\n jax.random.PRNGKey(0),\n example_batch[\"observation\"],\n example_batch[\"task\"],\n example_batch[\"observation\"][\"pad_mask\"],\n )[\"params\"]\n # restore params, checking to make sure the shape matches\n checkpointer = orbax.checkpoint.CheckpointManager(\n checkpoint_path, orbax.checkpoint.PyTreeCheckpointer()\n )\n step = step if step is not None else checkpointer.latest_step()\n params = checkpointer.restore(step, params_shape)\n\n if config[\"text_processor\"] is not None:\n text_processor = ModuleSpec.instantiate(config[\"text_processor\"])()\n else:\n text_processor = None\n\n return cls(\n module=module,\n params=params,\n text_processor=text_processor,\n example_batch=example_batch,\n config=config,\n dataset_statistics=dataset_statistics,\n )\n\n def save_pretrained(\n self,\n step: int,\n checkpoint_path: Optional[str] = None,\n checkpoint_manager: Optional[orbax.checkpoint.CheckpointManager] = None,\n ):\n \"\"\"Saves a model, as well as corresponding metadata needed for `load_pretrained`. Takes either a\n pre-existing checkpoint manager (which already knows where to save the checkpoint) or a path to a\n directory to save the checkpoint to.\n\n Args:\n step (int): Step number.\n checkpoint_path (str, optional): Path to save the checkpoint.\n checkpoint_manager (optional): Checkpoint manager to save the checkpoint.\n params (optional): Params to save. 
If None, uses self.params.\n \"\"\"\n if (checkpoint_path is None) == (checkpoint_manager is None):\n raise ValueError(\n \"Must provide exactly one of checkpoint_path or checkpoint_manager.\"\n )\n if checkpoint_manager is None:\n checkpoint_manager = orbax.checkpoint.CheckpointManager(\n checkpoint_path, orbax.checkpoint.PyTreeCheckpointer()\n )\n if checkpoint_path is None:\n checkpoint_path = str(checkpoint_manager._directory)\n\n # save params\n checkpoint_manager.save(\n step,\n self.params,\n {\"save_args\": orbax_utils.save_args_from_target(self.params)},\n )\n\n if jax.process_index() == 0:\n # save config\n config_path = tf.io.gfile.join(checkpoint_path, \"config.json\")\n if not tf.io.gfile.exists(config_path):\n with tf.io.gfile.GFile(config_path, \"w\") as f:\n json.dump(self.config, f)\n\n # save example batch\n example_batch_path = tf.io.gfile.join(\n checkpoint_path, \"example_batch.msgpack\"\n )\n if not tf.io.gfile.exists(example_batch_path):\n with tf.io.gfile.GFile(example_batch_path, \"wb\") as f:\n f.write(flax.serialization.msgpack_serialize(self.example_batch))\n\n # save dataset statistics\n dataset_statistics_path = tf.io.gfile.join(\n checkpoint_path, \"dataset_statistics.json\"\n )\n if not tf.io.gfile.exists(dataset_statistics_path):\n with tf.io.gfile.GFile(dataset_statistics_path, \"w\") as f:\n json.dump(\n jax.tree_map(lambda x: x.tolist(), self.dataset_statistics),\n f,\n )\n\n @classmethod\n def from_config(\n cls,\n config: Config,\n example_batch: Data,\n text_processor: Optional[Any] = None,\n verbose: bool = False,\n rng: Optional[PRNGKey] = None,\n dataset_statistics: Optional[Data] = None,\n ):\n \"\"\"Initializes a model with a fresh set of weights from a given config + example_batch.\n\n Args:\n config (Dict[str, Any]): Config dict. 
The only required key is \"model\", but other configuration\n may be saved for posterity.\n example_batch (Dict[str, Any]): Example batch.\n text_processor (Any, optional): Preprocessor for text inputs.\n verbose (bool, optional): Whether to print out a summary of the model.\n rng (Optional[PRNGKey], optional): RNG key for initializing the model.\n dataset_statistics (Optional[Dict[str, Any]], optional): Dataset statistics.\n \"\"\"\n module = OctoModule.create(**config[\"model\"])\n rng = rng if rng is not None else jax.random.PRNGKey(0)\n example_batch = multihost_utils.process_allgather(example_batch)\n example_batch = jax.tree_map(lambda x: x[:1], example_batch)\n\n init_args = (\n example_batch[\"observation\"],\n example_batch[\"task\"],\n example_batch[\"observation\"][\"pad_mask\"],\n )\n\n if verbose:\n print(\n module.tabulate(rng, *init_args, train=False, verbose=True, depth=2)\n ) # Prints out the parameter count of our model, and tokenizer details\n\n @jax.jit\n def _init(rng):\n return module.init(rng, *init_args, train=False)\n\n params = _init(rng)[\"params\"]\n\n return cls(\n module=module,\n params=params,\n text_processor=text_processor,\n example_batch=example_batch,\n config=config,\n dataset_statistics=dataset_statistics,\n )\n\n def get_pretty_spec(self):\n \"\"\"Brief summary of the model's expected inputs and outputs.\"\"\"\n # TODO: generalize this to print out proprio when it is being tokenized\n window_size = self.example_batch[\"observation\"][\"pad_mask\"].shape[1]\n\n observation_space = {\n k: (\"batch\", \"history_window\", *v.shape[2:])\n for k, v in self.example_batch[\"observation\"].items()\n if k.startswith(\"image\")\n }\n task_space = {\n k: (\"batch\", *v.shape[1:])\n for k, v in self.example_batch[\"task\"].items()\n if k.startswith(\"image\")\n }\n if self.text_processor is not None:\n task_space[\"language_instruction\"] = jax.tree_map(\n lambda arr: (\"batch\", *arr.shape[1:]),\n self.example_batch[\"task\"][\"language_instruction\"],\n )\n\n try:\n action_head = self.module.heads[\"action\"]\n action_head_repr = str(action_head.__class__)\n action_dim, pred_horizon = action_head.action_dim, action_head.pred_horizon\n except:\n action_head_repr, action_dim, pred_horizon = \"\", None, None\n\n return SPEC_TEMPLATE.format(\n window_size=window_size,\n observation_space=flax.core.pretty_repr(observation_space),\n task_space=flax.core.pretty_repr(task_space),\n action_head_repr=action_head_repr,\n action_dim=action_dim,\n pred_horizon=pred_horizon,\n )" }, { "identifier": "initialize_compilation_cache", "path": "octo/utils/jax_utils.py", "snippet": "def initialize_compilation_cache(\n cache_dir=os.path.expanduser(\"~/.jax_compilation_cache\"),\n):\n \"\"\"Initializes the Jax persistent compilation cache.\"\"\"\n compilation_cache.initialize_cache(cache_dir)\n for logger in [logging.getLogger(name) for name in logging.root.manager.loggerDict]:\n logger.addFilter(\n lambda record: \"Not writing persistent cache entry for\"\n not in record.getMessage()\n )" }, { "identifier": "ModuleSpec", "path": "octo/utils/spec.py", "snippet": "class ModuleSpec(TypedDict):\n \"\"\"A JSON-serializable representation of a function or class with some default args and kwargs to pass to\n it. 
Useful for specifying a particular class or function in a config file, while keeping it serializable\n and overridable from the command line using ml_collections.\n\n Usage:\n\n # Preferred way to create a spec:\n >>> from octo.model.components.transformer import Transformer\n >>> spec = ModuleSpec.create(Transformer, num_layers=3)\n # Same as above using the fully qualified import string:\n >>> spec = ModuleSpec.create(\"octo.model.components.transformer:Transformer\", num_layers=3)\n\n # Usage:\n >>> ModuleSpec.instantiate(spec) == partial(Transformer, num_layers=3)\n # can pass additional kwargs at instantiation time\n >>> transformer = ModuleSpec.instantiate(spec, num_heads=8)\n\n Note: ModuleSpec is just an alias for a dictionary (that is strongly typed), not a real class. So from\n your code's perspective, it is just a dictionary.\n\n module (str): The module the callable is located in\n name (str): The name of the callable in the module\n args (tuple): The args to pass to the callable\n kwargs (dict): The kwargs to pass to the callable\n \"\"\"\n\n module: str\n name: str\n args: Tuple[Any, ...]\n kwargs: Dict[str, Any]\n\n @staticmethod\n def create(callable_or_full_name: Union[str, callable], *args, **kwargs) -> \"ModuleSpec\": # type: ignore\n \"\"\"Create a module spec from a callable or import string.\n\n Args:\n callable_or_full_name (str or object): Either the object itself or a fully qualified import string\n (e.g. \"octo.model.components.transformer:Transformer\")\n args (tuple, optional): Passed into callable upon instantiation.\n kwargs (dict, optional): Passed into callable upon instantiation.\n \"\"\"\n if isinstance(callable_or_full_name, str):\n assert callable_or_full_name.count(\":\") == 1, (\n \"If passing in a string, it must be a fully qualified import string \"\n \"(e.g. 'octo.model.components.transformer:Transformer')\"\n )\n module, name = callable_or_full_name.split(\":\")\n else:\n module, name = _infer_full_name(callable_or_full_name)\n\n return ModuleSpec(module=module, name=name, args=args, kwargs=kwargs)\n\n @staticmethod\n def instantiate(spec: \"ModuleSpec\"): # type: ignore\n if set(spec.keys()) != {\"module\", \"name\", \"args\", \"kwargs\"}:\n raise ValueError(\n f\"Expected ModuleSpec, but got {spec}. 
\"\n \"ModuleSpec must have keys 'module', 'name', 'args', and 'kwargs'.\"\n )\n cls = _import_from_string(spec[\"module\"], spec[\"name\"])\n return partial(cls, *spec[\"args\"], **spec[\"kwargs\"])" }, { "identifier": "RolloutVisualizationCallback", "path": "octo/utils/train_callbacks.py", "snippet": "class RolloutVisualizationCallback(Callback):\n visualizer_kwargs_list: Sequence[Mapping[str, Any]]\n text_processor: TextProcessor\n trajs_for_rollouts: int\n model_pred_horizon: int\n history_length: int\n modes_to_evaluate: str = (\"text_conditioned\", \"image_conditioned\")\n\n def __post_init__(self):\n self.zero_text = jax.tree_map(lambda x: x[0], self.text_processor.encode(\"\"))\n\n self.rollout_visualizers = [\n RolloutVisualizer(\n text_processor=self.text_processor,\n history_length=self.history_length,\n action_chunk=self.model_pred_horizon\n if \"pred_horizon\" not in kwargs\n else kwargs[\"pred_horizon\"],\n **kwargs,\n )\n for kwargs in self.visualizer_kwargs_list\n ]\n\n def __call__(self, train_state: TrainState, step: int):\n wandb_metrics = {}\n modal_policy_fns = {\n mode: partial(\n get_policy_sampled_actions,\n train_state,\n zero_text=self.zero_text,\n samples_per_state=1,\n policy_mode=mode,\n )\n for mode in self.modes_to_evaluate\n }\n for rollout_visualizer in self.rollout_visualizers:\n for mode, policy_fn in modal_policy_fns.items():\n logging.info(f\"Running rollouts for {rollout_visualizer.env_name}\")\n rollout_infos = rollout_visualizer.run_rollouts(\n policy_fn, n_rollouts=self.trajs_for_rollouts\n )\n wandb_metrics[\n f\"rollouts_{rollout_visualizer.env_name}_chunk{rollout_visualizer.action_chunk}/{mode}\"\n ] = rollout_infos\n\n return wandb_metrics" }, { "identifier": "SaveCallback", "path": "octo/utils/train_callbacks.py", "snippet": "class SaveCallback(Callback):\n \"\"\"Callback that saves checkpoints to `save_dir`. 
If `save_dir` is None, does nothing.\"\"\"\n\n save_dir: Optional[str]\n\n def __post_init__(self):\n if self.save_dir is not None:\n if not self.save_dir.startswith(\"gs://\"):\n self.save_dir = os.path.abspath(self.save_dir)\n if jax.process_index() == 0:\n tf.io.gfile.makedirs(self.save_dir)\n logging.info(f\"Created {self.save_dir}\")\n # make checkpointers\n # only keep latest full TrainState\n self.state_checkpointer = orbax.checkpoint.CheckpointManager(\n tf.io.gfile.join(self.save_dir, \"state\"),\n orbax.checkpoint.PyTreeCheckpointer(),\n options=orbax.checkpoint.CheckpointManagerOptions(\n max_to_keep=1,\n ),\n )\n # keep every params checkpoint\n self.params_checkpointer = orbax.checkpoint.CheckpointManager(\n self.save_dir,\n orbax.checkpoint.PyTreeCheckpointer(),\n )\n\n def __call__(self, train_state: TrainState, step: int):\n if self.save_dir is not None:\n train_state.model.save_pretrained(\n step, checkpoint_manager=self.params_checkpointer\n )\n self.state_checkpointer.save(\n step,\n train_state,\n {\"save_args\": orbax_utils.save_args_from_target(train_state)},\n )" }, { "identifier": "ValidationCallback", "path": "octo/utils/train_callbacks.py", "snippet": "class ValidationCallback(Callback):\n loss_fn: Callable\n process_batch_fn: Callable[[Data], Data]\n text_processor: Optional[TextProcessor]\n val_dataset_kwargs_list: Sequence[Mapping[str, Any]]\n dataset_kwargs: Mapping[str, Any]\n val_shuffle_buffer_size: int\n num_val_batches: int\n modes_to_evaluate: Sequence[str] = (\"text_conditioned\", \"image_conditioned\")\n train: bool = False\n\n def __post_init__(self):\n if self.text_processor is not None:\n self.zero_text = jax.tree_map(\n lambda x: x[0], self.text_processor.encode(\"\")\n )\n self.val_iterators = {}\n for single_dataset_kwargs in self.val_dataset_kwargs_list:\n val_dataset = create_validation_dataset(\n single_dataset_kwargs,\n self.dataset_kwargs[\"traj_transform_kwargs\"],\n self.dataset_kwargs[\"frame_transform_kwargs\"],\n train=self.train,\n )\n val_iterator = (\n val_dataset.unbatch()\n .shuffle(self.val_shuffle_buffer_size)\n .repeat()\n .batch(self.dataset_kwargs[\"batch_size\"])\n .iterator(prefetch=0)\n )\n val_iterator = map(self.process_batch_fn, val_iterator)\n self.val_iterators[single_dataset_kwargs[\"name\"]] = val_iterator\n\n @partial(\n jax.jit,\n out_shardings=jax.sharding.PositionalSharding(jax.devices()).replicate(),\n )\n def eval_step(state: TrainState, batch: Data):\n loss_fn_partial = partial(\n self.loss_fn,\n params=state.model.params,\n rng=state.rng,\n train=False,\n )\n all_tasks = {}\n\n if \"base\" in self.modes_to_evaluate:\n all_tasks[\"base\"] = batch[\"task\"]\n if \"image_conditioned\" in self.modes_to_evaluate:\n all_tasks[\"image_conditioned\"] = remove_text(\n batch[\"task\"], self.zero_text\n )\n if \"text_conditioned\" in self.modes_to_evaluate:\n all_tasks[\"text_conditioned\"] = remove_images(batch[\"task\"])\n\n if \"unconditioned\" in self.modes_to_evaluate:\n all_tasks[\"unconditioned\"] = remove_text(\n remove_images(batch[\"task\"]), self.zero_text\n )\n return {\n k: loss_fn_partial(batch=flax.core.copy(batch, {\"task\": tasks}))[1]\n for k, tasks in all_tasks.items()\n }\n\n self.eval_step = eval_step\n\n def __call__(self, train_state: TrainState, step: int):\n wandb_metrics = {}\n for name, val_data_iter in self.val_iterators.items():\n metrics = []\n for _, batch in tqdm.tqdm(\n zip(range(self.num_val_batches), val_data_iter),\n total=self.num_val_batches,\n desc=name,\n ):\n 
metrics.append(self.eval_step(train_state, batch))\n metrics = jax.tree_map(lambda *xs: np.mean(xs), *metrics)\n wandb_metrics[f\"validation_{name}\"] = metrics\n return wandb_metrics" }, { "identifier": "VisualizationCallback", "path": "octo/utils/train_callbacks.py", "snippet": "class VisualizationCallback(Callback):\n text_processor: TextProcessor\n val_dataset_kwargs_list: Sequence[Mapping[str, Any]]\n dataset_kwargs: Mapping[str, Any]\n eval_batch_size: int\n trajs_for_metrics: int\n trajs_for_viz: int\n samples_per_state: int\n modes_to_evaluate: str = (\"text_conditioned\", \"image_conditioned\")\n train: bool = False\n\n def __post_init__(self):\n self.zero_text = jax.tree_map(lambda x: x[0], self.text_processor.encode(\"\"))\n\n self.visualizers = {}\n for single_dataset_kwargs in self.val_dataset_kwargs_list:\n val_dataset = create_validation_dataset(\n single_dataset_kwargs,\n self.dataset_kwargs[\"traj_transform_kwargs\"],\n self.dataset_kwargs[\"frame_transform_kwargs\"],\n train=self.train,\n )\n self.visualizers[single_dataset_kwargs[\"name\"]] = Visualizer(\n val_dataset, text_processor=self.text_processor, freeze_trajs=False\n )\n\n def __call__(self, train_state: TrainState, step: int):\n wandb_metrics = {}\n modal_policy_fns = {\n mode: batched_apply(\n partial(\n get_policy_sampled_actions,\n train_state,\n zero_text=self.zero_text,\n samples_per_state=self.samples_per_state,\n policy_mode=mode,\n ),\n self.eval_batch_size,\n )\n for mode in self.modes_to_evaluate\n }\n\n for name, visualizer in self.visualizers.items():\n for mode, policy_fn in modal_policy_fns.items():\n if self.trajs_for_metrics > 0:\n raw_infos = visualizer.raw_evaluations(\n policy_fn, max_trajs=self.trajs_for_metrics\n )\n metrics = visualizer.metrics_for_wandb(raw_infos)\n wandb_metrics[f\"offline_metrics_{name}/{mode}\"] = metrics\n if self.trajs_for_viz > 0:\n images = visualizer.visualize_for_wandb(\n policy_fn, max_trajs=self.trajs_for_viz\n )\n wandb_metrics[f\"visualizations_{name}/{mode}\"] = images\n return wandb_metrics" }, { "identifier": "check_config_diff", "path": "octo/utils/train_utils.py", "snippet": "def check_config_diff(new_conf: Config, old_conf: Config, silent: bool = False):\n \"\"\"Checks for differences between new config and old config dicts.\"\"\"\n new_conf_flat = flax.traverse_util.flatten_dict(\n new_conf.to_dict() if isinstance(new_conf, ConfigDict) else new_conf\n )\n old_conf_flat = flax.traverse_util.flatten_dict(\n old_conf.to_dict() if isinstance(old_conf, ConfigDict) else old_conf\n )\n\n # check for missing / new keys\n if set(new_conf_flat.keys()) != set(old_conf_flat.keys()) and not silent:\n logging.info(\n \"New config contains extra items: %s\",\n set(new_conf_flat.keys()) - set(old_conf_flat.keys()),\n )\n logging.info(\n \"New config doesn't contain items: %s\",\n set(old_conf_flat.keys()) - set(new_conf_flat.keys()),\n )\n\n # print differing key values\n mismatched_keys = {\n k: (new_conf_flat[k], old_conf_flat[k])\n for k in new_conf_flat\n if k in old_conf_flat and new_conf_flat[k] != old_conf_flat[k]\n }\n if mismatched_keys and not silent:\n logging.info(\n \"New config contains keys with new values: %s\",\n flax.core.pretty_repr(mismatched_keys),\n )\n return mismatched_keys or (set(new_conf_flat.keys()) != set(old_conf_flat.keys()))" }, { "identifier": "create_optimizer", "path": "octo/utils/train_utils.py", "snippet": "def create_optimizer(\n params_or_params_shape: Params, **kwargs: dict\n) -> optax.GradientTransformation:\n \"\"\"Creates 
optimizer for Octo.\n\n kwargs are the kwargs for optax.adamw; if the \"learning_rate\" key is a dict, it is interpreted\n as the kwargs for create_lr_schedule (see above), otherwise it is interpreted as a constant\n learning rate.\n\n If clip_gradient is specified, then gradient clipping is applied. If frozen_keys is specified,\n then those parameters are frozen (i.e. not updated) during training.\n\n Returns:\n tx: an Optax optimizer\n lr_callable: Function that takes the current step and returns the learning rate\n \"\"\"\n if isinstance(kwargs[\"learning_rate\"], dict):\n lr_callable = create_lr_schedule(**kwargs[\"learning_rate\"])\n else:\n lr_callable = lambda _: kwargs[\"learning_rate\"]\n kwargs[\"learning_rate\"] = lr_callable\n\n # Following ViT, timm, MAE: this mask skips weight decay on biases and LayerNorm parameters\n wd_mask = jax.tree_util.tree_map_with_path(\n lambda path, x: \"kernel\" in jax.tree_util.keystr(path), params_or_params_shape\n )\n\n clip_gradient = kwargs.pop(\"clip_gradient\", None)\n frozen_keys = kwargs.pop(\"frozen_keys\", None)\n grad_accumulation_steps = kwargs.pop(\"grad_accumulation_steps\", None)\n\n tx = optax.adamw(mu_dtype=jnp.bfloat16, **kwargs, mask=wd_mask)\n if grad_accumulation_steps:\n tx = optax.MultiSteps(tx, grad_accumulation_steps)\n if clip_gradient is not None:\n tx = optax.chain(\n optax.clip_by_global_norm(clip_gradient),\n tx,\n )\n\n if frozen_keys:\n tx, param_partitions = freeze_weights(\n tx, params_or_params_shape, frozen_keys, return_partitions=True\n )\n zero_frozen_params = lambda params: jax.tree_map(\n lambda x, y: x if y == \"trainable\" else jnp.zeros(()),\n params,\n param_partitions,\n )\n param_norm_callable = lambda params: optax.global_norm(\n zero_frozen_params(params)\n )\n else:\n param_norm_callable = optax.global_norm\n\n return tx, lr_callable, param_norm_callable" }, { "identifier": "format_name_with_config", "path": "octo/utils/train_utils.py", "snippet": "def format_name_with_config(name, config):\n \"\"\"Formats a name string with a config dict.\n\n Formatting keys may be specified as {key} or {full_path_to_key_with_underscores}.\n\n Example:\n name = \"model_{model_type}_{model_size}\"\n config = {\"model_type\": \"transformer\", \"model_size\": \"small\"}\n format_name_with_config(name, config) -> \"model_transformer_small\"\n \"\"\"\n config_flat = flax.traverse_util.flatten_dict(config, sep=\"_\")\n config_final = {k.split(\"_\")[-1]: v for k, v in config_flat.items()}\n format_dict = {**config_final, **config_flat}\n return name.format(**format_dict)" }, { "identifier": "merge_params", "path": "octo/utils/train_utils.py", "snippet": "def merge_params(target_params: Params, pretrained_params: Params) -> Params:\n \"\"\"Copies pre-trained params into target_params for every param that has corresponding key + shape.\"\"\"\n flat_target_params = flax.traverse_util.flatten_dict(target_params)\n flat_pretrained_params = flax.traverse_util.flatten_dict(pretrained_params)\n keys_to_update = [\n k\n for k in flat_target_params\n if k in flat_pretrained_params\n and flat_target_params[k].shape == flat_pretrained_params[k].shape\n ]\n missing_keys = [k for k in flat_target_params if k not in flat_pretrained_params]\n shape_mismatch_keys = [\n k\n for k in flat_target_params\n if k in flat_pretrained_params\n and flat_target_params[k].shape != flat_pretrained_params[k].shape\n ]\n\n for key in keys_to_update:\n logging.debug(f\"Param copied from pre-trained: {'.'.join(key)}\")\n if missing_keys or 
shape_mismatch_keys:\n logging.info(\"########## Parameters skipped during model loading: ##########\")\n for key in missing_keys:\n logging.info(\n f\"Param missing in pre-trained model, skipping: {'.'.join(key)}\"\n )\n for key in shape_mismatch_keys:\n logging.info(\n f\"Param with differing shape in pre-trained model, skipping: {'.'.join(key)}\"\n )\n\n flat_target_params = flax.core.copy(\n flat_target_params, {k: flat_pretrained_params[k] for k in keys_to_update}\n )\n target_params = flax.traverse_util.unflatten_dict(flat_target_params)\n return target_params" }, { "identifier": "process_text", "path": "octo/utils/train_utils.py", "snippet": "def process_text(batch: Data, text_processor: Optional[TextProcessor]) -> Data:\n \"\"\"Encodes the language instruction inside the tasks for a batch.\n\n If the text processor is None, removes language entirely from the tasks.\n Expects batch to be a nested dictionary, where\n batch[\"task\"][\"language_instruction\"] is a sequence of byte strings\n \"\"\"\n if text_processor is None:\n batch[\"task\"].pop(\"language_instruction\")\n else:\n batch[\"task\"][\"language_instruction\"] = text_processor.encode(\n [s.decode(\"utf-8\") for s in batch[\"task\"][\"language_instruction\"]]\n )\n return batch" }, { "identifier": "Timer", "path": "octo/utils/train_utils.py", "snippet": "class Timer:\n \"\"\"\n Timer utility. Usage:\n\n timer = Timer()\n with timer(\"foo\"):\n do_something()\n\n timer.tick(\"bar\")\n do_something_else()\n timer.tock(\"bar\")\n\n timer.get_average_times() -> {\"foo\": 0.1, \"bar\": 0.2}\n \"\"\"\n\n def __init__(self):\n self.reset()\n\n @contextmanager\n def __call__(self, key):\n self.tick(key)\n try:\n yield None\n finally:\n self.tock(key)\n\n def reset(self):\n self.counts = defaultdict(int)\n self.times = defaultdict(float)\n self.start_times = {}\n\n def tick(self, key):\n if key in self.start_times:\n raise ValueError(f\"Timer is already ticking for key: {key}\")\n self.start_times[key] = time.time()\n\n def tock(self, key):\n if key not in self.start_times:\n raise ValueError(f\"Timer is not ticking for key: {key}\")\n self.counts[key] += 1\n self.times[key] += time.time() - self.start_times[key]\n del self.start_times[key]\n\n def get_average_times(self, reset=True):\n ret = {key: self.times[key] / self.counts[key] for key in self.counts}\n if reset:\n self.reset()\n return ret" }, { "identifier": "TrainState", "path": "octo/utils/train_utils.py", "snippet": "class TrainState:\n rng: PRNGKey\n model: OctoModel\n step: int\n opt_state: optax.OptState\n tx: optax.GradientTransformation = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n rng: PRNGKey,\n model: OctoModel,\n tx: optax.GradientTransformation,\n ):\n opt_state = tx.init(model.params)\n return cls(\n rng=rng,\n model=model,\n step=0,\n opt_state=opt_state,\n tx=tx,\n )\n\n def apply_gradients(self, *, grads, rng):\n updates, new_opt_state = self.tx.update(\n grads, self.opt_state, self.model.params\n )\n new_params = optax.apply_updates(self.model.params, updates)\n\n return self.replace(\n step=self.step + 1,\n model=self.model.replace(params=new_params),\n opt_state=new_opt_state,\n rng=rng,\n )" } ]
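A brief sketch of the ModuleSpec helper listed in the context snippets above, restating the usage shown in its own docstring; the Transformer class and the num_layers value are taken from that docstring and are illustrative only, assuming the octo package is importable:

from octo.model.components.transformer import Transformer
from octo.utils.spec import ModuleSpec

# a spec can be created from the callable itself or from a fully qualified import string
spec = ModuleSpec.create(Transformer, num_layers=3)
spec = ModuleSpec.create("octo.model.components.transformer:Transformer", num_layers=3)

# instantiate returns functools.partial(Transformer, num_layers=3), so construction
# is deferred and extra kwargs can still be supplied at the eventual call site
make_transformer = ModuleSpec.instantiate(spec)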
import datetime import imp import os import flax import jax import optax import tensorflow as tf import tqdm import wandb from functools import partial from absl import app, flags, logging from flax.traverse_util import flatten_dict from jax.sharding import Mesh, NamedSharding, PartitionSpec from ml_collections import config_flags, ConfigDict from octo.data.dataset import make_single_dataset from octo.model.octo_model import OctoModel from octo.utils.jax_utils import initialize_compilation_cache from octo.utils.spec import ModuleSpec from octo.utils.train_callbacks import ( RolloutVisualizationCallback, SaveCallback, ValidationCallback, VisualizationCallback, ) from octo.utils.train_utils import ( check_config_diff, create_optimizer, format_name_with_config, merge_params, process_text, Timer, TrainState, ) from jax_smi import initialise_tracking # type: ignore
11,508
wandb_id, ) wandb.config.update(dict(save_dir=save_dir), allow_val_change=True) logging.info("Saving to %s", save_dir) save_callback = SaveCallback(save_dir) # Add window_size to top of config, to make eval easier new_config = ConfigDict(model.config) new_config["window_size"] = example_batch["observation"]["pad_mask"].shape[1] model = model.replace(config=new_config) # Save finetuning config since it's not saved by SaveCallback, i.e. as part of model.save_pretrained() with open( tf.io.gfile.join(save_dir, "finetune_config.json"), "w" ) as config_file: config_file.write(FLAGS.config.to_json_best_effort()) else: save_dir = None save_callback = SaveCallback(None) logging.warning("save_dir not passed in, not saving checkpoints") example_batch_spec = jax.tree_map( lambda arr: (arr.shape, str(arr.dtype)), example_batch ) wandb.config.update( dict(example_batch_spec=example_batch_spec), allow_val_change=True ) ######### # # Define loss, train_step, and eval_step # ######### def loss_fn(params, batch, rng, train=True): bound_module = model.module.bind({"params": params}, rngs={"dropout": rng}) transformer_embeddings = bound_module.octo_transformer( batch["observation"], batch["task"], batch["observation"]["pad_mask"], train=train, ) action_loss, action_metrics = bound_module.heads["action"].loss( transformer_embeddings, # Action head knows to pull out the action readout_key batch["action"], pad_mask=batch["observation"]["pad_mask"], train=train, ) return action_loss, action_metrics # Data parallelism # Model is replicated across devices, data is split across devices @partial( jax.jit, in_shardings=[replicated_sharding, dp_sharding], ) def train_step(state, batch): rng, dropout_rng = jax.random.split(state.rng) (loss, info), grads = jax.value_and_grad(loss_fn, has_aux=True)( state.model.params, batch, dropout_rng, train=True ) # Gradient Metrics (TODO: Does the finetuner need these?) ### grad_norm = optax.global_norm(grads) updates, _ = state.tx.update(grads, state.opt_state, state.model.params) update_norm = optax.global_norm(updates) info.update( { "grad_norm": grad_norm, "update_norm": update_norm, "param_norm": param_norm_callable(state.model.params), "learning_rate": lr_callable(state.step), } ) # End Debug Metrics # new_state = state.apply_gradients(grads=grads, rng=rng) return new_state, info ######### # # Build validation & visualization callbacks # ######### if FLAGS.config.modality == "image_conditioned": modes_to_evaluate = ["image_conditioned"] elif FLAGS.config.modality == "text_conditioned": modes_to_evaluate = ["text_conditioned"] elif FLAGS.config.modality == "multimodal": modes_to_evaluate = ["image_conditioned", "text_conditioned"] else: modes_to_evaluate = ["base"] dataset_kwargs_list = [FLAGS.config.dataset_kwargs] val_callback = ValidationCallback( loss_fn=loss_fn, process_batch_fn=process_batch, text_processor=text_processor, val_dataset_kwargs_list=dataset_kwargs_list, dataset_kwargs=FLAGS.config, modes_to_evaluate=modes_to_evaluate, **FLAGS.config.val_kwargs, ) viz_callback = VisualizationCallback( text_processor=text_processor, val_dataset_kwargs_list=dataset_kwargs_list, dataset_kwargs=FLAGS.config, modes_to_evaluate=modes_to_evaluate, **FLAGS.config.viz_kwargs, ) ######### # # Optionally build visualizers for sim env evals # ######### if "rollout_kwargs" in FLAGS.config:
try: initialise_tracking() except ImportError: pass FLAGS = flags.FLAGS flags.DEFINE_string("name", "experiment", "Experiment name.") flags.DEFINE_bool("debug", False, "Debug config (no wandb logging)") default_config_file = os.path.join( os.path.dirname(__file__), "configs/finetune_config.py" ) config_flags.DEFINE_config_file( "config", default_config_file, "File path to the training hyperparameter configuration.", lock_config=False, ) def main(_): initialize_compilation_cache() devices = jax.devices() logging.info( f""" Octo Finetuning Script ====================== Pretrained model: {FLAGS.config.pretrained_path} Finetuning Dataset: {FLAGS.config.dataset_kwargs.name} Data dir: {FLAGS.config.dataset_kwargs.data_dir} Task Modality: {FLAGS.config.modality} Finetuning Mode: {FLAGS.config.finetuning_mode} # Devices: {jax.device_count()} Batch size: {FLAGS.config.batch_size} ({FLAGS.config.batch_size // len(devices) } per device) # Steps: {FLAGS.config.num_steps} """ ) ######### # # Setup Jax Data Parallelism # ######### assert ( FLAGS.config.batch_size % len(devices) == 0 ), f"Batch size ({FLAGS.config.batch_size}) must be divisible by the number of devices ({len(devices)})" assert ( FLAGS.config.viz_kwargs.eval_batch_size % len(devices) == 0 ), f"Eval batch size ({FLAGS.config.viz_kwargs.eval_batch_size}) must be divisible by the number of devices ({len(devices)})" # create a 1D mesh with a single axis named "batch" mesh = Mesh(jax.devices(), axis_names="batch") # Our batches will be data-parallel sharded -- each device will get a slice of the batch dp_sharding = NamedSharding(mesh, PartitionSpec("batch")) # Our model will be replicated across devices (we are only doing data parallelism, not model parallelism) replicated_sharding = NamedSharding(mesh, PartitionSpec()) # prevent tensorflow from using GPU memory since it's only used for data loading tf.config.set_visible_devices([], "GPU") ######### # # Setup WandB # ######### name = format_name_with_config( FLAGS.name, FLAGS.config.to_dict(), ) wandb_id = "{name}_{time}".format( name=name, time=datetime.datetime.now().strftime("%Y%m%d_%H%M%S"), ) wandb.init( config=FLAGS.config.to_dict(), id=wandb_id, name=name, mode="disabled" if FLAGS.debug else None, **FLAGS.config.wandb, ) ######### # # Load Pretrained model + optionally modify config # ######### pretrained_model = OctoModel.load_pretrained( FLAGS.config.pretrained_path, step=FLAGS.config.pretrained_step, ) flat_config = flax.traverse_util.flatten_dict( pretrained_model.config, keep_empty_nodes=True ) for d_key in flax.traverse_util.flatten_dict( FLAGS.config.get("config_delete_keys", ConfigDict()).to_dict() ): for c_key in list(flat_config.keys()): if ".".join(c_key).startswith(".".join(d_key)): del flat_config[c_key] config = ConfigDict(flax.traverse_util.unflatten_dict(flat_config)) config.update(FLAGS.config.get("update_config", ConfigDict())) config = config.to_dict() check_config_diff(config, pretrained_model.config) ######### # # Setup Data Loader # ######### # create text processor if config["text_processor"] is None: text_processor = None else: text_processor = ModuleSpec.instantiate(config["text_processor"])() def process_batch(batch): batch = process_text(batch, text_processor) del batch["dataset_name"] return batch # load standardize_fn from `path/to/file.py:fn_name` format if ( standardize_fn := FLAGS.config["dataset_kwargs"].get("standardize_fn", None) ) is not None: path, name = standardize_fn.split(":") # imp is deprecated, but it's also what ml_collections uses 
standardize_fn = getattr(imp.load_source("standardize_fn", path), name) del FLAGS.config["dataset_kwargs"]["standardize_fn"] FLAGS.config["dataset_kwargs"]["standardize_fn"] = standardize_fn dataset = make_single_dataset( FLAGS.config.dataset_kwargs, traj_transform_kwargs=FLAGS.config.traj_transform_kwargs, frame_transform_kwargs=FLAGS.config.frame_transform_kwargs, train=True, ) train_data_iter = ( dataset.repeat() .unbatch() .shuffle(FLAGS.config.shuffle_buffer_size) .batch(FLAGS.config.batch_size) .iterator() ) train_data_iter = map(process_batch, train_data_iter) example_batch = next(train_data_iter) ######### # # Load Pretrained Model # ######### rng = jax.random.PRNGKey(FLAGS.config.seed) rng, init_rng = jax.random.split(rng) model = OctoModel.from_config( config, example_batch, text_processor, rng=init_rng, dataset_statistics=dataset.dataset_statistics, ) merged_params = merge_params(model.params, pretrained_model.params) model = model.replace(params=merged_params) del pretrained_model ######### # # Setup Optimizer and Train State # ######### params = model.params if FLAGS.config.optimizer.frozen_keys is None: FLAGS.config.optimizer.frozen_keys = model.config["optimizer"]["frozen_keys"] tx, lr_callable, param_norm_callable = create_optimizer( params, **FLAGS.config.optimizer.to_dict(), ) train_state = TrainState.create( model=model, tx=tx, rng=rng, ) ######### # # Save all metadata # ######### if FLAGS.config.save_dir is not None: save_dir = tf.io.gfile.join( FLAGS.config.save_dir, FLAGS.config.wandb.project, FLAGS.config.wandb.group or "", wandb_id, ) wandb.config.update(dict(save_dir=save_dir), allow_val_change=True) logging.info("Saving to %s", save_dir) save_callback = SaveCallback(save_dir) # Add window_size to top of config, to make eval easier new_config = ConfigDict(model.config) new_config["window_size"] = example_batch["observation"]["pad_mask"].shape[1] model = model.replace(config=new_config) # Save finetuning config since it's not saved by SaveCallback, i.e. as part of model.save_pretrained() with open( tf.io.gfile.join(save_dir, "finetune_config.json"), "w" ) as config_file: config_file.write(FLAGS.config.to_json_best_effort()) else: save_dir = None save_callback = SaveCallback(None) logging.warning("save_dir not passed in, not saving checkpoints") example_batch_spec = jax.tree_map( lambda arr: (arr.shape, str(arr.dtype)), example_batch ) wandb.config.update( dict(example_batch_spec=example_batch_spec), allow_val_change=True ) ######### # # Define loss, train_step, and eval_step # ######### def loss_fn(params, batch, rng, train=True): bound_module = model.module.bind({"params": params}, rngs={"dropout": rng}) transformer_embeddings = bound_module.octo_transformer( batch["observation"], batch["task"], batch["observation"]["pad_mask"], train=train, ) action_loss, action_metrics = bound_module.heads["action"].loss( transformer_embeddings, # Action head knows to pull out the action readout_key batch["action"], pad_mask=batch["observation"]["pad_mask"], train=train, ) return action_loss, action_metrics # Data parallelism # Model is replicated across devices, data is split across devices @partial( jax.jit, in_shardings=[replicated_sharding, dp_sharding], ) def train_step(state, batch): rng, dropout_rng = jax.random.split(state.rng) (loss, info), grads = jax.value_and_grad(loss_fn, has_aux=True)( state.model.params, batch, dropout_rng, train=True ) # Gradient Metrics (TODO: Does the finetuner need these?) 
### grad_norm = optax.global_norm(grads) updates, _ = state.tx.update(grads, state.opt_state, state.model.params) update_norm = optax.global_norm(updates) info.update( { "grad_norm": grad_norm, "update_norm": update_norm, "param_norm": param_norm_callable(state.model.params), "learning_rate": lr_callable(state.step), } ) # End Debug Metrics # new_state = state.apply_gradients(grads=grads, rng=rng) return new_state, info ######### # # Build validation & visualization callbacks # ######### if FLAGS.config.modality == "image_conditioned": modes_to_evaluate = ["image_conditioned"] elif FLAGS.config.modality == "text_conditioned": modes_to_evaluate = ["text_conditioned"] elif FLAGS.config.modality == "multimodal": modes_to_evaluate = ["image_conditioned", "text_conditioned"] else: modes_to_evaluate = ["base"] dataset_kwargs_list = [FLAGS.config.dataset_kwargs] val_callback = ValidationCallback( loss_fn=loss_fn, process_batch_fn=process_batch, text_processor=text_processor, val_dataset_kwargs_list=dataset_kwargs_list, dataset_kwargs=FLAGS.config, modes_to_evaluate=modes_to_evaluate, **FLAGS.config.val_kwargs, ) viz_callback = VisualizationCallback( text_processor=text_processor, val_dataset_kwargs_list=dataset_kwargs_list, dataset_kwargs=FLAGS.config, modes_to_evaluate=modes_to_evaluate, **FLAGS.config.viz_kwargs, ) ######### # # Optionally build visualizers for sim env evals # ######### if "rollout_kwargs" in FLAGS.config:
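As a side note, a minimal self-contained sketch of the data-parallel pattern used in the finetuning script above (parameters replicated across devices, batch sharded along a "batch" mesh axis); toy_loss, the array shapes, and the learning rate are hypothetical stand-ins, not part of the original code:

from functools import partial
import jax
import jax.numpy as jnp
from jax.sharding import Mesh, NamedSharding, PartitionSpec

mesh = Mesh(jax.devices(), axis_names="batch")
dp_sharding = NamedSharding(mesh, PartitionSpec("batch"))   # batch dim split across devices
replicated_sharding = NamedSharding(mesh, PartitionSpec())  # params replicated on every device

def toy_loss(params, batch):
    return jnp.mean((batch @ params) ** 2)

@partial(jax.jit, in_shardings=(replicated_sharding, dp_sharding))
def train_step(params, batch):
    loss, grads = jax.value_and_grad(toy_loss)(params, batch)
    return params - 1e-3 * grads, loss

params = jnp.ones((8,))
batch = jax.device_put(jnp.ones((len(jax.devices()) * 4, 8)), dp_sharding)
new_params, loss = train_step(params, batch)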
rollout_callback = RolloutVisualizationCallback(
4
2023-12-13 09:58:56+00:00
16k
LinShan-Bin/OccNeRF
run.py
[ { "identifier": "Runer", "path": "runner.py", "snippet": "class Runer:\n\n def __init__(self, options):\n\n self.opt = options\n self.opt.B = self.opt.batch_size // self.opt.cam_N\n if self.opt.debug:\n self.opt.voxels_size = [8, 128, 128]\n self.opt.render_h = 45\n self.opt.render_w = 80\n self.opt.num_workers = 1\n self.opt.model_name = \"debug/\"\n\n self.log_path = osp.join(self.opt.log_dir, self.opt.model_name + 'exp-{}'.format(time.strftime(\"%Y_%m_%d-%H_%M\", time.localtime())))\n\n\n print('log path:', self.log_path)\n os.makedirs(osp.join(self.log_path, 'eval'), exist_ok=True)\n os.makedirs(osp.join(self.log_path, 'models'), exist_ok=True)\n os.makedirs(osp.join(self.log_path, 'visual_rgb_depth'), exist_ok=True)\n os.makedirs(osp.join(self.log_path, 'visual_feature'), exist_ok=True)\n\n # pdb.set_trace()\n\n self.models = {}\n self.parameters_to_train = []\n\n self.local_rank = self.opt.local_rank\n torch.cuda.set_device(self.local_rank)\n dist.init_process_group(backend='nccl')\n self.device = torch.device(\"cuda\", self.local_rank)\n\n self.num_scales = len(self.opt.scales)\n self.num_input_frames = len(self.opt.frame_ids)\n # self.num_pose_frames = 2 if self.opt.pose_model_input == \"pairs\" else self.num_input_frames\n\n assert self.opt.frame_ids[0] == 0, \"frame_ids must start with 0\"\n\n self.models[\"encoder\"] = networks.Encoder_res101(self.opt.input_channel, path=None, network_type=self.opt.encoder)\n self.models[\"depth\"] = networks.VolumeDecoder(self.opt)\n self.log_print('N_samples: {}'.format(self.models[\"depth\"].N_samples))\n self.log_print('Voxel size: {}'.format(self.models[\"depth\"].voxel_size))\n\n\n self.models[\"encoder\"] = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.models[\"encoder\"])\n self.models[\"encoder\"] = (self.models[\"encoder\"]).to(self.device)\n\n self.parameters_to_train += [{'params': self.models[\"encoder\"].parameters(), 'lr': self.opt.learning_rate, 'weight_decay': self.opt.weight_decay}]\n\n self.models[\"depth\"] = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.models[\"depth\"])\n self.models[\"depth\"] = (self.models[\"depth\"]).to(self.device)\n\n\n # pdb.set_trace()\n # self.parameters_to_train += [{'params': self.models[\"depth\"].parameters(), 'lr': self.opt.de_lr}]\n\n if self.opt.position == 'embedding1':\n self.parameters_to_train += [\n {'params': self.models[\"depth\"]._3DCNN.parameters(), 'lr': self.opt.de_lr, 'weight_decay': self.opt.weight_decay},\n {'params': self.models[\"depth\"].embeddingnet.parameters(), 'lr': self.opt.en_lr, 'weight_decay': self.opt.weight_decay}]\n \n else:\n self.parameters_to_train += [{'params': self.models[\"depth\"].parameters(), 'lr': self.opt.de_lr, 'weight_decay': self.opt.weight_decay}]\n\n if self.opt.load_weights_folder is not None:\n self.load_model()\n\n for key in self.models.keys():\n self.models[key] = DDP(self.models[key], device_ids=[self.local_rank], output_device=self.local_rank,\n find_unused_parameters=True, broadcast_buffers=False)\n\n self.model_optimizer = optim.AdamW(self.parameters_to_train)\n self.criterion = nn.BCELoss()\n self.model_lr_scheduler = optim.lr_scheduler.StepLR(self.model_optimizer, self.opt.scheduler_step_size, gamma = 0.1, last_epoch=-1)\n\n\n for key in self.models.keys():\n for name, param in self.models[key].named_parameters():\n if param.requires_grad:\n pass\n else:\n print(name)\n # print(param.data)\n print(\"requires_grad:\", param.requires_grad)\n print(\"-----------------------------------\")\n\n if self.local_rank == 0:\n 
self.log_print(\"Training model named: {}\".format(self.opt.model_name))\n\n datasets_dict = {\n # \"ddad\": datasets.DDADDatasetRevision,\n \"nusc\": datasets.NuscDataset,\n # \"kitti\": datasets.KittiDataset,\n }\n\n self.dataset = datasets_dict[self.opt.dataset]\n\n self.opt.batch_size = self.opt.batch_size // self.opt.cam_N\n\n if self.opt.dataset == 'nusc':\n nusc = NuScenes(version='v1.0-trainval', dataroot=osp.join(self.opt.dataroot, 'nuscenes'), verbose=False)\n else:\n nusc = None\n\n train_dataset = self.dataset(self.opt,\n self.opt.height, self.opt.width,\n self.opt.frame_ids, num_scales=self.num_scales, is_train=True,\n volume_depth=self.opt.volume_depth, nusc=nusc)\n\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True)\n\n # pdb.set_trace()\n self.train_loader = DataLoader(\n train_dataset, self.opt.batch_size, collate_fn=self.my_collate,\n num_workers=self.opt.num_workers, pin_memory=True, drop_last=True, sampler=train_sampler)\n\n self.num_total_steps = len(self.train_loader) * self.opt.num_epochs\n\n val_dataset = self.dataset(self.opt,\n self.opt.height, self.opt.width,\n self.opt.frame_ids, num_scales=1, is_train=False,\n volume_depth=self.opt.volume_depth, nusc=nusc)\n\n\n rank, world_size = get_dist_info()\n self.world_size = world_size\n val_sampler = DistributedSampler(val_dataset, world_size, rank, shuffle=False)\n\n\n self.val_loader = DataLoader(\n val_dataset, self.opt.batch_size, collate_fn=self.my_collate,\n num_workers=self.opt.num_workers, pin_memory=True, drop_last=False, sampler=val_sampler)\n\n\n self.num_val = len(val_dataset)\n\n self.opt.batch_size = self.opt.batch_size * self.opt.cam_N\n self.num_val = self.num_val * self.opt.cam_N\n\n self.best_result_str = ''\n self.best_abs_rel = 1.0\n\n if not self.opt.no_ssim:\n self.ssim = SSIM()\n self.ssim.to(self.device)\n\n self.backproject_depth = {}\n self.project_3d = {}\n for scale in self.opt.scales:\n h = self.opt.height // (2 ** scale)\n w = self.opt.width // (2 ** scale)\n\n num_cam = self.opt.cam_N * 3 if self.opt.auxiliary_frame else self.opt.cam_N\n self.backproject_depth[scale] = BackprojectDepth(num_cam, h, w)\n self.backproject_depth[scale].to(self.device)\n\n self.project_3d[scale] = Project3D(num_cam, h, w)\n self.project_3d[scale].to(self.device)\n\n self.depth_metric_names = [\n \"de/abs_rel\", \"de/sq_rel\", \"de/rms\", \"de/log_rms\", \"da/a1\", \"da/a2\", \"da/a3\"]\n\n if self.local_rank == 0:\n self.log_print(\"There are {:d} training items and {:d} validation items\\n\".format(\n len(train_dataset), len(val_dataset)))\n \n if self.opt.use_semantic:\n if len(self.opt.class_frequencies) == self.opt.semantic_classes:\n self.class_weights = 1.0 / np.sqrt(np.array(self.opt.class_frequencies, dtype=np.float32))\n self.class_weights = np.nan_to_num(self.class_weights, posinf=0)\n self.class_weights = self.class_weights / np.mean(self.class_weights)\n self.sem_criterion = nn.CrossEntropyLoss(\n weight=torch.FloatTensor(self.class_weights).to(self.device),\n ignore_index=-1, reduction=\"mean\")\n else:\n self.sem_criterion = nn.CrossEntropyLoss(ignore_index=-1, reduction=\"mean\")\n\n self.save_opts()\n\n\n def my_collate(self, batch):\n batch_new = {}\n keys_list = list(batch[0].keys())\n special_key_list = ['id']\n\n for key in keys_list:\n if key not in special_key_list:\n # print('key:', key)\n batch_new[key] = [item[key] for item in batch]\n try:\n batch_new[key] = torch.cat(batch_new[key], axis=0)\n except:\n print('key', key)\n\n else:\n 
batch_new[key] = []\n for item in batch:\n for value in item[key]:\n # print(value.shape)\n batch_new[key].append(value)\n\n return batch_new\n\n def to_device(self, inputs):\n\n special_key_list = ['id', ('K_ori', -1), ('K_ori', 1)]\n\n for key, ipt in inputs.items():\n\n if key in special_key_list:\n inputs[key] = ipt\n\n else:\n inputs[key] = ipt.to(self.device)\n\n def set_train(self):\n \"\"\"Convert all models to training mode\n \"\"\"\n for m in self.models.values():\n m.train()\n\n def set_eval(self):\n \"\"\"Convert all models to testing/evaluation mode\n \"\"\"\n for m in self.models.values():\n m.eval()\n\n def train(self):\n \"\"\"Run the entire training pipeline\"\"\"\n if self.local_rank == 0:\n\n os.makedirs(osp.join(self.log_path, 'code'), exist_ok=True)\n\n # back up files\n source1 = 'runner.py'\n source3 = 'run.py'\n source4 = 'options.py'\n source5 = 'run_vis.py'\n\n source6 = 'configs'\n source7 = 'networks'\n source8 = 'datasets'\n source9 = 'utils'\n\n source = [source1, source3, source4, source5]\n for i in source:\n shutil.copy(i, osp.join(self.log_path, 'code'))\n\n if not osp.exists(osp.join(self.log_path, 'code' + '/configs')):\n shutil.copytree(source6, osp.join(self.log_path, 'code' + '/configs'))\n\n if not osp.exists(osp.join(self.log_path, 'code' + '/networks')):\n shutil.copytree(source7, osp.join(self.log_path, 'code' + '/networks'))\n\n if not osp.exists(osp.join(self.log_path, 'code' + '/datasets')):\n shutil.copytree(source8, osp.join(self.log_path, 'code' + '/datasets'))\n\n if not osp.exists(osp.join(self.log_path, 'code' + '/utils')):\n shutil.copytree(source9, osp.join(self.log_path, 'code' + '/utils'))\n\n self.step = 1\n\n if self.opt.eval_only:\n self.val()\n if self.local_rank == 0:\n self.evaluation(evl_score=True)\n\n return None\n\n self.epoch = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.train_loader.sampler.set_epoch(self.epoch)\n self.run_epoch()\n self.save_model()\n\n\n self.val()\n if self.local_rank == 0:\n self.log_print(f\"Evaluation results at epoch {self.epoch} (step {self.step}):\")\n self.evaluation(evl_score=True)\n\n return None\n\n def evaluation(self, evl_score=False):\n\n batch_size = self.world_size\n\n if self.local_rank == 0:\n self.log_print(\"-> Evaluating {} in {}\".format('final', batch_size))\n\n errors = {}\n if self.opt.self_supervise:\n eval_types = ['scale-aware']\n else:\n eval_types = ['scale-ambiguous', 'scale-aware']\n for eval_type in eval_types:\n errors[eval_type] = {}\n\n for i in range(batch_size):\n while not osp.exists(osp.join(self.log_path, 'eval', '{}.pkl'.format(i))):\n time.sleep(10)\n time.sleep(5)\n with open(osp.join(self.log_path, 'eval', '{}.pkl'.format(i)), 'rb') as f:\n errors_i = pickle.load(f)\n for eval_type in eval_types:\n for camera_id in errors_i[eval_type].keys():\n if camera_id not in errors[eval_type].keys():\n errors[eval_type][camera_id] = []\n\n errors[eval_type][camera_id].append(errors_i[eval_type][camera_id])\n\n if self.opt.eval_occ and self.opt.use_semantic:\n if i == 0:\n errors['class_names'] = errors_i['class_names']\n errors['mIoU'] = [errors_i['mIoU']]\n errors['cnt'] = [errors_i['cnt']]\n else:\n errors['mIoU'].append(errors_i['mIoU'])\n errors['cnt'].append(errors_i['cnt'])\n elif self.opt.eval_occ:\n if i == 0:\n errors['acc'] = [errors_i['acc']]\n errors['comp'] = [errors_i['comp']]\n errors['f1'] = [errors_i['f1']]\n errors['acc_dist'] = [errors_i['acc_dist']]\n errors['cmpl_dist'] = [errors_i['cmpl_dist']]\n 
errors['cd'] = [errors_i['cd']]\n errors['cnt'] = [errors_i['cnt']]\n else:\n errors['acc'].append(errors_i['acc'])\n errors['comp'].append(errors_i['comp'])\n errors['f1'].append(errors_i['f1'])\n errors['cnt'].append(errors_i['cnt'])\n\n if self.opt.eval_occ and self.opt.use_semantic:\n class_names = errors['class_names']\n mIoUs = np.stack(errors['mIoU'], axis=0)\n cnts = np.array(errors['cnt'])\n weights = cnts / np.sum(cnts)\n IoUs = np.sum(mIoUs * np.expand_dims(weights, axis=1), axis=0)\n index_without_others = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16] # without 0 and 12\n index_without_empty = [1, 3, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16] # without 0, 2, 6, 12\n mIoU_without_others = np.mean(IoUs[index_without_others])\n mIoU_without_empty = np.mean(IoUs[index_without_empty])\n self.log_print(f\"Classes: {class_names}\")\n self.log_print(f\"IoUs: {IoUs}\")\n self.log_print(f\"mIoU without others: {mIoU_without_others}\")\n self.log_print(f\"mIoU without empty: {mIoU_without_empty}\")\n elif self.opt.eval_occ:\n acc = np.array(errors['acc'])\n comp = np.array(errors['comp'])\n f1 = np.array(errors['f1'])\n acc_dist = np.array(errors['acc_dist'])\n cmpl_dist = np.array(errors['cmpl_dist'])\n cd = np.array(errors['cd'])\n cnts = np.array(errors['cnt'])\n weights = cnts / np.sum(cnts)\n acc_mean = np.sum(acc * weights)\n comp_mean = np.sum(comp * weights)\n f1_mean = np.sum(f1 * weights)\n acc_dist_mean = np.sum(acc_dist * weights)\n cmpl_dist_mean = np.sum(cmpl_dist * weights)\n cd_mean = np.sum(cd * weights)\n self.log_print(f\"Precision: {acc_mean}\")\n self.log_print(f\"Recal: {comp_mean}\")\n self.log_print(f\"F1: {f1_mean}\")\n self.log_print(f\"Acc: {acc_dist_mean}\")\n self.log_print(f\"Comp: {cmpl_dist_mean}\")\n self.log_print(f\"CD: {cd_mean}\")\n\n num_sum = 0\n for eval_type in eval_types:\n for camera_id in errors[eval_type].keys():\n errors[eval_type][camera_id] = np.concatenate(errors[eval_type][camera_id], axis=0)\n\n if eval_type == 'scale-aware':\n num_sum += errors[eval_type][camera_id].shape[0]\n\n errors[eval_type][camera_id] = np.nanmean(errors[eval_type][camera_id], axis=0)\n\n for eval_type in eval_types:\n self.log_print(\"{} evaluation:\".format(eval_type))\n mean_errors_sum = 0\n for camera_id in errors[eval_type].keys():\n mean_errors_sum += errors[eval_type][camera_id]\n mean_errors_sum /= len(errors[eval_type].keys())\n errors[eval_type]['all'] = mean_errors_sum\n\n for camera_id in errors[eval_type].keys():\n mean_errors = errors[eval_type][camera_id]\n self.log_print(camera_id)\n self.log_print((\"{:>8} | \" * 7).format(\"abs_rel\", \"sq_rel\", \"rmse\", \"rmse_log\", \"a1\", \"a2\", \"a3\"))\n self.log_print((\"&{: 8.3f} \" * 7).format(*mean_errors.tolist()))\n\n if mean_errors_sum[0] < self.best_abs_rel:\n self.best_abs_rel = mean_errors_sum[0]\n self.best_result_str = (\"&{: 8.3f} \" * 7).format(*mean_errors_sum.tolist())\n self.log_print(\"best result ({}):\".format(eval_type))\n self.log_print((\"{:>8} | \" * 7).format(\"abs_rel\", \"sq_rel\", \"rmse\", \"rmse_log\", \"a1\", \"a2\", \"a3\"))\n self.log_print(self.best_result_str)\n\n assert num_sum == self.num_val\n\n\n def val(self, save_image=True):\n \"\"\"Validate the model on a single minibatch\n \"\"\"\n self.set_eval()\n\n errors = {}\n if self.opt.self_supervise:\n eval_types = ['scale-aware']\n else:\n eval_types = ['scale-ambiguous', 'scale-aware']\n for eval_type in eval_types:\n errors[eval_type] = {}\n\n self.models[\"encoder\"].eval()\n self.models[\"depth\"].eval()\n 
ratios_median = []\n\n print('begin eval!')\n total_time = []\n\n total_abs_rel_26 = []\n total_sq_rel_26 = []\n total_rmse_26 = []\n total_rmse_log_26 = []\n total_a1_26 = []\n total_a2_26 = []\n total_a3_26 = []\n\n # depth occupancy\n total_abs_rel_52 = []\n total_sq_rel_52 = []\n total_rmse_52 = []\n total_rmse_log_52 = []\n total_a1_52 = []\n total_a2_52 = []\n total_a3_52 = []\n \n if self.opt.use_semantic and self.opt.eval_occ:\n occ_eval_metrics = occ_metrics.Metric_mIoU(\n num_classes=18,\n use_lidar_mask=False,\n use_image_mask=True)\n elif self.opt.eval_occ:\n occ_eval_metrics = occ_metrics.Metric_FScore(\n use_image_mask=True)\n else:\n occ_eval_metrics = None\n\n total_evl_time = time.time()\n\n with torch.no_grad():\n loader = self.val_loader\n for idx, data in enumerate(loader):\n\n eps_time = time.time()\n\n input_color = data[(\"color\", 0, 0)].cuda()\n\n gt_depths = data[\"depth\"].cpu().numpy()\n camera_ids = data[\"id\"]\n\n features = self.models[\"encoder\"](input_color)\n\n output = self.models[\"depth\"](features, data, is_train=False)\n\n eps_time = time.time() - eps_time\n total_time.append(eps_time)\n\n if self.opt.volume_depth and self.opt.eval_occ:\n if self.opt.use_semantic:\n # mIoU, class IoU\n semantics_pred = output['pred_occ_logits'][0].argmax(0)\n occ_eval_metrics.add_batch(\n semantics_pred=semantics_pred.detach().cpu().numpy(),\n semantics_gt=data['semantics_3d'].detach().cpu().numpy(),\n mask_camera=data['mask_camera_3d'].detach().cpu().numpy().astype(bool),\n mask_lidar=None)\n \n if self.local_rank == 0 and idx % 20 == 0:\n _, miou, _ = occ_eval_metrics.count_miou()\n print('mIoU:', miou)\n\n else:\n # Acc, Comp, Precision, Recall, Chamfer, F1\n occ_prob = output['pred_occ_logits'][0, -1].sigmoid()\n if self.opt.last_free:\n occ_prob = 1.0 - occ_prob\n free_mask = occ_prob < 0.6 # TODO: threshold\n occ_pred = torch.zeros_like(free_mask, dtype=torch.long)\n occ_pred[free_mask] = 17\n occ_eval_metrics.add_batch(\n semantics_pred=occ_pred.detach().cpu().numpy(),\n semantics_gt=data['semantics_3d'].detach().cpu().numpy(),\n mask_camera=data['mask_camera_3d'].detach().cpu().numpy().astype(bool),\n mask_lidar=None)\n \n if self.local_rank == 0 and idx % 20 == 0:\n _, _, f1, _, _, cd, _ = occ_eval_metrics.count_fscore()\n print('f1:', f1)\n print('cd:', cd)\n\n if self.local_rank == 0 and idx % 100 == 0:\n print('single inference:(eps time:', eps_time, 'secs)')\n\n if self.opt.volume_depth:\n pred_disps_flip = output[(\"disp\", 0)]\n\n\n pred_disps = pred_disps_flip.cpu()[:, 0].numpy()\n\n concated_image_list = []\n concated_depth_list = []\n\n for i in range(pred_disps.shape[0]):\n\n camera_id = camera_ids[i]\n\n if camera_id not in list(errors['scale-aware']):\n errors['scale-aware'][camera_id] = []\n if 'scale-ambiguous' in errors.keys():\n errors['scale-ambiguous'][camera_id] = []\n\n gt_depth = gt_depths[i]\n gt_height, gt_width = gt_depth.shape[:2]\n\n pred_disp = pred_disps[i]\n\n if self.opt.volume_depth:\n pred_depth = pred_disp\n\n if self.local_rank == 0 and idx % 100 == 0:\n print('volume rendering depth: min {}, max {}'.format(pred_depth.min(), pred_depth.max()))\n\n pred_depth = cv2.resize(pred_depth, (gt_width, gt_height))\n\n\n mask = np.logical_and(gt_depth > self.opt.min_depth_test, gt_depth < self.opt.max_depth_test)\n\n pred_depth_color = visualize_depth(pred_depth.copy())\n color = (input_color[i].cpu().permute(1, 2, 0).numpy()) * 255\n color = color[..., [2, 1, 0]]\n\n concated_image_list.append(color)\n 
concated_depth_list.append(cv2.resize(pred_depth_color.copy(), (self.opt.width, self.opt.height)))\n\n pred_depth = pred_depth[mask]\n gt_depth = gt_depth[mask]\n\n ratio_median = np.median(gt_depth) / np.median(pred_depth)\n ratios_median.append(ratio_median)\n pred_depth_median = pred_depth.copy() * ratio_median\n\n if 'scale-ambiguous' in errors.keys():\n pred_depth_median[pred_depth_median < self.opt.min_depth_test] = self.opt.min_depth_test\n pred_depth_median[pred_depth_median > self.opt.max_depth_test] = self.opt.max_depth_test\n\n errors['scale-ambiguous'][camera_id].append(compute_errors(gt_depth, pred_depth_median))\n\n pred_depth[pred_depth < self.opt.min_depth_test] = self.opt.min_depth_test\n pred_depth[pred_depth > self.opt.max_depth_test] = self.opt.max_depth_test\n\n errors['scale-aware'][camera_id].append(compute_errors(gt_depth, pred_depth))\n\n\n save_frequency = self.opt.save_frequency\n\n if save_image and idx % save_frequency == 0 and self.local_rank == 0:\n print('idx:', idx)\n\n if self.opt.cam_N == 6:\n image_left_front_right = np.concatenate(\n (concated_image_list[1], concated_image_list[0], concated_image_list[5]), axis=1)\n image_left_rear_right = np.concatenate(\n (concated_image_list[4], concated_image_list[3], concated_image_list[2]), axis=1)\n\n image_surround_view = np.concatenate((image_left_front_right, image_left_rear_right), axis=0)\n\n depth_left_front_right = np.concatenate(\n (concated_depth_list[1], concated_depth_list[0], concated_depth_list[5]), axis=1)\n depth_right_rear_left = np.concatenate(\n (concated_depth_list[4], concated_depth_list[3], concated_depth_list[2]), axis=1)\n\n depth_surround_view = np.concatenate((depth_left_front_right, depth_right_rear_left), axis=0)\n surround_view = np.concatenate((image_surround_view, depth_surround_view), axis=0)\n \n elif self.opt.cam_N == 1:\n surround_view = np.concatenate((concated_image_list[0], concated_depth_list[0]), axis=0)\n\n # pdb.set_trace()\n cv2.imwrite('{}/visual_rgb_depth/{}-{}.jpg'.format(self.log_path, self.local_rank, idx), surround_view)\n\n\n vis_dic = {}\n vis_dic['opt'] = self.opt\n # vis_dic['depth_color'] = concated_depth_list\n # vis_dic['rgb'] = concated_image_list\n vis_dic['pose_spatial'] = data['pose_spatial'].detach().cpu().numpy()\n vis_dic['probability'] = output['density'].detach().cpu().numpy()\n \n np.save('{}/visual_feature/{}-{}.npy'.format(self.log_path, self.local_rank, idx), vis_dic)\n\n for eval_type in eval_types:\n for camera_id in errors[eval_type].keys():\n errors[eval_type][camera_id] = np.array(errors[eval_type][camera_id])\n \n if self.opt.use_semantic and self.opt.eval_occ:\n class_names, mIoU, cnt = occ_eval_metrics.count_miou()\n errors['class_names'] = class_names\n errors['mIoU'] = mIoU\n errors['cnt'] = cnt\n elif self.opt.eval_occ:\n acc, comp, f1, acc_dist, cmpl_dist, cd, cnt = occ_eval_metrics.count_fscore()\n errors['acc'] = acc\n errors['comp'] = comp\n errors['f1'] = f1\n errors['acc_dist'] = acc_dist\n errors['cmpl_dist'] = cmpl_dist\n errors['cd'] = cd\n errors['cnt'] = cnt\n\n with open(osp.join(self.log_path, 'eval', '{}.pkl'.format(self.local_rank)), 'wb') as f:\n pickle.dump(errors, f)\n\n eps_time = time.time() - total_evl_time\n\n if self.local_rank == 0:\n self.log_print('median: {}'.format(np.array(ratios_median).mean()))\n self.log_print('mean inference time: {}'.format(np.array(total_time).mean()))\n self.log_print('total evl time: {} h'.format(eps_time / 3600))\n\n print('finish eval!')\n\n self.set_train()\n\n def 
run_epoch(self):\n \"\"\"Run a single epoch of training and validation\n \"\"\"\n\n torch.autograd.set_detect_anomaly(True)\n if self.local_rank == 0:\n print(\"Training\")\n self.set_train()\n\n if self.local_rank == 0:\n self.log_print_train('self.epoch: {}, lr: {}'.format(self.epoch, self.model_lr_scheduler.get_last_lr()))\n\n scaler = torch.cuda.amp.GradScaler(enabled=self.opt.use_fp16, init_scale=2**8)\n len_loader = len(self.train_loader)\n for batch_idx, inputs in enumerate(self.train_loader):\n before_op_time = time.time()\n\n outputs, losses = self.process_batch(inputs)\n scaler.scale(losses[\"loss\"]).backward()\n scaler.step(self.model_optimizer)\n scaler.update()\n self.model_optimizer.zero_grad()\n\n duration = time.time() - before_op_time\n\n # log less frequently after the first 2000 steps to save time & disk space\n early_phase = batch_idx % self.opt.log_frequency == 0 and self.step < 2000\n late_phase = self.step % 200 == 0\n\n # pdb.set_trace()\n if early_phase or late_phase or (self.epoch == (self.opt.num_epochs - 1)):\n self.log_time(batch_idx, len_loader, duration, losses)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n if self.step > 0 and self.step % self.opt.eval_frequency == 0 and self.opt.eval_frequency > 0:\n self.save_model()\n self.val()\n if self.local_rank == 0:\n self.evaluation()\n\n self.step += 1\n\n self.model_lr_scheduler.step()\n\n def process_batch(self, inputs):\n \"\"\"Pass a minibatch through the network and generate images and losses\n \"\"\"\n\n self.to_device(inputs)\n with torch.cuda.amp.autocast(enabled=self.opt.use_fp16):\n features = self.models[\"encoder\"](inputs[\"color_aug\", 0, 0][:self.opt.cam_N])\n features = [feat.float() for feat in features]\n outputs = self.models[\"depth\"](features, inputs)\n # Note that for volume depth, outputs[(\"disp\", 0)] is depth\n\n if self.opt.self_supervise:\n self.generate_images_pred(inputs, outputs)\n losses = self.compute_self_supervised_losses(inputs, outputs)\n else:\n raise NotImplementedError\n \n return outputs, losses\n\n def generate_images_pred(self, inputs, outputs):\n \"\"\"Generate the warped (reprojected) color images for a minibatch.\n Generated images are saved into the `outputs` dictionary.\n \"\"\"\n for scale in self.opt.scales:\n disp = outputs[(\"disp\", scale)]\n if self.opt.v1_multiscale:\n source_scale = scale\n else:\n disp = F.interpolate(\n disp, [self.opt.height, self.opt.width], mode=\"bilinear\", align_corners=False)\n if scale == 0:\n outputs[(\"disp\", scale)] = disp\n source_scale = 0\n\n if self.opt.volume_depth:\n depth = disp\n else:\n depth = disp_to_depth(disp, self.opt.min_depth, self.opt.max_depth, abs=False)\n \n outputs[(\"depth\", 0, scale)] = depth\n\n for i, frame_id in enumerate(self.opt.frame_ids[1:]):\n\n T = inputs[(\"cam_T_cam\", frame_id)]\n\n cam_points = self.backproject_depth[source_scale](\n depth, inputs[(\"inv_K\", 0, source_scale)])\n pix_coords = self.project_3d[source_scale](\n cam_points, inputs[(\"K\", frame_id, source_scale)], T)\n\n outputs[(\"sample\", frame_id, scale)] = pix_coords\n\n outputs[(\"color\", frame_id, scale)] = F.grid_sample(\n inputs[(\"color\", frame_id, source_scale)],\n outputs[(\"sample\", frame_id, scale)],\n padding_mode=\"border\", align_corners=True)\n\n if not self.opt.disable_automasking:\n outputs[(\"color_identity\", frame_id, scale)] = \\\n inputs[(\"color\", frame_id, source_scale)]\n \n def compute_reprojection_loss(self, pred, target):\n \"\"\"Computes reprojection 
loss between a batch of predicted and target images\n \"\"\"\n abs_diff = torch.abs(target - pred)\n l1_loss = abs_diff.mean(1, True)\n\n if self.opt.no_ssim:\n reprojection_loss = l1_loss\n else:\n ssim_loss = self.ssim(pred, target).mean(1, True)\n reprojection_loss = 0.85 * ssim_loss + 0.15 * l1_loss\n\n return reprojection_loss\n\n def compute_self_supervised_losses(self, inputs, outputs):\n \"\"\"Compute the reprojection and smoothness losses for a minibatch\n \"\"\"\n losses = {}\n total_loss = 0\n\n for scale in self.opt.scales:\n loss = 0\n reprojection_losses = []\n if self.opt.use_fix_mask:\n output_mask = []\n\n\n if self.opt.v1_multiscale:\n source_scale = scale\n else:\n source_scale = 0\n\n disp = outputs[(\"disp\", scale)]\n if self.opt.volume_depth: # in fact, it is depth\n disp = 1.0 / (disp + 1e-7)\n color = inputs[(\"color\", 0, scale)]\n target = inputs[(\"color\", 0, source_scale)]\n for frame_id in self.opt.frame_ids[1:]:\n pred = outputs[(\"color\", frame_id, scale)]\n reprojection_losses.append(self.compute_reprojection_loss(pred, target))\n\n reprojection_losses = torch.cat(reprojection_losses, 1)\n\n if not self.opt.disable_automasking:\n identity_reprojection_losses = []\n for frame_id in self.opt.frame_ids[1:]:\n pred = inputs[(\"color\", frame_id, source_scale)]\n identity_reprojection_losses.append(\n self.compute_reprojection_loss(pred, target))\n\n identity_reprojection_losses = torch.cat(identity_reprojection_losses, 1)\n\n if self.opt.avg_reprojection:\n identity_reprojection_loss = identity_reprojection_losses.mean(1, keepdim=True)\n else:\n # save both images, and do min all at once below\n identity_reprojection_loss = identity_reprojection_losses\n\n elif self.opt.predictive_mask:\n # use the predicted mask\n mask = outputs[\"predictive_mask\"][\"disp\", scale]\n if not self.opt.v1_multiscale:\n mask = F.interpolate(\n mask, [self.opt.height, self.opt.width],\n mode=\"bilinear\", align_corners=False)\n\n reprojection_losses *= mask\n\n # add a loss pushing mask to 1 (using nn.BCELoss for stability)\n weighting_loss = 0.2 * nn.BCELoss()(mask, torch.ones(mask.shape).cuda())\n loss += weighting_loss.mean()\n\n if self.opt.use_fix_mask:\n reprojection_losses *= inputs[\"mask\"] #* output_mask\n\n if self.opt.avg_reprojection:\n reprojection_loss = reprojection_losses.mean(1, keepdim=True)\n else:\n reprojection_loss = reprojection_losses\n\n if not self.opt.disable_automasking:\n # add random numbers to break ties\n identity_reprojection_loss += torch.randn(\n identity_reprojection_loss.shape).cuda() * 0.00001\n\n combined = torch.cat((identity_reprojection_loss, reprojection_loss), dim=1)\n else:\n combined = reprojection_loss\n\n if combined.shape[1] == 1:\n to_optimise = combined\n else:\n to_optimise, idxs = torch.min(combined, dim=1)\n\n if not self.opt.disable_automasking:\n outputs[\"identity_selection/{}\".format(scale)] = (\n idxs > identity_reprojection_loss.shape[1] - 1).float()\n\n loss += to_optimise.mean()\n\n mean_disp = disp.mean(2, True).mean(3, True)\n norm_disp = disp / (mean_disp + 1e-7)\n smooth_loss = get_smooth_loss(norm_disp, color)\n\n loss += self.opt.disparity_smoothness * smooth_loss / (2 ** scale)\n \n losses[f\"loss_pe/{scale}\"] = loss\n\n semantic_loss = 0.0\n if self.opt.use_semantic and scale == 0:\n pred_semantic = outputs[(\"semantic\", 0)].float()\n target_semantic = inputs[\"semantic\"]\n \n # target_semantic[target_semantic > 0] = target_semantic[target_semantic > 0] - 1\n target_semantic[target_semantic > 0] = 
target_semantic[target_semantic > 0]\n \n target_semantic = F.interpolate(target_semantic.unsqueeze(1).float(), size=pred_semantic.shape[1:3], mode=\"nearest\").squeeze(1)\n \n semantic_loss += self.sem_criterion(pred_semantic.view(-1, self.opt.semantic_classes), target_semantic.view(-1).long())\n \n semantic_loss = self.opt.semantic_loss_weight * semantic_loss\n losses[f\"loss_semantic/{scale}\"] = semantic_loss\n \n loss_reg = 0\n for k, v in outputs.items():\n if isinstance(k, tuple) and k[0].startswith(\"loss\") and k[1] == scale:\n losses[f\"{k[0]}/{k[1]}\"] = v\n loss_reg += v\n \n total_loss += loss + loss_reg + semantic_loss\n losses[\"loss/{}\".format(scale)] = loss + loss_reg + semantic_loss\n\n total_loss /= self.num_scales\n losses[\"loss\"] = total_loss\n return losses\n\n def compute_depth_losses(self, inputs, outputs, losses):\n \"\"\"Compute depth metrics, to allow monitoring during training\n\n This isn't particularly accurate as it averages over the entire batch,\n so is only used to give an indication of validation performance\n \"\"\"\n depth_gt = inputs[\"depth_gt\"]\n mask = depth_gt > 0\n _, _, H, W = depth_gt.shape\n\n depth_pred = outputs[(\"depth\", 0, 0)].detach()\n depth_pred = torch.clamp(F.interpolate(\n depth_pred, [H, W], mode=\"bilinear\", align_corners=False), 1e-3, self.opt.max_depth)\n\n depth_gt = depth_gt[mask]\n depth_pred = depth_pred[mask]\n if 'cam_T_cam' not in inputs:\n depth_pred *= torch.median(depth_gt) / torch.median(depth_pred)\n\n depth_pred = torch.clamp(depth_pred, min=1e-3, max=self.opt.max_depth)\n\n depth_errors = compute_depth_errors(depth_gt, depth_pred)\n\n for i, metric in enumerate(self.depth_metric_names):\n losses[metric] = np.array(depth_errors[i].cpu())\n\n def log_time(self, batch_idx, len_loader, duration, loss_dict):\n \"\"\"Print a logging statement to the terminal\n \"\"\"\n if self.local_rank == 0:\n samples_per_sec = self.opt.batch_size / duration\n time_sofar = time.time() - self.start_time\n training_time_left = (self.num_total_steps / self.step - 1.0) * time_sofar if self.step > 0 else 0\n loss_info = ''\n for l, v in loss_dict.items():\n loss_info += \"{}: {:.4f} | \".format(l, v)\n print_string = \"epoch {:>2}/{:>2} | batch {:>5}/{:>5} | examples/s: {:3.1f}\" + \\\n \" | {}time elapsed: {} | time left: {}\"\n\n self.log_print_train(print_string.format(self.epoch+1, self.opt.num_epochs, batch_idx+1, len_loader, samples_per_sec, loss_info,\n sec_to_hm_str(time_sofar), sec_to_hm_str(training_time_left)))\n\n def log(self, mode, inputs, outputs, losses):\n \"\"\"Write an event to the tensorboard events file\n \"\"\"\n writer = self.writers[mode]\n for l, v in losses.items():\n writer.add_scalar(\"{}\".format(l), v, self.step)\n\n for j in range(min(4, self.opt.batch_size)): # write a maxmimum of four images\n for s in self.opt.scales:\n for frame_id in self.opt.frame_ids:\n writer.add_image(\n \"color_{}_{}/{}\".format(frame_id, s, j),\n inputs[(\"color\", frame_id, s)][j].data, self.step)\n if s == 0 and frame_id != 0:\n writer.add_image(\n \"color_pred_{}_{}/{}\".format(frame_id, s, j),\n outputs[(\"color\", frame_id, s)][j].data, self.step)\n\n writer.add_image(\n \"disp_{}/{}\".format(s, j),\n normalize_image(outputs[(\"disp\", s)][j]), self.step)\n\n if self.opt.predictive_mask:\n for f_idx, frame_id in enumerate(self.opt.frame_ids[1:]):\n writer.add_image(\n \"predictive_mask_{}_{}/{}\".format(frame_id, s, j),\n outputs[\"predictive_mask\"][(\"disp\", s)][j, f_idx][None, ...],\n self.step)\n\n elif not 
self.opt.disable_automasking:\n writer.add_image(\n \"automask_{}/{}\".format(s, j),\n outputs[\"identity_selection/{}\".format(s)][j][None, ...], self.step)\n\n def save_opts(self):\n \"\"\"Save options to disk so we know what we ran this experiment with\n \"\"\"\n models_dir = osp.join(self.log_path, \"models\")\n if not osp.exists(models_dir):\n os.makedirs(models_dir)\n os.makedirs(osp.join(self.log_path, \"eval\"), exist_ok=True)\n to_save = self.opt.__dict__.copy()\n\n with open(osp.join(models_dir, 'opt.json'), 'w') as f:\n json.dump(to_save, f, indent=2)\n\n def save_model(self):\n \"\"\"Save model weights to disk\n \"\"\"\n if self.local_rank == 0:\n save_folder = osp.join(self.log_path, \"models\", \"weights_{}\".format(self.step))\n if not osp.exists(save_folder):\n os.makedirs(save_folder)\n\n for model_name, model in self.models.items():\n save_path = osp.join(save_folder, \"{}.pth\".format(model_name))\n to_save = model.module.state_dict()\n if model_name == 'encoder':\n # save the sizes - these are needed at prediction time\n to_save['height'] = self.opt.height\n to_save['width'] = self.opt.width\n torch.save(to_save, save_path)\n\n save_path = osp.join(save_folder, \"{}.pth\".format(\"adam\"))\n torch.save(self.model_optimizer.state_dict(), save_path)\n\n def load_model(self):\n \"\"\"Load model(s) from disk\n \"\"\"\n self.opt.load_weights_folder = osp.expanduser(self.opt.load_weights_folder)\n\n if self.local_rank == 0:\n assert osp.isdir(self.opt.load_weights_folder), \\\n \"Cannot find folder {}\".format(self.opt.load_weights_folder)\n self.log_print(\"loading model from folder {}\".format(self.opt.load_weights_folder))\n\n for n in self.opt.models_to_load:\n\n if self.local_rank == 0:\n self.log_print(\"Loading {} weights...\".format(n))\n path = osp.join(self.opt.load_weights_folder, \"{}.pth\".format(n))\n model_dict = self.models[n].state_dict()\n pretrained_dict = torch.load(path, map_location=torch.device('cpu'))\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n self.models[n].load_state_dict(model_dict)\n\n def load_optimizer(self):\n # loading adam state\n optimizer_load_path = osp.join(self.opt.load_weights_folder, \"adam.pth\")\n if osp.isfile(optimizer_load_path):\n if self.local_rank == 0:\n self.log_print(\"Loading Adam weights\")\n optimizer_dict = torch.load(optimizer_load_path)\n self.model_optimizer.load_state_dict(optimizer_dict)\n else:\n self.log_print(\"Cannot find Adam weights so Adam is randomly initialized\")\n\n def log_print(self, str):\n print(str)\n with open(osp.join(self.log_path, 'log.txt'), 'a') as f:\n f.writelines(str + '\\n')\n\n\n def log_print_train(self, str):\n print(str)\n with open(osp.join(self.log_path, 'log_train.txt'), 'a') as f:\n f.writelines(str + '\\n')" }, { "identifier": "MonodepthOptions", "path": "options.py", "snippet": "class MonodepthOptions:\n\n def __init__(self):\n self.parser = configargparse.ArgumentParser()\n\n self.parser.add_argument('--config', is_config_file=True,\n help='config file path')\n self.parser.add_argument(\"--debug\", action=\"store_true\")\n self.parser.add_argument(\"--eval_only\",\n help=\"if set, only evaluation\",\n action=\"store_true\")\n self.parser.add_argument(\"--local_rank\", default=0, type=int)\n\n # paths\n self.parser.add_argument(\"--dataroot\", \n type=str, \n help=\"the root for the ddad and nuscenes dataset\",\n default='data/nuscenes')\n self.parser.add_argument(\"--model_name\",\n type=str,\n 
help=\"the name of the folder to save the model in\",\n default=\"nusc-depth\")\n self.parser.add_argument(\"--log_dir\",\n type=str,\n help=\"log directory\",\n default='logs')\n\n # method options\n self.parser.add_argument(\"--volume_depth\",\n action=\"store_true\",\n help=\"if set, using the depth from volume rendering, rather than the depthdecoder\")\n self.parser.add_argument(\"--voxels_size\",\n type=int, nargs='+',\n default=[24, 300, 300],\n help='the resolution of the voxel for rendering: Z, Y, X = 24, 300, 300')\n self.parser.add_argument(\"--real_size\",\n type=float, nargs='+',\n default=[-40, 40, -40, 40, -1, 5.4],\n help='the real scale of the voxel: XMIN, XMAX, ZMIN, ZMAX, YMIN, YMAX')\n\n self.parser.add_argument(\"--self_supervise\", \n action=\"store_true\",\n help=\"if set, using the self-supervised mothod\")\n self.parser.add_argument(\"--eval_occ\",\n action=\"store_true\",\n help=\"if set, eval the occupancy score\")\n\n self.parser.add_argument(\"--contracted_coord\",\n action=\"store_true\",\n help=\"if set, using the contracted coordinate\")\n self.parser.add_argument(\"--contracted_ratio\",\n type=float, default=0.8,\n help=\"the threshold for the contracted coordinate\")\n self.parser.add_argument(\"--infinite_range\",\n action=\"store_true\",\n help=\"sampling strategy for contracted coordinate\")\n\n self.parser.add_argument(\"--auxiliary_frame\",\n action=\"store_true\",\n help=\"if set, using auxiliary images\")\n\n self.parser.add_argument(\"--use_semantic\",\n help=\"if set, use semantic segmentation for training\",\n action=\"store_true\")\n self.parser.add_argument(\"--semantic_classes\",\n type=int, default=17,\n help=\"the output channel of the semantic_head\")\n self.parser.add_argument(\"--class_frequencies\",\n nargs=\"+\", type=int,\n default= [0, 18164955800, 734218842, 2336187448, 10996756106, 395414611,\n 638889260, 651023279, 117046208, 985341947, 7303776233, 43131984997,\n 0, 9342674867, 9525824742, 51204832885, 22848065525, 31841090018])\n self.parser.add_argument(\"--semantic_sample_ratio\",\n type=float, default=0.25,\n help=\"sample less points for semantic to accelerate the training\")\n self.parser.add_argument(\"--last_free\",\n action=\"store_true\",\n help=\"if the last class is free space\")\n\n # DATASET options\n self.parser.add_argument(\"--dataset\",\n type=str,\n help=\"dataset to train on\",\n default=\"nusc\")\n self.parser.add_argument(\"--cam_N\",\n type=int,\n help=\"THE NUM OF CAM\",\n default=6)\n self.parser.add_argument(\"--use_fix_mask\",\n help=\"if set, use self-occlusion mask (only for DDAD)\",\n action=\"store_true\")\n\n # OPTIMIZATION options\n self.parser.add_argument(\"--use_fp16\",\n action=\"store_true\",\n help=\"if set, using mixed precision training\")\n self.parser.add_argument(\"--batch_size\",\n type=int,\n help=\"batch size\",\n default=6)\n self.parser.add_argument(\"--B\",\n type=int,\n help=\"real batch size\",\n default=1)\n\n self.parser.add_argument(\"--learning_rate\",\n type=float,\n help=\"learning rate\",\n default=1e-4)\n self.parser.add_argument(\"--weight_decay\",\n type=float,\n help=\"weight decay\",\n default=0.0)\n\n self.parser.add_argument(\"--num_epochs\",\n type=int,\n help=\"number of epochs\",\n default=12)\n self.parser.add_argument(\"--scheduler_step_size\",\n type=int,\n help=\"step size of the scheduler\",\n default=10)\n\n # DEPTH ESTIMATION options\n self.parser.add_argument(\"--frame_ids\",\n nargs=\"+\",\n type=int,\n help=\"frames to load, currently only support for 3 
frames\",\n default=[0, -1, 1])\n self.parser.add_argument(\"--v1_multiscale\",\n help=\"if set, uses monodepth v1 multiscale\",\n action=\"store_true\")\n self.parser.add_argument(\"--avg_reprojection\",\n help=\"if set, uses average reprojection loss\",\n action=\"store_true\")\n self.parser.add_argument(\"--disable_automasking\",\n help=\"if set, doesn't do auto-masking\",\n action=\"store_true\")\n self.parser.add_argument(\"--predictive_mask\",\n help=\"if set, uses a predictive masking scheme as in Zhou et al\",\n action=\"store_true\")\n self.parser.add_argument(\"--no_ssim\",\n help=\"if set, disables ssim in the loss\",\n action=\"store_true\")\n self.parser.add_argument(\"--weights_init\",\n type=str,\n help=\"pretrained or scratch\",\n default=\"pretrained\",\n choices=[\"pretrained\", \"scratch\"])\n self.parser.add_argument(\"--scales\",\n type=int, nargs=\"+\",\n help=\"scales used in the loss\",\n default=[0])\n # self.parser.add_argument(\"--pose_model_input\",\n # type=str,\n # help=\"how many images the pose network gets\",\n # default=\"pairs\",\n # choices=[\"pairs\", \"all\"])\n # self.parser.add_argument(\"--pose_model_type\",\n # type=str,\n # help=\"normal or shared\",\n # default=\"separate_resnet\")\n\n # SYSTEM options\n self.parser.add_argument(\"--num_workers\",\n type=int,\n help=\"number of dataloader workers\",\n default=4)\n\n # LOADING options\n self.parser.add_argument(\"--load_weights_folder\",\n type=str,\n help=\"name of model to load\")\n\n self.parser.add_argument(\"--models_to_load\",\n nargs=\"+\",\n type=str,\n help=\"models to load\",\n default=[\"encoder\", \"depth\"])\n\n # LOGGING options\n self.parser.add_argument(\"--log_frequency\",\n type=int,\n help=\"number of steps between each log\",\n default=25)\n self.parser.add_argument(\"--save_frequency\",\n type=int,\n help=\"save frequency for visualization\",\n default=100)\n self.parser.add_argument(\"--eval_frequency\",\n type=int,\n help=\"number of steps between each save\",\n default=1000)\n\n # RENDERING options\n self.parser.add_argument(\"--render_type\",\n type=str,\n help=\"rednering by the density or probability [density, prob, neus, volsdf]\",\n default='prob')\n self.parser.add_argument(\"--stepsize\",\n type=float,\n help=\"stepsize (in voxel) for rendering\",\n default=0.5)\n\n # HYPERPARAMETERS\n self.parser.add_argument(\"--semantic_loss_weight\",\n type=float, default=0.05,\n help=\"the weight for the semantic loss\")\n self.parser.add_argument(\"--disparity_smoothness\",\n type=float,\n help=\"disparity smoothness weight\",\n default=0.001)\n \n self.parser.add_argument(\"--height_ori\",\n type=int,\n help=\"original input image height\",\n default=1216)\n self.parser.add_argument(\"--width_ori\",\n type=int,\n help=\"original input image width\",\n default=1936)\n\n self.parser.add_argument(\"--height\",\n type=int, default=336,\n help=\"input image height\")\n self.parser.add_argument(\"--width\",\n type=int, default=672,\n help=\"input image width\")\n self.parser.add_argument(\"--render_h\",\n type=int, default=224,\n help=\"input image height\")\n self.parser.add_argument(\"--render_w\",\n type=int, default=352,\n help=\"input image width\")\n \n self.parser.add_argument(\"--weight_entropy_last\",\n type=float, default=0.0)\n self.parser.add_argument(\"--weight_distortion\",\n type=float, default=0.0)\n self.parser.add_argument(\"--weight_sparse_reg\",\n type=float, default=0.0)\n\n self.parser.add_argument(\"--min_depth\",\n type=float,\n help=\"minimum depth\",\n 
default=0.1)\n self.parser.add_argument(\"--max_depth\",\n type=float,\n help=\"maximum depth\",\n default=80.0)\n\n self.parser.add_argument(\"--min_depth_test\",\n type=float,\n help=\"the min depth for the evaluation\",\n default=0.1)\n self.parser.add_argument(\"--max_depth_test\",\n type=float,\n help=\"the max depth for the evaluation\",\n default=80.0)\n\n self.parser.add_argument(\"--en_lr\",\n type=float,\n help=\"learning rate for encoder in volume rendering\",\n default=0.0001)\n self.parser.add_argument(\"--de_lr\",\n type=float,\n help=\"learning rate for decoder (3D CNN) in volume rendering\",\n default=0.001)\n\n self.parser.add_argument(\"--aggregation\",\n type=str,\n help=\"the type of the feature aggregation [mlp 3dcnn 2dcnn]\",\n default= '3dcnn')\n\n self.parser.add_argument(\"--position\", type=str,\n help=\"rednering by the density or probability [No, embedding, embedding1]\",\n default='embedding')\n\n self.parser.add_argument(\"--data_type\", type=str,\n help=\" data size for traing and testing - > [train_all, all, mini, tiny]\",\n default='all')\n\n self.parser.add_argument(\"--input_channel\", type=int, help=\"the final feature channel in the encoder\",\n default=64)\n\n self.parser.add_argument(\"--con_channel\", type=int, help=\"the final feature channel in the encoder\",\n default=16)\n\n self.parser.add_argument(\"--out_channel\", type=int, help=\"the output channel of the voxel\",\n default=1)\n\n self.parser.add_argument(\"--encoder\", type=str,\n help=\"the method for the comparison [101, 50]\", default='101')\n\n\n def parse(self):\n self.options = self.parser.parse_args()\n return self.options" } ]
import torch import numpy as np from runner import Runer from options import MonodepthOptions
13,641
# Copyright Niantic 2019. Patent Pending. All rights reserved. # # This software is licensed under the terms of the Monodepth2 licence # which allows for non-commercial use only, the full terms of which are made # available in the LICENSE file. from __future__ import absolute_import, division, print_function options = MonodepthOptions() opts = options.parse() def setup_seed(seed): torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) torch.backends.cudnn.deterministic = True if __name__ == "__main__": setup_seed(42)
# Copyright Niantic 2019. Patent Pending. All rights reserved. # # This software is licensed under the terms of the Monodepth2 licence # which allows for non-commercial use only, the full terms of which are made # available in the LICENSE file. from __future__ import absolute_import, division, print_function options = MonodepthOptions() opts = options.parse() def setup_seed(seed): torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) torch.backends.cudnn.deterministic = True if __name__ == "__main__": setup_seed(42)
trainer = Runer(opts)
0
2023-12-14 15:00:21+00:00
16k
modelscope/richdreamer
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", 
log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "IdentityFirstStage", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "VQModelInterface", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "DDIMSampler", "path": "extern/ldm_zero123/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n 
num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"DDIM Sampler\", total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index 
== total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc=\"Encoding 
Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec" }, { "identifier": "CrossAttention", "path": "extern/ldm_zero123/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = 
self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)" }, { "identifier": "extract_into_tensor", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "make_beta_schedule", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "noise_like", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DiagonalGaussianDistribution", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(\n device=self.parameters.device\n )\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(\n device=self.parameters.device\n )\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.0])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3],\n )\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var\n - 1.0\n - self.logvar\n + other.logvar,\n dim=[1, 2, 3],\n )\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return 
torch.Tensor([0.0])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims,\n )\n\n def mode(self):\n return self.mean" }, { "identifier": "normal_kl", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "LitEma", "path": "extern/ldm_zero123/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError(\"Decay must be between 0 and 1\")\n\n self.m_name2s_name = {}\n self.register_buffer(\"decay\", torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n \"num_updates\",\n torch.tensor(0, dtype=torch.int)\n if use_num_upates\n else torch.tensor(-1, dtype=torch.int),\n )\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace(\".\", \"\")\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay * (shadow_params[sname] - m_param[key])\n )\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "count_params", "path": "extern/ldm_zero123/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "default", "path": "extern/ldm_zero123/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "exists", "path": "extern/ldm_zero123/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "extern/ldm_zero123/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "isimage", "path": "extern/ldm_zero123/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "ismap", "path": "extern/ldm_zero123/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "log_txt_as_img", "path": "extern/ldm_zero123/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype(\"data/DejaVuSans.ttf\", size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(\n xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc)\n )\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "mean_flat", "path": "extern/ldm_zero123/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" } ]
import itertools import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn from contextlib import contextmanager, nullcontext from einops import rearrange, repeat from functools import partial from omegaconf import ListConfig from pytorch_lightning.utilities.rank_zero import rank_zero_only from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from extern.ldm_zero123.models.autoencoder import (AutoencoderKL, IdentityFirstStage, VQModelInterface,) from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler from extern.ldm_zero123.modules.attention import CrossAttention from extern.ldm_zero123.modules.diffusionmodules.util import ( extract_into_tensor, make_beta_schedule, noise_like,) from extern.ldm_zero123.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl,) from extern.ldm_zero123.modules.ema import LitEma from extern.ldm_zero123.util import (count_params, default, exists, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat,)
12,580
new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, 
return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None):
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None):
next_line: noise = default(noise, lambda: torch.randn_like(x_start))
gold_snippet_index: 12
created_at: 2023-12-06 07:53:11+00:00
level: 16k
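The record above stops inside DDPM.q_sample, and the gold next_line simply restores the default Gaussian noise. For orientation, here is a minimal, self-contained sketch of the forward-diffusion step q(x_t | x_0) that the method computes, assuming the standard DDPM formulation; the plain linear beta ramp and the standalone q_sample helper below are illustrative stand-ins for the record's make_beta_schedule and buffer-based implementation, not the repository code itself.

import torch

# Illustrative linear beta ramp (the record's make_beta_schedule differs in detail).
T = 1000
betas = torch.linspace(1e-4, 2e-2, T)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def q_sample(x_start, t, noise=None):
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
    noise = torch.randn_like(x_start) if noise is None else noise
    shape = (-1,) + (1,) * (x_start.dim() - 1)   # broadcast per-sample coefficients
    return (alphas_cumprod[t].sqrt().view(shape) * x_start
            + (1.0 - alphas_cumprod[t]).sqrt().view(shape) * noise)

x0 = torch.randn(4, 3, 64, 64)          # fake image batch
t = torch.randint(0, T, (4,))           # one timestep per sample
print(q_sample(x0, t).shape)            # torch.Size([4, 3, 64, 64])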
repo_name: rehg-lab/RAVE
file_path: annotator/oneformer/detectron2/export/caffe2_patch.py
[ { "identifier": "poolers", "path": "annotator/oneformer/detectron2/modeling/poolers.py", "snippet": "def assign_boxes_to_levels(\r\n box_lists: List[Boxes],\r\n min_level: int,\r\n max_level: int,\r\n canonical_box_size: int,\r\n canonical_level: int,\r\n):\r\ndef _convert_boxes_to_pooler_format(boxes: torch.Tensor, sizes: torch.Tensor) -> torch.Tensor:\r\ndef convert_boxes_to_pooler_format(box_lists: List[Boxes]):\r\ndef _create_zeros(\r\n batch_target: Optional[torch.Tensor],\r\n channels: int,\r\n height: int,\r\n width: int,\r\n like_tensor: torch.Tensor,\r\n) -> torch.Tensor:\r\n def __init__(\r\n self,\r\n output_size,\r\n scales,\r\n sampling_ratio,\r\n pooler_type,\r\n canonical_box_size=224,\r\n canonical_level=4,\r\n ):\r\n def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]):\r\nclass ROIPooler(nn.Module):\r" }, { "identifier": "rpn", "path": "annotator/oneformer/detectron2/modeling/proposal_generator/rpn.py", "snippet": "RPN_HEAD_REGISTRY = Registry(\"RPN_HEAD\")\r\n N = pred_anchor_deltas[0].shape[0]\r\n B = anchors_i.tensor.size(1)\r\ndef build_rpn_head(cfg, input_shape):\r\n def __init__(\r\n self, *, in_channels: int, num_anchors: int, box_dim: int = 4, conv_dims: List[int] = (-1,)\r\n ):\r\n def _get_rpn_conv(self, in_channels, out_channels):\r\n def from_config(cls, cfg, input_shape):\r\n def forward(self, features: List[torch.Tensor]):\r\n def __init__(\r\n self,\r\n *,\r\n in_features: List[str],\r\n head: nn.Module,\r\n anchor_generator: nn.Module,\r\n anchor_matcher: Matcher,\r\n box2box_transform: Box2BoxTransform,\r\n batch_size_per_image: int,\r\n positive_fraction: float,\r\n pre_nms_topk: Tuple[float, float],\r\n post_nms_topk: Tuple[float, float],\r\n nms_thresh: float = 0.7,\r\n min_box_size: float = 0.0,\r\n anchor_boundary_thresh: float = -1.0,\r\n loss_weight: Union[float, Dict[str, float]] = 1.0,\r\n box_reg_loss_type: str = \"smooth_l1\",\r\n smooth_l1_beta: float = 0.0,\r\n ):\r\n def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):\r\n def _subsample_labels(self, label):\r\n def label_and_sample_anchors(\r\n self, anchors: List[Boxes], gt_instances: List[Instances]\r\n ) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:\r\n def losses(\r\n self,\r\n anchors: List[Boxes],\r\n pred_objectness_logits: List[torch.Tensor],\r\n gt_labels: List[torch.Tensor],\r\n pred_anchor_deltas: List[torch.Tensor],\r\n gt_boxes: List[torch.Tensor],\r\n ) -> Dict[str, torch.Tensor]:\r\n def forward(\r\n self,\r\n images: ImageList,\r\n features: Dict[str, torch.Tensor],\r\n gt_instances: Optional[List[Instances]] = None,\r\n ):\r\n def predict_proposals(\r\n self,\r\n anchors: List[Boxes],\r\n pred_objectness_logits: List[torch.Tensor],\r\n pred_anchor_deltas: List[torch.Tensor],\r\n image_sizes: List[Tuple[int, int]],\r\n ):\r\n def _decode_proposals(self, anchors: List[Boxes], pred_anchor_deltas: List[torch.Tensor]):\r\nclass StandardRPNHead(nn.Module):\r\nclass RPN(nn.Module):\r" }, { "identifier": "keypoint_head", "path": "annotator/oneformer/detectron2/modeling/roi_heads/keypoint_head.py", "snippet": "_TOTAL_SKIPPED = 0\r\nROI_KEYPOINT_HEAD_REGISTRY = Registry(\"ROI_KEYPOINT_HEAD\")\r\n N, K, H, W = pred_keypoint_logits.shape\r\ndef build_keypoint_head(cfg, input_shape):\r\ndef keypoint_rcnn_loss(pred_keypoint_logits, instances, normalizer):\r\ndef keypoint_rcnn_inference(pred_keypoint_logits: torch.Tensor, pred_instances: List[Instances]):\r\n def __init__(self, *, num_keypoints, loss_weight=1.0, loss_normalizer=1.0):\r\n def from_config(cls, 
cfg, input_shape):\r\n def forward(self, x, instances: List[Instances]):\r\n def layers(self, x):\r\n def __init__(self, input_shape, *, num_keypoints, conv_dims, **kwargs):\r\n def from_config(cls, cfg, input_shape):\r\n def layers(self, x):\r\nclass BaseKeypointRCNNHead(nn.Module):\r\nclass KRCNNConvDeconvUpsampleHead(BaseKeypointRCNNHead, nn.Sequential):\r" }, { "identifier": "mask_head", "path": "annotator/oneformer/detectron2/modeling/roi_heads/mask_head.py", "snippet": "ROI_MASK_HEAD_REGISTRY = Registry(\"ROI_MASK_HEAD\")\r\ndef mask_rcnn_loss(pred_mask_logits: torch.Tensor, instances: List[Instances], vis_period: int = 0):\r\ndef mask_rcnn_inference(pred_mask_logits: torch.Tensor, pred_instances: List[Instances]):\r\n def __init__(self, *, loss_weight: float = 1.0, vis_period: int = 0):\r\n def from_config(cls, cfg, input_shape):\r\n def forward(self, x, instances: List[Instances]):\r\n def layers(self, x):\r\n def __init__(self, input_shape: ShapeSpec, *, num_classes, conv_dims, conv_norm=\"\", **kwargs):\r\n def from_config(cls, cfg, input_shape):\r\n def layers(self, x):\r\ndef build_mask_head(cfg, input_shape):\r\nclass BaseMaskRCNNHead(nn.Module):\r\nclass MaskRCNNConvUpsampleHead(BaseMaskRCNNHead, nn.Sequential):\r" }, { "identifier": "FastRCNNOutputLayers", "path": "annotator/oneformer/detectron2/modeling/roi_heads/fast_rcnn.py", "snippet": "class FastRCNNOutputLayers(nn.Module):\r\n \"\"\"\r\n Two linear layers for predicting Fast R-CNN outputs:\r\n\r\n 1. proposal-to-detection box regression deltas\r\n 2. classification scores\r\n \"\"\"\r\n\r\n @configurable\r\n def __init__(\r\n self,\r\n input_shape: ShapeSpec,\r\n *,\r\n box2box_transform,\r\n num_classes: int,\r\n test_score_thresh: float = 0.0,\r\n test_nms_thresh: float = 0.5,\r\n test_topk_per_image: int = 100,\r\n cls_agnostic_bbox_reg: bool = False,\r\n smooth_l1_beta: float = 0.0,\r\n box_reg_loss_type: str = \"smooth_l1\",\r\n loss_weight: Union[float, Dict[str, float]] = 1.0,\r\n use_fed_loss: bool = False,\r\n use_sigmoid_ce: bool = False,\r\n get_fed_loss_cls_weights: Optional[Callable] = None,\r\n fed_loss_num_classes: int = 50,\r\n ):\r\n \"\"\"\r\n NOTE: this interface is experimental.\r\n\r\n Args:\r\n input_shape (ShapeSpec): shape of the input feature to this module\r\n box2box_transform (Box2BoxTransform or Box2BoxTransformRotated):\r\n num_classes (int): number of foreground classes\r\n test_score_thresh (float): threshold to filter predictions results.\r\n test_nms_thresh (float): NMS threshold for prediction results.\r\n test_topk_per_image (int): number of top predictions to produce per image.\r\n cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression\r\n smooth_l1_beta (float): transition point from L1 to L2 loss. Only used if\r\n `box_reg_loss_type` is \"smooth_l1\"\r\n box_reg_loss_type (str): Box regression loss type. One of: \"smooth_l1\", \"giou\",\r\n \"diou\", \"ciou\"\r\n loss_weight (float|dict): weights to use for losses. Can be single float for weighting\r\n all losses, or a dict of individual weightings. Valid dict keys are:\r\n * \"loss_cls\": applied to classification loss\r\n * \"loss_box_reg\": applied to box regression loss\r\n use_fed_loss (bool): whether to use federated loss which samples additional negative\r\n classes to calculate the loss\r\n use_sigmoid_ce (bool): whether to calculate the loss using weighted average of binary\r\n cross entropy with logits. 
This could be used together with federated loss\r\n get_fed_loss_cls_weights (Callable): a callable which takes dataset name and frequency\r\n weight power, and returns the probabilities to sample negative classes for\r\n federated loss. The implementation can be found in\r\n detectron2/data/detection_utils.py\r\n fed_loss_num_classes (int): number of federated classes to keep in total\r\n \"\"\"\r\n super().__init__()\r\n if isinstance(input_shape, int): # some backward compatibility\r\n input_shape = ShapeSpec(channels=input_shape)\r\n self.num_classes = num_classes\r\n input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1)\r\n # prediction layer for num_classes foreground classes and one background class (hence + 1)\r\n self.cls_score = nn.Linear(input_size, num_classes + 1)\r\n num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes\r\n box_dim = len(box2box_transform.weights)\r\n self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)\r\n\r\n nn.init.normal_(self.cls_score.weight, std=0.01)\r\n nn.init.normal_(self.bbox_pred.weight, std=0.001)\r\n for l in [self.cls_score, self.bbox_pred]:\r\n nn.init.constant_(l.bias, 0)\r\n\r\n self.box2box_transform = box2box_transform\r\n self.smooth_l1_beta = smooth_l1_beta\r\n self.test_score_thresh = test_score_thresh\r\n self.test_nms_thresh = test_nms_thresh\r\n self.test_topk_per_image = test_topk_per_image\r\n self.box_reg_loss_type = box_reg_loss_type\r\n if isinstance(loss_weight, float):\r\n loss_weight = {\"loss_cls\": loss_weight, \"loss_box_reg\": loss_weight}\r\n self.loss_weight = loss_weight\r\n self.use_fed_loss = use_fed_loss\r\n self.use_sigmoid_ce = use_sigmoid_ce\r\n self.fed_loss_num_classes = fed_loss_num_classes\r\n\r\n if self.use_fed_loss:\r\n assert self.use_sigmoid_ce, \"Please use sigmoid cross entropy loss with federated loss\"\r\n fed_loss_cls_weights = get_fed_loss_cls_weights()\r\n assert (\r\n len(fed_loss_cls_weights) == self.num_classes\r\n ), \"Please check the provided fed_loss_cls_weights. Their size should match num_classes\"\r\n self.register_buffer(\"fed_loss_cls_weights\", fed_loss_cls_weights)\r\n\r\n @classmethod\r\n def from_config(cls, cfg, input_shape):\r\n return {\r\n \"input_shape\": input_shape,\r\n \"box2box_transform\": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS),\r\n # fmt: off\r\n \"num_classes\" : cfg.MODEL.ROI_HEADS.NUM_CLASSES,\r\n \"cls_agnostic_bbox_reg\" : cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,\r\n \"smooth_l1_beta\" : cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA,\r\n \"test_score_thresh\" : cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,\r\n \"test_nms_thresh\" : cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,\r\n \"test_topk_per_image\" : cfg.TEST.DETECTIONS_PER_IMAGE,\r\n \"box_reg_loss_type\" : cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE,\r\n \"loss_weight\" : {\"loss_box_reg\": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT}, # noqa\r\n \"use_fed_loss\" : cfg.MODEL.ROI_BOX_HEAD.USE_FED_LOSS,\r\n \"use_sigmoid_ce\" : cfg.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE,\r\n \"get_fed_loss_cls_weights\" : lambda: get_fed_loss_cls_weights(dataset_names=cfg.DATASETS.TRAIN, freq_weight_power=cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT_POWER), # noqa\r\n \"fed_loss_num_classes\" : cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_NUM_CLASSES,\r\n # fmt: on\r\n }\r\n\r\n def forward(self, x):\r\n \"\"\"\r\n Args:\r\n x: per-region features of shape (N, ...) 
for N bounding boxes to predict.\r\n\r\n Returns:\r\n (Tensor, Tensor):\r\n First tensor: shape (N,K+1), scores for each of the N box. Each row contains the\r\n scores for K object categories and 1 background class.\r\n\r\n Second tensor: bounding box regression deltas for each box. Shape is shape (N,Kx4),\r\n or (N,4) for class-agnostic regression.\r\n \"\"\"\r\n if x.dim() > 2:\r\n x = torch.flatten(x, start_dim=1)\r\n scores = self.cls_score(x)\r\n proposal_deltas = self.bbox_pred(x)\r\n return scores, proposal_deltas\r\n\r\n def losses(self, predictions, proposals):\r\n \"\"\"\r\n Args:\r\n predictions: return values of :meth:`forward()`.\r\n proposals (list[Instances]): proposals that match the features that were used\r\n to compute predictions. The fields ``proposal_boxes``, ``gt_boxes``,\r\n ``gt_classes`` are expected.\r\n\r\n Returns:\r\n Dict[str, Tensor]: dict of losses\r\n \"\"\"\r\n scores, proposal_deltas = predictions\r\n\r\n # parse classification outputs\r\n gt_classes = (\r\n cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)\r\n )\r\n _log_classification_stats(scores, gt_classes)\r\n\r\n # parse box regression outputs\r\n if len(proposals):\r\n proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4\r\n assert not proposal_boxes.requires_grad, \"Proposals should not require gradients!\"\r\n # If \"gt_boxes\" does not exist, the proposals must be all negative and\r\n # should not be included in regression loss computation.\r\n # Here we just use proposal_boxes as an arbitrary placeholder because its\r\n # value won't be used in self.box_reg_loss().\r\n gt_boxes = cat(\r\n [(p.gt_boxes if p.has(\"gt_boxes\") else p.proposal_boxes).tensor for p in proposals],\r\n dim=0,\r\n )\r\n else:\r\n proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)\r\n\r\n if self.use_sigmoid_ce:\r\n loss_cls = self.sigmoid_cross_entropy_loss(scores, gt_classes)\r\n else:\r\n loss_cls = cross_entropy(scores, gt_classes, reduction=\"mean\")\r\n\r\n losses = {\r\n \"loss_cls\": loss_cls,\r\n \"loss_box_reg\": self.box_reg_loss(\r\n proposal_boxes, gt_boxes, proposal_deltas, gt_classes\r\n ),\r\n }\r\n return {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}\r\n\r\n # Implementation from https://github.com/xingyizhou/CenterNet2/blob/master/projects/CenterNet2/centernet/modeling/roi_heads/fed_loss.py # noqa\r\n # with slight modifications\r\n def get_fed_loss_classes(self, gt_classes, num_fed_loss_classes, num_classes, weight):\r\n \"\"\"\r\n Args:\r\n gt_classes: a long tensor of shape R that contains the gt class label of each proposal.\r\n num_fed_loss_classes: minimum number of classes to keep when calculating federated loss.\r\n Will sample negative classes if number of unique gt_classes is smaller than this value.\r\n num_classes: number of foreground classes\r\n weight: probabilities used to sample negative classes\r\n\r\n Returns:\r\n Tensor:\r\n classes to keep when calculating the federated loss, including both unique gt\r\n classes and sampled negative classes.\r\n \"\"\"\r\n unique_gt_classes = torch.unique(gt_classes)\r\n prob = unique_gt_classes.new_ones(num_classes + 1).float()\r\n prob[-1] = 0\r\n if len(unique_gt_classes) < num_fed_loss_classes:\r\n prob[:num_classes] = weight.float().clone()\r\n prob[unique_gt_classes] = 0\r\n sampled_negative_classes = torch.multinomial(\r\n prob, num_fed_loss_classes - len(unique_gt_classes), replacement=False\r\n )\r\n fed_loss_classes = 
torch.cat([unique_gt_classes, sampled_negative_classes])\r\n else:\r\n fed_loss_classes = unique_gt_classes\r\n return fed_loss_classes\r\n\r\n # Implementation from https://github.com/xingyizhou/CenterNet2/blob/master/projects/CenterNet2/centernet/modeling/roi_heads/custom_fast_rcnn.py#L113 # noqa\r\n # with slight modifications\r\n def sigmoid_cross_entropy_loss(self, pred_class_logits, gt_classes):\r\n \"\"\"\r\n Args:\r\n pred_class_logits: shape (N, K+1), scores for each of the N box. Each row contains the\r\n scores for K object categories and 1 background class\r\n gt_classes: a long tensor of shape R that contains the gt class label of each proposal.\r\n \"\"\"\r\n if pred_class_logits.numel() == 0:\r\n return pred_class_logits.new_zeros([1])[0]\r\n\r\n N = pred_class_logits.shape[0]\r\n K = pred_class_logits.shape[1] - 1\r\n\r\n target = pred_class_logits.new_zeros(N, K + 1)\r\n target[range(len(gt_classes)), gt_classes] = 1\r\n target = target[:, :K]\r\n\r\n cls_loss = F.binary_cross_entropy_with_logits(\r\n pred_class_logits[:, :-1], target, reduction=\"none\"\r\n )\r\n\r\n if self.use_fed_loss:\r\n fed_loss_classes = self.get_fed_loss_classes(\r\n gt_classes,\r\n num_fed_loss_classes=self.fed_loss_num_classes,\r\n num_classes=K,\r\n weight=self.fed_loss_cls_weights,\r\n )\r\n fed_loss_classes_mask = fed_loss_classes.new_zeros(K + 1)\r\n fed_loss_classes_mask[fed_loss_classes] = 1\r\n fed_loss_classes_mask = fed_loss_classes_mask[:K]\r\n weight = fed_loss_classes_mask.view(1, K).expand(N, K).float()\r\n else:\r\n weight = 1\r\n\r\n loss = torch.sum(cls_loss * weight) / N\r\n return loss\r\n\r\n def box_reg_loss(self, proposal_boxes, gt_boxes, pred_deltas, gt_classes):\r\n \"\"\"\r\n Args:\r\n proposal_boxes/gt_boxes are tensors with the same shape (R, 4 or 5).\r\n pred_deltas has shape (R, 4 or 5), or (R, num_classes * (4 or 5)).\r\n gt_classes is a long tensor of shape R, the gt class label of each proposal.\r\n R shall be the number of proposals.\r\n \"\"\"\r\n box_dim = proposal_boxes.shape[1] # 4 or 5\r\n # Regression loss is only computed for foreground proposals (those matched to a GT)\r\n fg_inds = nonzero_tuple((gt_classes >= 0) & (gt_classes < self.num_classes))[0]\r\n if pred_deltas.shape[1] == box_dim: # cls-agnostic regression\r\n fg_pred_deltas = pred_deltas[fg_inds]\r\n else:\r\n fg_pred_deltas = pred_deltas.view(-1, self.num_classes, box_dim)[\r\n fg_inds, gt_classes[fg_inds]\r\n ]\r\n\r\n loss_box_reg = _dense_box_regression_loss(\r\n [proposal_boxes[fg_inds]],\r\n self.box2box_transform,\r\n [fg_pred_deltas.unsqueeze(0)],\r\n [gt_boxes[fg_inds]],\r\n ...,\r\n self.box_reg_loss_type,\r\n self.smooth_l1_beta,\r\n )\r\n\r\n # The reg loss is normalized using the total number of regions (R), not the number\r\n # of foreground regions even though the box regression loss is only defined on\r\n # foreground regions. Why? Because doing so gives equal training influence to\r\n # each foreground example. To see how, consider two different minibatches:\r\n # (1) Contains a single foreground region\r\n # (2) Contains 100 foreground regions\r\n # If we normalize by the number of foreground regions, the single example in\r\n # minibatch (1) will be given 100 times as much influence as each foreground\r\n # example in minibatch (2). 
Normalizing by the total number of regions, R,\r\n # means that the single example in minibatch (1) and each of the 100 examples\r\n # in minibatch (2) are given equal influence.\r\n return loss_box_reg / max(gt_classes.numel(), 1.0) # return 0 if empty\r\n\r\n def inference(self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]):\r\n \"\"\"\r\n Args:\r\n predictions: return values of :meth:`forward()`.\r\n proposals (list[Instances]): proposals that match the features that were\r\n used to compute predictions. The ``proposal_boxes`` field is expected.\r\n\r\n Returns:\r\n list[Instances]: same as `fast_rcnn_inference`.\r\n list[Tensor]: same as `fast_rcnn_inference`.\r\n \"\"\"\r\n boxes = self.predict_boxes(predictions, proposals)\r\n scores = self.predict_probs(predictions, proposals)\r\n image_shapes = [x.image_size for x in proposals]\r\n return fast_rcnn_inference(\r\n boxes,\r\n scores,\r\n image_shapes,\r\n self.test_score_thresh,\r\n self.test_nms_thresh,\r\n self.test_topk_per_image,\r\n )\r\n\r\n def predict_boxes_for_gt_classes(self, predictions, proposals):\r\n \"\"\"\r\n Args:\r\n predictions: return values of :meth:`forward()`.\r\n proposals (list[Instances]): proposals that match the features that were used\r\n to compute predictions. The fields ``proposal_boxes``, ``gt_classes`` are expected.\r\n\r\n Returns:\r\n list[Tensor]:\r\n A list of Tensors of predicted boxes for GT classes in case of\r\n class-specific box head. Element i of the list has shape (Ri, B), where Ri is\r\n the number of proposals for image i and B is the box dimension (4 or 5)\r\n \"\"\"\r\n if not len(proposals):\r\n return []\r\n scores, proposal_deltas = predictions\r\n proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)\r\n N, B = proposal_boxes.shape\r\n predict_boxes = self.box2box_transform.apply_deltas(\r\n proposal_deltas, proposal_boxes\r\n ) # Nx(KxB)\r\n\r\n K = predict_boxes.shape[1] // B\r\n if K > 1:\r\n gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0)\r\n # Some proposals are ignored or have a background class. Their gt_classes\r\n # cannot be used as index.\r\n gt_classes = gt_classes.clamp_(0, K - 1)\r\n\r\n predict_boxes = predict_boxes.view(N, K, B)[\r\n torch.arange(N, dtype=torch.long, device=predict_boxes.device), gt_classes\r\n ]\r\n num_prop_per_image = [len(p) for p in proposals]\r\n return predict_boxes.split(num_prop_per_image)\r\n\r\n def predict_boxes(\r\n self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]\r\n ):\r\n \"\"\"\r\n Args:\r\n predictions: return values of :meth:`forward()`.\r\n proposals (list[Instances]): proposals that match the features that were\r\n used to compute predictions. The ``proposal_boxes`` field is expected.\r\n\r\n Returns:\r\n list[Tensor]:\r\n A list of Tensors of predicted class-specific or class-agnostic boxes\r\n for each image. 
Element i has shape (Ri, K * B) or (Ri, B), where Ri is\r\n the number of proposals for image i and B is the box dimension (4 or 5)\r\n \"\"\"\r\n if not len(proposals):\r\n return []\r\n _, proposal_deltas = predictions\r\n num_prop_per_image = [len(p) for p in proposals]\r\n proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)\r\n predict_boxes = self.box2box_transform.apply_deltas(\r\n proposal_deltas,\r\n proposal_boxes,\r\n ) # Nx(KxB)\r\n return predict_boxes.split(num_prop_per_image)\r\n\r\n def predict_probs(\r\n self, predictions: Tuple[torch.Tensor, torch.Tensor], proposals: List[Instances]\r\n ):\r\n \"\"\"\r\n Args:\r\n predictions: return values of :meth:`forward()`.\r\n proposals (list[Instances]): proposals that match the features that were\r\n used to compute predictions.\r\n\r\n Returns:\r\n list[Tensor]:\r\n A list of Tensors of predicted class probabilities for each image.\r\n Element i has shape (Ri, K + 1), where Ri is the number of proposals for image i.\r\n \"\"\"\r\n scores, _ = predictions\r\n num_inst_per_image = [len(p) for p in proposals]\r\n if self.use_sigmoid_ce:\r\n probs = scores.sigmoid()\r\n else:\r\n probs = F.softmax(scores, dim=-1)\r\n return probs.split(num_inst_per_image, dim=0)\r" }, { "identifier": "Caffe2Compatible", "path": "annotator/oneformer/detectron2/export/c10.py", "snippet": "class Caffe2Compatible(object):\r\n \"\"\"\r\n A model can inherit this class to indicate that it can be traced and deployed with caffe2.\r\n \"\"\"\r\n\r\n def _get_tensor_mode(self):\r\n return self._tensor_mode\r\n\r\n def _set_tensor_mode(self, v):\r\n self._tensor_mode = v\r\n\r\n tensor_mode = property(_get_tensor_mode, _set_tensor_mode)\r\n \"\"\"\r\n If true, the model expects C2-style tensor only inputs/outputs format.\r\n \"\"\"\r" }, { "identifier": "Caffe2FastRCNNOutputsInference", "path": "annotator/oneformer/detectron2/export/c10.py", "snippet": "class Caffe2FastRCNNOutputsInference:\r\n def __init__(self, tensor_mode):\r\n self.tensor_mode = tensor_mode # whether the output is caffe2 tensor mode\r\n\r\n def __call__(self, box_predictor, predictions, proposals):\r\n \"\"\"equivalent to FastRCNNOutputLayers.inference\"\"\"\r\n num_classes = box_predictor.num_classes\r\n score_thresh = box_predictor.test_score_thresh\r\n nms_thresh = box_predictor.test_nms_thresh\r\n topk_per_image = box_predictor.test_topk_per_image\r\n is_rotated = len(box_predictor.box2box_transform.weights) == 5\r\n\r\n if is_rotated:\r\n box_dim = 5\r\n assert box_predictor.box2box_transform.weights[4] == 1, (\r\n \"The weights for Rotated BBoxTransform in C2 have only 4 dimensions,\"\r\n + \" thus enforcing the angle weight to be 1 for now\"\r\n )\r\n box2box_transform_weights = box_predictor.box2box_transform.weights[:4]\r\n else:\r\n box_dim = 4\r\n box2box_transform_weights = box_predictor.box2box_transform.weights\r\n\r\n class_logits, box_regression = predictions\r\n if num_classes + 1 == class_logits.shape[1]:\r\n class_prob = F.softmax(class_logits, -1)\r\n else:\r\n assert num_classes == class_logits.shape[1]\r\n class_prob = F.sigmoid(class_logits)\r\n # BoxWithNMSLimit will infer num_classes from the shape of the class_prob\r\n # So append a zero column as placeholder for the background class\r\n class_prob = torch.cat((class_prob, torch.zeros(class_prob.shape[0], 1)), dim=1)\r\n\r\n assert box_regression.shape[1] % box_dim == 0\r\n cls_agnostic_bbox_reg = box_regression.shape[1] // box_dim == 1\r\n\r\n input_tensor_mode = 
proposals[0].proposal_boxes.tensor.shape[1] == box_dim + 1\r\n\r\n proposal_boxes = proposals[0].proposal_boxes\r\n if isinstance(proposal_boxes, Caffe2Boxes):\r\n rois = Caffe2Boxes.cat([p.proposal_boxes for p in proposals])\r\n elif isinstance(proposal_boxes, RotatedBoxes):\r\n rois = RotatedBoxes.cat([p.proposal_boxes for p in proposals])\r\n elif isinstance(proposal_boxes, Boxes):\r\n rois = Boxes.cat([p.proposal_boxes for p in proposals])\r\n else:\r\n raise NotImplementedError(\r\n 'Expected proposals[0].proposal_boxes to be type \"Boxes\", '\r\n f\"instead got {type(proposal_boxes)}\"\r\n )\r\n\r\n device, dtype = rois.tensor.device, rois.tensor.dtype\r\n if input_tensor_mode:\r\n im_info = proposals[0].image_size\r\n rois = rois.tensor\r\n else:\r\n im_info = torch.tensor(\r\n [[sz[0], sz[1], 1.0] for sz in [x.image_size for x in proposals]]\r\n )\r\n batch_ids = cat(\r\n [\r\n torch.full((b, 1), i, dtype=dtype, device=device)\r\n for i, b in enumerate(len(p) for p in proposals)\r\n ],\r\n dim=0,\r\n )\r\n rois = torch.cat([batch_ids, rois.tensor], dim=1)\r\n\r\n roi_pred_bbox, roi_batch_splits = torch.ops._caffe2.BBoxTransform(\r\n to_device(rois, \"cpu\"),\r\n to_device(box_regression, \"cpu\"),\r\n to_device(im_info, \"cpu\"),\r\n weights=box2box_transform_weights,\r\n apply_scale=True,\r\n rotated=is_rotated,\r\n angle_bound_on=True,\r\n angle_bound_lo=-180,\r\n angle_bound_hi=180,\r\n clip_angle_thresh=1.0,\r\n legacy_plus_one=False,\r\n )\r\n roi_pred_bbox = to_device(roi_pred_bbox, device)\r\n roi_batch_splits = to_device(roi_batch_splits, device)\r\n\r\n nms_outputs = torch.ops._caffe2.BoxWithNMSLimit(\r\n to_device(class_prob, \"cpu\"),\r\n to_device(roi_pred_bbox, \"cpu\"),\r\n to_device(roi_batch_splits, \"cpu\"),\r\n score_thresh=float(score_thresh),\r\n nms=float(nms_thresh),\r\n detections_per_im=int(topk_per_image),\r\n soft_nms_enabled=False,\r\n soft_nms_method=\"linear\",\r\n soft_nms_sigma=0.5,\r\n soft_nms_min_score_thres=0.001,\r\n rotated=is_rotated,\r\n cls_agnostic_bbox_reg=cls_agnostic_bbox_reg,\r\n input_boxes_include_bg_cls=False,\r\n output_classes_include_bg_cls=False,\r\n legacy_plus_one=False,\r\n )\r\n roi_score_nms = to_device(nms_outputs[0], device)\r\n roi_bbox_nms = to_device(nms_outputs[1], device)\r\n roi_class_nms = to_device(nms_outputs[2], device)\r\n roi_batch_splits_nms = to_device(nms_outputs[3], device)\r\n roi_keeps_nms = to_device(nms_outputs[4], device)\r\n roi_keeps_size_nms = to_device(nms_outputs[5], device)\r\n if not self.tensor_mode:\r\n roi_class_nms = roi_class_nms.to(torch.int64)\r\n\r\n roi_batch_ids = cat(\r\n [\r\n torch.full((b, 1), i, dtype=dtype, device=device)\r\n for i, b in enumerate(int(x.item()) for x in roi_batch_splits_nms)\r\n ],\r\n dim=0,\r\n )\r\n\r\n roi_class_nms = alias(roi_class_nms, \"class_nms\")\r\n roi_score_nms = alias(roi_score_nms, \"score_nms\")\r\n roi_bbox_nms = alias(roi_bbox_nms, \"bbox_nms\")\r\n roi_batch_splits_nms = alias(roi_batch_splits_nms, \"batch_splits_nms\")\r\n roi_keeps_nms = alias(roi_keeps_nms, \"keeps_nms\")\r\n roi_keeps_size_nms = alias(roi_keeps_size_nms, \"keeps_size_nms\")\r\n\r\n results = InstancesList(\r\n im_info=im_info,\r\n indices=roi_batch_ids[:, 0],\r\n extra_fields={\r\n \"pred_boxes\": Caffe2Boxes(roi_bbox_nms),\r\n \"scores\": roi_score_nms,\r\n \"pred_classes\": roi_class_nms,\r\n },\r\n )\r\n\r\n if not self.tensor_mode:\r\n results = InstancesList.to_d2_instances_list(results)\r\n batch_splits = roi_batch_splits_nms.int().tolist()\r\n kept_indices = 
list(roi_keeps_nms.to(torch.int64).split(batch_splits))\r\n else:\r\n results = [results]\r\n kept_indices = [roi_keeps_nms]\r\n\r\n return results, kept_indices\r" }, { "identifier": "Caffe2KeypointRCNNInference", "path": "annotator/oneformer/detectron2/export/c10.py", "snippet": "class Caffe2KeypointRCNNInference:\r\n def __init__(self, use_heatmap_max_keypoint):\r\n self.use_heatmap_max_keypoint = use_heatmap_max_keypoint\r\n\r\n def __call__(self, pred_keypoint_logits, pred_instances):\r\n # just return the keypoint heatmap for now,\r\n # there will be option to call HeatmapMaxKeypointOp\r\n output = alias(pred_keypoint_logits, \"kps_score\")\r\n if all(isinstance(x, InstancesList) for x in pred_instances):\r\n assert len(pred_instances) == 1\r\n if self.use_heatmap_max_keypoint:\r\n device = output.device\r\n output = torch.ops._caffe2.HeatmapMaxKeypoint(\r\n to_device(output, \"cpu\"),\r\n pred_instances[0].pred_boxes.tensor,\r\n should_output_softmax=True, # worth make it configerable?\r\n )\r\n output = to_device(output, device)\r\n output = alias(output, \"keypoints_out\")\r\n pred_instances[0].set(\"pred_keypoints\", output)\r\n return pred_keypoint_logits\r" }, { "identifier": "Caffe2MaskRCNNInference", "path": "annotator/oneformer/detectron2/export/c10.py", "snippet": "class Caffe2MaskRCNNInference:\r\n def __call__(self, pred_mask_logits, pred_instances):\r\n \"\"\"equivalent to mask_head.mask_rcnn_inference\"\"\"\r\n if all(isinstance(x, InstancesList) for x in pred_instances):\r\n assert len(pred_instances) == 1\r\n mask_probs_pred = pred_mask_logits.sigmoid()\r\n mask_probs_pred = alias(mask_probs_pred, \"mask_fcn_probs\")\r\n pred_instances[0].set(\"pred_masks\", mask_probs_pred)\r\n else:\r\n mask_rcnn_inference(pred_mask_logits, pred_instances)\r" }, { "identifier": "Caffe2ROIPooler", "path": "annotator/oneformer/detectron2/export/c10.py", "snippet": "class Caffe2ROIPooler(Caffe2Compatible, poolers.ROIPooler):\r\n @staticmethod\r\n def c2_preprocess(box_lists):\r\n assert all(isinstance(x, Boxes) for x in box_lists)\r\n if all(isinstance(x, Caffe2Boxes) for x in box_lists):\r\n # input is pure-tensor based\r\n assert len(box_lists) == 1\r\n pooler_fmt_boxes = box_lists[0].tensor\r\n else:\r\n pooler_fmt_boxes = poolers.convert_boxes_to_pooler_format(box_lists)\r\n return pooler_fmt_boxes\r\n\r\n def forward(self, x, box_lists):\r\n assert not self.training\r\n\r\n pooler_fmt_boxes = self.c2_preprocess(box_lists)\r\n num_level_assignments = len(self.level_poolers)\r\n\r\n if num_level_assignments == 1:\r\n if isinstance(self.level_poolers[0], ROIAlignRotated):\r\n c2_roi_align = torch.ops._caffe2.RoIAlignRotated\r\n aligned = True\r\n else:\r\n c2_roi_align = torch.ops._caffe2.RoIAlign\r\n aligned = self.level_poolers[0].aligned\r\n\r\n x0 = x[0]\r\n if x0.is_quantized:\r\n x0 = x0.dequantize()\r\n\r\n out = c2_roi_align(\r\n x0,\r\n pooler_fmt_boxes,\r\n order=\"NCHW\",\r\n spatial_scale=float(self.level_poolers[0].spatial_scale),\r\n pooled_h=int(self.output_size[0]),\r\n pooled_w=int(self.output_size[1]),\r\n sampling_ratio=int(self.level_poolers[0].sampling_ratio),\r\n aligned=aligned,\r\n )\r\n return out\r\n\r\n device = pooler_fmt_boxes.device\r\n assert (\r\n self.max_level - self.min_level + 1 == 4\r\n ), \"Currently DistributeFpnProposals only support 4 levels\"\r\n fpn_outputs = torch.ops._caffe2.DistributeFpnProposals(\r\n to_device(pooler_fmt_boxes, \"cpu\"),\r\n roi_canonical_scale=self.canonical_box_size,\r\n roi_canonical_level=self.canonical_level,\r\n 
roi_max_level=self.max_level,\r\n roi_min_level=self.min_level,\r\n legacy_plus_one=False,\r\n )\r\n fpn_outputs = [to_device(x, device) for x in fpn_outputs]\r\n\r\n rois_fpn_list = fpn_outputs[:-1]\r\n rois_idx_restore_int32 = fpn_outputs[-1]\r\n\r\n roi_feat_fpn_list = []\r\n for roi_fpn, x_level, pooler in zip(rois_fpn_list, x, self.level_poolers):\r\n if isinstance(pooler, ROIAlignRotated):\r\n c2_roi_align = torch.ops._caffe2.RoIAlignRotated\r\n aligned = True\r\n else:\r\n c2_roi_align = torch.ops._caffe2.RoIAlign\r\n aligned = bool(pooler.aligned)\r\n\r\n if x_level.is_quantized:\r\n x_level = x_level.dequantize()\r\n\r\n roi_feat_fpn = c2_roi_align(\r\n x_level,\r\n roi_fpn,\r\n order=\"NCHW\",\r\n spatial_scale=float(pooler.spatial_scale),\r\n pooled_h=int(self.output_size[0]),\r\n pooled_w=int(self.output_size[1]),\r\n sampling_ratio=int(pooler.sampling_ratio),\r\n aligned=aligned,\r\n )\r\n roi_feat_fpn_list.append(roi_feat_fpn)\r\n\r\n roi_feat_shuffled = cat(roi_feat_fpn_list, dim=0)\r\n assert roi_feat_shuffled.numel() > 0 and rois_idx_restore_int32.numel() > 0, (\r\n \"Caffe2 export requires tracing with a model checkpoint + input that can produce valid\"\r\n \" detections. But no detections were obtained with the given checkpoint and input!\"\r\n )\r\n roi_feat = torch.ops._caffe2.BatchPermutation(roi_feat_shuffled, rois_idx_restore_int32)\r\n return roi_feat\r" }, { "identifier": "Caffe2RPN", "path": "annotator/oneformer/detectron2/export/c10.py", "snippet": "class Caffe2RPN(Caffe2Compatible, rpn.RPN):\r\n @classmethod\r\n def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):\r\n ret = super(Caffe2Compatible, cls).from_config(cfg, input_shape)\r\n assert tuple(cfg.MODEL.RPN.BBOX_REG_WEIGHTS) == (1.0, 1.0, 1.0, 1.0) or tuple(\r\n cfg.MODEL.RPN.BBOX_REG_WEIGHTS\r\n ) == (1.0, 1.0, 1.0, 1.0, 1.0)\r\n return ret\r\n\r\n def _generate_proposals(\r\n self, images, objectness_logits_pred, anchor_deltas_pred, gt_instances=None\r\n ):\r\n assert isinstance(images, ImageList)\r\n if self.tensor_mode:\r\n im_info = images.image_sizes\r\n else:\r\n im_info = torch.tensor([[im_sz[0], im_sz[1], 1.0] for im_sz in images.image_sizes]).to(\r\n images.tensor.device\r\n )\r\n assert isinstance(im_info, torch.Tensor)\r\n\r\n rpn_rois_list = []\r\n rpn_roi_probs_list = []\r\n for scores, bbox_deltas, cell_anchors_tensor, feat_stride in zip(\r\n objectness_logits_pred,\r\n anchor_deltas_pred,\r\n [b for (n, b) in self.anchor_generator.cell_anchors.named_buffers()],\r\n self.anchor_generator.strides,\r\n ):\r\n scores = scores.detach()\r\n bbox_deltas = bbox_deltas.detach()\r\n\r\n rpn_rois, rpn_roi_probs = torch.ops._caffe2.GenerateProposals(\r\n scores,\r\n bbox_deltas,\r\n im_info,\r\n cell_anchors_tensor,\r\n spatial_scale=1.0 / feat_stride,\r\n pre_nms_topN=self.pre_nms_topk[self.training],\r\n post_nms_topN=self.post_nms_topk[self.training],\r\n nms_thresh=self.nms_thresh,\r\n min_size=self.min_box_size,\r\n # correct_transform_coords=True, # deprecated argument\r\n angle_bound_on=True, # Default\r\n angle_bound_lo=-180,\r\n angle_bound_hi=180,\r\n clip_angle_thresh=1.0, # Default\r\n legacy_plus_one=False,\r\n )\r\n rpn_rois_list.append(rpn_rois)\r\n rpn_roi_probs_list.append(rpn_roi_probs)\r\n\r\n # For FPN in D2, in RPN all proposals from different levels are concated\r\n # together, ranked and picked by top post_nms_topk. 
Then in ROIPooler\r\n # it calculates level_assignments and calls the RoIAlign from\r\n # the corresponding level.\r\n\r\n if len(objectness_logits_pred) == 1:\r\n rpn_rois = rpn_rois_list[0]\r\n rpn_roi_probs = rpn_roi_probs_list[0]\r\n else:\r\n assert len(rpn_rois_list) == len(rpn_roi_probs_list)\r\n rpn_post_nms_topN = self.post_nms_topk[self.training]\r\n\r\n device = rpn_rois_list[0].device\r\n input_list = [to_device(x, \"cpu\") for x in (rpn_rois_list + rpn_roi_probs_list)]\r\n\r\n # TODO remove this after confirming rpn_max_level/rpn_min_level\r\n # is not needed in CollectRpnProposals.\r\n feature_strides = list(self.anchor_generator.strides)\r\n rpn_min_level = int(math.log2(feature_strides[0]))\r\n rpn_max_level = int(math.log2(feature_strides[-1]))\r\n assert (rpn_max_level - rpn_min_level + 1) == len(\r\n rpn_rois_list\r\n ), \"CollectRpnProposals requires continuous levels\"\r\n\r\n rpn_rois = torch.ops._caffe2.CollectRpnProposals(\r\n input_list,\r\n # NOTE: in current implementation, rpn_max_level and rpn_min_level\r\n # are not needed, only the subtraction of two matters and it\r\n # can be infer from the number of inputs. Keep them now for\r\n # consistency.\r\n rpn_max_level=2 + len(rpn_rois_list) - 1,\r\n rpn_min_level=2,\r\n rpn_post_nms_topN=rpn_post_nms_topN,\r\n )\r\n rpn_rois = to_device(rpn_rois, device)\r\n rpn_roi_probs = []\r\n\r\n proposals = self.c2_postprocess(im_info, rpn_rois, rpn_roi_probs, self.tensor_mode)\r\n return proposals, {}\r\n\r\n def forward(self, images, features, gt_instances=None):\r\n assert not self.training\r\n features = [features[f] for f in self.in_features]\r\n objectness_logits_pred, anchor_deltas_pred = self.rpn_head(features)\r\n return self._generate_proposals(\r\n images,\r\n objectness_logits_pred,\r\n anchor_deltas_pred,\r\n gt_instances,\r\n )\r\n\r\n @staticmethod\r\n def c2_postprocess(im_info, rpn_rois, rpn_roi_probs, tensor_mode):\r\n proposals = InstancesList(\r\n im_info=im_info,\r\n indices=rpn_rois[:, 0],\r\n extra_fields={\r\n \"proposal_boxes\": Caffe2Boxes(rpn_rois),\r\n \"objectness_logits\": (torch.Tensor, rpn_roi_probs),\r\n },\r\n )\r\n if not tensor_mode:\r\n proposals = InstancesList.to_d2_instances_list(proposals)\r\n else:\r\n proposals = [proposals]\r\n return proposals\r" } ]
import_statement:
import contextlib
import torch
from unittest import mock
from annotator.oneformer.detectron2.modeling import poolers
from annotator.oneformer.detectron2.modeling.proposal_generator import rpn
from annotator.oneformer.detectron2.modeling.roi_heads import keypoint_head, mask_head
from annotator.oneformer.detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
from .c10 import (
    Caffe2Compatible,
    Caffe2FastRCNNOutputsInference,
    Caffe2KeypointRCNNInference,
    Caffe2MaskRCNNInference,
    Caffe2ROIPooler,
    Caffe2RPN,
)
token_num: 11,187
# Copyright (c) Facebook, Inc. and its affiliates. class GenericMixin(object): pass class Caffe2CompatibleConverter(object): """ A GenericUpdater which implements the `create_from` interface, by modifying module object and assign it with another class replaceCls. """ def __init__(self, replaceCls): self.replaceCls = replaceCls def create_from(self, module): # update module's class to the new class assert isinstance(module, torch.nn.Module) if issubclass(self.replaceCls, GenericMixin): # replaceCls should act as mixin, create a new class on-the-fly new_class = type( "{}MixedWith{}".format(self.replaceCls.__name__, module.__class__.__name__), (self.replaceCls, module.__class__), {}, # {"new_method": lambda self: ...}, ) module.__class__ = new_class else: # replaceCls is complete class, this allow arbitrary class swap module.__class__ = self.replaceCls # initialize Caffe2Compatible if isinstance(module, Caffe2Compatible): module.tensor_mode = False return module def patch(model, target, updater, *args, **kwargs): """ recursively (post-order) update all modules with the target type and its subclasses, make a initialization/composition/inheritance/... via the updater.create_from. """ for name, module in model.named_children(): model._modules[name] = patch(module, target, updater, *args, **kwargs) if isinstance(model, target): return updater.create_from(model, *args, **kwargs) return model def patch_generalized_rcnn(model): ccc = Caffe2CompatibleConverter model = patch(model, rpn.RPN, ccc(Caffe2RPN)) model = patch(model, poolers.ROIPooler, ccc(Caffe2ROIPooler)) return model @contextlib.contextmanager def mock_fastrcnn_outputs_inference( tensor_mode, check=True, box_predictor_type=FastRCNNOutputLayers ): with mock.patch.object( box_predictor_type, "inference", autospec=True, side_effect=Caffe2FastRCNNOutputsInference(tensor_mode), ) as mocked_func: yield if check: assert mocked_func.call_count > 0 @contextlib.contextmanager def mock_mask_rcnn_inference(tensor_mode, patched_module, check=True): with mock.patch( "{}.mask_rcnn_inference".format(patched_module), side_effect=Caffe2MaskRCNNInference() ) as mocked_func: yield if check: assert mocked_func.call_count > 0 @contextlib.contextmanager def mock_keypoint_rcnn_inference(tensor_mode, patched_module, use_heatmap_max_keypoint, check=True): with mock.patch( "{}.keypoint_rcnn_inference".format(patched_module),
# Copyright (c) Facebook, Inc. and its affiliates. class GenericMixin(object): pass class Caffe2CompatibleConverter(object): """ A GenericUpdater which implements the `create_from` interface, by modifying module object and assign it with another class replaceCls. """ def __init__(self, replaceCls): self.replaceCls = replaceCls def create_from(self, module): # update module's class to the new class assert isinstance(module, torch.nn.Module) if issubclass(self.replaceCls, GenericMixin): # replaceCls should act as mixin, create a new class on-the-fly new_class = type( "{}MixedWith{}".format(self.replaceCls.__name__, module.__class__.__name__), (self.replaceCls, module.__class__), {}, # {"new_method": lambda self: ...}, ) module.__class__ = new_class else: # replaceCls is complete class, this allow arbitrary class swap module.__class__ = self.replaceCls # initialize Caffe2Compatible if isinstance(module, Caffe2Compatible): module.tensor_mode = False return module def patch(model, target, updater, *args, **kwargs): """ recursively (post-order) update all modules with the target type and its subclasses, make a initialization/composition/inheritance/... via the updater.create_from. """ for name, module in model.named_children(): model._modules[name] = patch(module, target, updater, *args, **kwargs) if isinstance(model, target): return updater.create_from(model, *args, **kwargs) return model def patch_generalized_rcnn(model): ccc = Caffe2CompatibleConverter model = patch(model, rpn.RPN, ccc(Caffe2RPN)) model = patch(model, poolers.ROIPooler, ccc(Caffe2ROIPooler)) return model @contextlib.contextmanager def mock_fastrcnn_outputs_inference( tensor_mode, check=True, box_predictor_type=FastRCNNOutputLayers ): with mock.patch.object( box_predictor_type, "inference", autospec=True, side_effect=Caffe2FastRCNNOutputsInference(tensor_mode), ) as mocked_func: yield if check: assert mocked_func.call_count > 0 @contextlib.contextmanager def mock_mask_rcnn_inference(tensor_mode, patched_module, check=True): with mock.patch( "{}.mask_rcnn_inference".format(patched_module), side_effect=Caffe2MaskRCNNInference() ) as mocked_func: yield if check: assert mocked_func.call_count > 0 @contextlib.contextmanager def mock_keypoint_rcnn_inference(tensor_mode, patched_module, use_heatmap_max_keypoint, check=True): with mock.patch( "{}.keypoint_rcnn_inference".format(patched_module),
next_line: side_effect=Caffe2KeypointRCNNInference(use_heatmap_max_keypoint),
gold_snippet_index: 7
created_at: 2023-12-05 02:51:53+00:00
level: 16k
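This record's file centers on Caffe2CompatibleConverter.create_from, which retargets a live nn.Module by reassigning its __class__ so the instance keeps its parameters and buffers but picks up export-friendly behaviour. Below is a small toy sketch of that technique under stated assumptions: Original and TracedFriendly are hypothetical stand-ins for pairs like rpn.RPN / Caffe2RPN, and tensor_mode is a plain attribute here rather than the property the real Caffe2Compatible uses.

import torch
import torch.nn as nn

class Original(nn.Module):
    def forward(self, x):
        return x + 1

class TracedFriendly(Original):
    tensor_mode = False  # simplified stand-in for Caffe2Compatible's flag
    def forward(self, x):
        return x + 2     # export-friendly re-implementation

def swap_class(module, replacement):
    # Same trick as create_from: keep the instance, change its behaviour
    # by reassigning __class__ to a subclass of the original class.
    assert isinstance(module, nn.Module)
    module.__class__ = replacement
    return module

m = Original()
print(type(m).__name__, m(torch.zeros(1)))   # Original tensor([1.])
swap_class(m, TracedFriendly)
print(type(m).__name__, m(torch.zeros(1)))   # TracedFriendly tensor([2.])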
repo_name: u2seg/U2Seg
file_path: detectron2/data/build.py
[ { "identifier": "configurable", "path": "detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\n \"\"\"\n Decorate a function or a class's __init__ method so that it can be called\n with a :class:`CfgNode` object using a :func:`from_config` function that translates\n :class:`CfgNode` to arguments.\n\n Examples:\n ::\n # Usage 1: Decorator on __init__:\n class A:\n @configurable\n def __init__(self, a, b=2, c=3):\n pass\n\n @classmethod\n def from_config(cls, cfg): # 'cfg' must be the first argument\n # Returns kwargs to be passed to __init__\n return {\"a\": cfg.A, \"b\": cfg.B}\n\n a1 = A(a=1, b=2) # regular construction\n a2 = A(cfg) # construct with a cfg\n a3 = A(cfg, b=3, c=4) # construct with extra overwrite\n\n # Usage 2: Decorator on any function. Needs an extra from_config argument:\n @configurable(from_config=lambda cfg: {\"a: cfg.A, \"b\": cfg.B})\n def a_func(a, b=2, c=3):\n pass\n\n a1 = a_func(a=1, b=2) # regular call\n a2 = a_func(cfg) # call with a cfg\n a3 = a_func(cfg, b=3, c=4) # call with extra overwrite\n\n Args:\n init_func (callable): a class's ``__init__`` method in usage 1. The\n class must have a ``from_config`` classmethod which takes `cfg` as\n the first argument.\n from_config (callable): the from_config function in usage 2. It must take `cfg`\n as its first argument.\n \"\"\"\n\n if init_func is not None:\n assert (\n inspect.isfunction(init_func)\n and from_config is None\n and init_func.__name__ == \"__init__\"\n ), \"Incorrect use of @configurable. Check API documentation for examples.\"\n\n @functools.wraps(init_func)\n def wrapped(self, *args, **kwargs):\n try:\n from_config_func = type(self).from_config\n except AttributeError as e:\n raise AttributeError(\n \"Class with @configurable must have a 'from_config' classmethod.\"\n ) from e\n if not inspect.ismethod(from_config_func):\n raise TypeError(\"Class with @configurable must have a 'from_config' classmethod.\")\n\n if _called_with_cfg(*args, **kwargs):\n explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)\n init_func(self, **explicit_args)\n else:\n init_func(self, *args, **kwargs)\n\n return wrapped\n\n else:\n if from_config is None:\n return configurable # @configurable() is made equivalent to @configurable\n assert inspect.isfunction(\n from_config\n ), \"from_config argument of configurable must be a function!\"\n\n def wrapper(orig_func):\n @functools.wraps(orig_func)\n def wrapped(*args, **kwargs):\n if _called_with_cfg(*args, **kwargs):\n explicit_args = _get_args_from_config(from_config, *args, **kwargs)\n return orig_func(**explicit_args)\n else:\n return orig_func(*args, **kwargs)\n\n wrapped.from_config = from_config\n return wrapped\n\n return wrapper" }, { "identifier": "BoxMode", "path": "detectron2/structures/boxes.py", "snippet": "class BoxMode(IntEnum):\n \"\"\"\n Enum of different ways to represent a box.\n \"\"\"\n\n XYXY_ABS = 0\n \"\"\"\n (x0, y0, x1, y1) in absolute floating points coordinates.\n The coordinates in range [0, width or height].\n \"\"\"\n XYWH_ABS = 1\n \"\"\"\n (x0, y0, w, h) in absolute floating points coordinates.\n \"\"\"\n XYXY_REL = 2\n \"\"\"\n Not yet supported!\n (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.\n \"\"\"\n XYWH_REL = 3\n \"\"\"\n Not yet supported!\n (x0, y0, w, h) in range [0, 1]. 
They are relative to the size of the image.\n \"\"\"\n XYWHA_ABS = 4\n \"\"\"\n (xc, yc, w, h, a) in absolute floating points coordinates.\n (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.\n \"\"\"\n\n @staticmethod\n def convert(box: _RawBoxType, from_mode: \"BoxMode\", to_mode: \"BoxMode\") -> _RawBoxType:\n \"\"\"\n Args:\n box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5\n from_mode, to_mode (BoxMode)\n\n Returns:\n The converted box of the same type.\n \"\"\"\n if from_mode == to_mode:\n return box\n\n original_type = type(box)\n is_numpy = isinstance(box, np.ndarray)\n single_box = isinstance(box, (list, tuple))\n if single_box:\n assert len(box) == 4 or len(box) == 5, (\n \"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,\"\n \" where k == 4 or 5\"\n )\n arr = torch.tensor(box)[None, :]\n else:\n # avoid modifying the input box\n if is_numpy:\n arr = torch.from_numpy(np.asarray(box)).clone()\n else:\n arr = box.clone()\n\n assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [\n BoxMode.XYXY_REL,\n BoxMode.XYWH_REL,\n ], \"Relative mode not yet supported!\"\n\n if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:\n assert (\n arr.shape[-1] == 5\n ), \"The last dimension of input shape must be 5 for XYWHA format\"\n original_dtype = arr.dtype\n arr = arr.double()\n\n w = arr[:, 2]\n h = arr[:, 3]\n a = arr[:, 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n new_w = c * w + s * h\n new_h = c * h + s * w\n\n # convert center to top-left corner\n arr[:, 0] -= new_w / 2.0\n arr[:, 1] -= new_h / 2.0\n # bottom-right corner\n arr[:, 2] = arr[:, 0] + new_w\n arr[:, 3] = arr[:, 1] + new_h\n\n arr = arr[:, :4].to(dtype=original_dtype)\n elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:\n original_dtype = arr.dtype\n arr = arr.double()\n arr[:, 0] += arr[:, 2] / 2.0\n arr[:, 1] += arr[:, 3] / 2.0\n angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)\n arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)\n else:\n if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:\n arr[:, 2] += arr[:, 0]\n arr[:, 3] += arr[:, 1]\n elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:\n arr[:, 2] -= arr[:, 0]\n arr[:, 3] -= arr[:, 1]\n else:\n raise NotImplementedError(\n \"Conversion from BoxMode {} to {} is not supported yet\".format(\n from_mode, to_mode\n )\n )\n\n if single_box:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n else:\n return arr" }, { "identifier": "get_world_size", "path": "detectron2/utils/comm.py", "snippet": "def get_world_size() -> int:\n if not dist.is_available():\n return 1\n if not dist.is_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "seed_all_rng", "path": "detectron2/utils/env.py", "snippet": "def seed_all_rng(seed=None):\n \"\"\"\n Set the random seed for the RNG in torch, numpy and python.\n\n Args:\n seed (int): if None, will use a strong random seed.\n \"\"\"\n if seed is None:\n seed = (\n os.getpid()\n + int(datetime.now().strftime(\"%S%f\"))\n + int.from_bytes(os.urandom(2), \"big\")\n )\n logger = logging.getLogger(__name__)\n logger.info(\"Using a generated random seed {}\".format(seed))\n np.random.seed(seed)\n torch.manual_seed(seed)\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)" 
}, { "identifier": "PathManager", "path": "detectron2/utils/file_io.py", "snippet": "class Detectron2Handler(PathHandler):\n PREFIX = \"detectron2://\"\n S3_DETECTRON2_PREFIX = \"https://dl.fbaipublicfiles.com/detectron2/\"\n def _get_supported_prefixes(self):\n def _get_local_path(self, path, **kwargs):\n def _open(self, path, mode=\"r\", **kwargs):" }, { "identifier": "_log_api_usage", "path": "detectron2/utils/logger.py", "snippet": "def _log_api_usage(identifier: str):\n \"\"\"\n Internal function used to log the usage of different detectron2 components\n inside facebook's infra.\n \"\"\"\n torch._C._log_api_usage_once(\"detectron2.\" + identifier)" }, { "identifier": "log_first_n", "path": "detectron2/utils/logger.py", "snippet": "def log_first_n(lvl, msg, n=1, *, name=None, key=\"caller\"):\n \"\"\"\n Log only for the first n times.\n\n Args:\n lvl (int): the logging level\n msg (str):\n n (int):\n name (str): name of the logger to use. Will use the caller's module by default.\n key (str or tuple[str]): the string(s) can be one of \"caller\" or\n \"message\", which defines how to identify duplicated logs.\n For example, if called with `n=1, key=\"caller\"`, this function\n will only log the first call from the same caller, regardless of\n the message content.\n If called with `n=1, key=\"message\"`, this function will log the\n same content only once, even if they are called from different places.\n If called with `n=1, key=(\"caller\", \"message\")`, this function\n will not log only if the same caller has logged the same message before.\n \"\"\"\n if isinstance(key, str):\n key = (key,)\n assert len(key) > 0\n\n caller_module, caller_key = _find_caller()\n hash_key = ()\n if \"caller\" in key:\n hash_key = hash_key + caller_key\n if \"message\" in key:\n hash_key = hash_key + (msg,)\n\n _LOG_COUNTER[hash_key] += 1\n if _LOG_COUNTER[hash_key] <= n:\n logging.getLogger(name or caller_module).log(lvl, msg)" }, { "identifier": "DatasetCatalog", "path": "detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove(self, name):\n def __str__(self):\n def __getattr__(self, key):\n def __setattr__(self, key, val):\n def as_dict(self):\n def set(self, **kwargs):\n def get(self, key, default=None):\n def get(self, name):\n def list(self):\n def remove(self, name):\n def __str__(self):\n _RENAMED = {\n \"class_names\": \"thing_classes\",\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\n \"stuff_class_names\": \"stuff_classes\",\n }" }, { "identifier": "AspectRatioGroupedDataset", "path": "detectron2/data/common.py", "snippet": "class AspectRatioGroupedDataset(data.IterableDataset):\n \"\"\"\n Batch data that have similar aspect ratio together.\n In this implementation, images whose aspect ratio < (or >) 1 will\n be batched together.\n This improves training speed because the images then need less padding\n to form a batch.\n\n It assumes the underlying dataset produces dicts with \"width\" and \"height\" keys.\n It will then produce a list of original dicts with length = batch_size,\n all with similar aspect ratios.\n \"\"\"\n\n def __init__(self, dataset, batch_size):\n \"\"\"\n Args:\n dataset: an iterable. 
Each element must be a dict with keys\n \"width\" and \"height\", which will be used to batch data.\n batch_size (int):\n \"\"\"\n self.dataset = dataset\n self.batch_size = batch_size\n self._buckets = [[] for _ in range(2)]\n # Hard-coded two aspect ratio groups: w > h and w < h.\n # Can add support for more aspect ratio groups, but doesn't seem useful\n\n def __iter__(self):\n for d in self.dataset:\n w, h = d[\"width\"], d[\"height\"]\n bucket_id = 0 if w > h else 1\n bucket = self._buckets[bucket_id]\n bucket.append(d)\n if len(bucket) == self.batch_size:\n data = bucket[:]\n # Clear bucket first, because code after yield is not\n # guaranteed to execute\n del bucket[:]\n yield data" }, { "identifier": "DatasetFromList", "path": "detectron2/data/common.py", "snippet": "class DatasetFromList(data.Dataset):\n \"\"\"\n Wrap a list to a torch Dataset. It produces elements of the list as data.\n \"\"\"\n\n def __init__(\n self,\n lst: list,\n copy: bool = True,\n serialize: Union[bool, Callable] = True,\n ):\n \"\"\"\n Args:\n lst (list): a list which contains elements to produce.\n copy (bool): whether to deepcopy the element when producing it,\n so that the result can be modified in place without affecting the\n source in the list.\n serialize (bool or callable): whether to serialize the stroage to other\n backend. If `True`, the default serialize method will be used, if given\n a callable, the callable will be used as serialize method.\n \"\"\"\n self._lst = lst\n self._copy = copy\n if not isinstance(serialize, (bool, Callable)):\n raise TypeError(f\"Unsupported type for argument `serailzie`: {serialize}\")\n self._serialize = serialize is not False\n\n if self._serialize:\n serialize_method = (\n serialize\n if isinstance(serialize, Callable)\n else _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD\n )\n logger.info(f\"Serializing the dataset using: {serialize_method}\")\n self._lst = serialize_method(self._lst)\n\n def __len__(self):\n return len(self._lst)\n\n def __getitem__(self, idx):\n if self._copy and not self._serialize:\n return copy.deepcopy(self._lst[idx])\n else:\n return self._lst[idx]" }, { "identifier": "MapDataset", "path": "detectron2/data/common.py", "snippet": "class MapDataset(data.Dataset):\n \"\"\"\n Map a function over the elements in a dataset.\n \"\"\"\n\n def __init__(self, dataset, map_func):\n \"\"\"\n Args:\n dataset: a dataset where map function is applied. Can be either\n map-style or iterable dataset. When given an iterable dataset,\n the returned object will also be an iterable dataset.\n map_func: a callable which maps the element in dataset. map_func can\n return None to skip the data (e.g. 
in case of errors).\n How None is handled depends on the style of `dataset`.\n If `dataset` is map-style, it randomly tries other elements.\n If `dataset` is iterable, it skips the data and tries the next.\n \"\"\"\n self._dataset = dataset\n self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work\n\n self._rng = random.Random(42)\n self._fallback_candidates = set(range(len(dataset)))\n\n def __new__(cls, dataset, map_func):\n is_iterable = isinstance(dataset, data.IterableDataset)\n if is_iterable:\n return _MapIterableDataset(dataset, map_func)\n else:\n return super().__new__(cls)\n\n def __getnewargs__(self):\n return self._dataset, self._map_func\n\n def __len__(self):\n return len(self._dataset)\n\n def __getitem__(self, idx):\n retry_count = 0\n cur_idx = int(idx)\n\n while True:\n data = self._map_func(self._dataset[cur_idx])\n if data is not None:\n self._fallback_candidates.add(cur_idx)\n return data\n\n # _map_func fails for this idx, use a random new index from the pool\n retry_count += 1\n self._fallback_candidates.discard(cur_idx)\n cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0]\n\n if retry_count >= 3:\n logger = logging.getLogger(__name__)\n logger.warning(\n \"Failed to apply `_map_func` for idx: {}, retry count: {}\".format(\n idx, retry_count\n )\n )" }, { "identifier": "ToIterableDataset", "path": "detectron2/data/common.py", "snippet": "class ToIterableDataset(data.IterableDataset):\n \"\"\"\n Convert an old indices-based (also called map-style) dataset\n to an iterable-style dataset.\n \"\"\"\n\n def __init__(\n self,\n dataset: data.Dataset,\n sampler: Sampler,\n shard_sampler: bool = True,\n shard_chunk_size: int = 1,\n ):\n \"\"\"\n Args:\n dataset: an old-style dataset with ``__getitem__``\n sampler: a cheap iterable that produces indices to be applied on ``dataset``.\n shard_sampler: whether to shard the sampler based on the current pytorch data loader\n worker id. When an IterableDataset is forked by pytorch's DataLoader into multiple\n workers, it is responsible for sharding its data based on worker id so that workers\n don't produce identical data.\n\n Most samplers (like our TrainingSampler) do not shard based on dataloader worker id\n and this argument should be set to True. But certain samplers may be already\n sharded, in that case this argument should be set to False.\n shard_chunk_size: when sharding the sampler, each worker will\n \"\"\"\n assert not isinstance(dataset, data.IterableDataset), dataset\n assert isinstance(sampler, Sampler), sampler\n self.dataset = dataset\n self.sampler = sampler\n self.shard_sampler = shard_sampler\n self.shard_chunk_size = shard_chunk_size\n\n def __iter__(self):\n if not self.shard_sampler:\n sampler = self.sampler\n else:\n # With map-style dataset, `DataLoader(dataset, sampler)` runs the\n # sampler in main process only. But `DataLoader(ToIterableDataset(dataset, sampler))`\n # will run sampler in every of the N worker. So we should only keep 1/N of the ids on\n # each worker. 
The assumption is that sampler is cheap to iterate so it's fine to\n # discard ids in workers.\n sampler = _shard_iterator_dataloader_worker(self.sampler, self.shard_chunk_size)\n for idx in sampler:\n yield self.dataset[idx]\n\n def __len__(self):\n return len(self.sampler)" }, { "identifier": "DatasetMapper", "path": "detectron2/data/dataset_mapper.py", "snippet": "class DatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by the model.\n\n This is the default callable to be used to map your dataset dict into training data.\n You may need to follow it to implement your own one for customized logic,\n such as a different way to read or transform images.\n See :doc:`/tutorials/data_loading` for details.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies cropping/geometric transforms to the image and annotations\n 3. Prepare data and annotations to Tensor and :class:`Instances`\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train: bool,\n *,\n augmentations: List[Union[T.Augmentation, T.Transform]],\n image_format: str,\n use_instance_mask: bool = False,\n use_keypoint: bool = False,\n instance_mask_format: str = \"polygon\",\n keypoint_hflip_indices: Optional[np.ndarray] = None,\n precomputed_proposal_topk: Optional[int] = None,\n recompute_boxes: bool = False,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n is_train: whether it's used in training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n use_instance_mask: whether to process instance segmentation annotations, if available\n use_keypoint: whether to process keypoint annotations if available\n instance_mask_format: one of \"polygon\" or \"bitmask\". 
Process instance segmentation\n masks into this format.\n keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`\n precomputed_proposal_topk: if given, will load pre-computed\n proposals from dataset_dict and keep the top k proposals for each image.\n recompute_boxes: whether to overwrite bounding box annotations\n by computing tight bounding boxes from instance mask annotations.\n \"\"\"\n if recompute_boxes:\n assert use_instance_mask, \"recompute_boxes requires instance masks\"\n # fmt: off\n self.is_train = is_train\n self.augmentations = T.AugmentationList(augmentations)\n self.image_format = image_format\n self.use_instance_mask = use_instance_mask\n self.instance_mask_format = instance_mask_format\n self.use_keypoint = use_keypoint\n self.keypoint_hflip_indices = keypoint_hflip_indices\n self.proposal_topk = precomputed_proposal_topk\n self.recompute_boxes = recompute_boxes\n # fmt: on\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[DatasetMapper] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train: bool = True):\n augs = utils.build_augmentation(cfg, is_train)\n if cfg.INPUT.CROP.ENABLED and is_train:\n augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))\n recompute_boxes = cfg.MODEL.MASK_ON\n else:\n recompute_boxes = False\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"use_instance_mask\": cfg.MODEL.MASK_ON,\n \"instance_mask_format\": cfg.INPUT.MASK_FORMAT,\n \"use_keypoint\": cfg.MODEL.KEYPOINT_ON,\n \"recompute_boxes\": recompute_boxes,\n }\n\n if cfg.MODEL.KEYPOINT_ON:\n ret[\"keypoint_hflip_indices\"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)\n\n if cfg.MODEL.LOAD_PROPOSALS:\n ret[\"precomputed_proposal_topk\"] = (\n cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN\n if is_train\n else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST\n )\n return ret\n\n def _transform_annotations(self, dataset_dict, transforms, image_shape):\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n if not self.use_instance_mask:\n anno.pop(\"segmentation\", None)\n if not self.use_keypoint:\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(\n obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices\n )\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n instances = utils.annotations_to_instances(\n annos, image_shape, mask_format=self.instance_mask_format\n )\n\n # After transforms such as cropping are applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). 
The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n if self.recompute_boxes:\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n dataset_dict[\"instances\"] = utils.filter_empty_instances(instances)\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n # USER: Write your own image loading if it's not from a file\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.image_format)\n utils.check_image_size(dataset_dict, image)\n\n # USER: Remove if you don't do semantic/panoptic segmentation.\n if \"sem_seg_file_name\" in dataset_dict:\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\"), \"L\").squeeze(2)\n else:\n sem_seg_gt = None\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n transforms = self.augmentations(aug_input)\n image, sem_seg_gt = aug_input.image, aug_input.sem_seg\n\n image_shape = image.shape[:2] # h, w\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n # USER: Remove if you don't use pre-computed proposals.\n # Most users would not need this feature.\n if self.proposal_topk is not None:\n utils.transform_proposals(\n dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk\n )\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n dataset_dict.pop(\"sem_seg_file_name\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n self._transform_annotations(dataset_dict, transforms, image_shape)\n\n return dataset_dict" }, { "identifier": "check_metadata_consistency", "path": "detectron2/data/detection_utils.py", "snippet": "def check_metadata_consistency(key, dataset_names):\n \"\"\"\n Check that the datasets have consistent metadata.\n\n Args:\n key (str): a metadata key\n dataset_names (list[str]): a list of dataset names\n\n Raises:\n AttributeError: if the key does not exist in the metadata\n ValueError: if the given datasets do not have the same metadata values defined by key\n \"\"\"\n if len(dataset_names) == 0:\n return\n logger = logging.getLogger(__name__)\n entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]\n for idx, entry in enumerate(entries_per_dataset):\n if entry != entries_per_dataset[0]:\n logger.error(\n \"Metadata '{}' for dataset '{}' is '{}'\".format(key, dataset_names[idx], str(entry))\n )\n logger.error(\n \"Metadata '{}' for dataset '{}' is '{}'\".format(\n key, dataset_names[0], str(entries_per_dataset[0])\n )\n )\n raise ValueError(\"Datasets have different metadata '{}'!\".format(key))" }, { "identifier": "InferenceSampler", "path": "detectron2/data/samplers/distributed_sampler.py", "snippet": "class InferenceSampler(Sampler):\n \"\"\"\n Produce indices for inference across all workers.\n Inference needs to run on the __exact__ set of samples,\n 
therefore when the total number of samples is not divisible by the number of workers,\n this sampler produces different number of samples on different workers.\n \"\"\"\n\n def __init__(self, size: int):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n \"\"\"\n self._size = size\n assert size > 0\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n self._local_indices = self._get_local_indices(size, self._world_size, self._rank)\n\n @staticmethod\n def _get_local_indices(total_size, world_size, rank):\n shard_size = total_size // world_size\n left = total_size % world_size\n shard_sizes = [shard_size + int(r < left) for r in range(world_size)]\n\n begin = sum(shard_sizes[:rank])\n end = min(sum(shard_sizes[: rank + 1]), total_size)\n return range(begin, end)\n\n def __iter__(self):\n yield from self._local_indices\n\n def __len__(self):\n return len(self._local_indices)" }, { "identifier": "RandomSubsetTrainingSampler", "path": "detectron2/data/samplers/distributed_sampler.py", "snippet": "class RandomSubsetTrainingSampler(TrainingSampler):\n \"\"\"\n Similar to TrainingSampler, but only sample a random subset of indices.\n This is useful when you want to estimate the accuracy vs data-number curves by\n training the model with different subset_ratio.\n \"\"\"\n\n def __init__(\n self,\n size: int,\n subset_ratio: float,\n shuffle: bool = True,\n seed_shuffle: Optional[int] = None,\n seed_subset: Optional[int] = None,\n ):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n subset_ratio (float): the ratio of subset data to sample from the underlying dataset\n shuffle (bool): whether to shuffle the indices or not\n seed_shuffle (int): the initial seed of the shuffle. Must be the same\n across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n seed_subset (int): the seed to randomize the subset to be sampled.\n Must be the same across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n \"\"\"\n super().__init__(size=size, shuffle=shuffle, seed=seed_shuffle)\n\n assert 0.0 < subset_ratio <= 1.0\n self._size_subset = int(size * subset_ratio)\n assert self._size_subset > 0\n if seed_subset is None:\n seed_subset = comm.shared_random_seed()\n self._seed_subset = int(seed_subset)\n\n # randomly generate the subset indexes to be sampled from\n g = torch.Generator()\n g.manual_seed(self._seed_subset)\n indexes_randperm = torch.randperm(self._size, generator=g)\n self._indexes_subset = indexes_randperm[: self._size_subset]\n\n logger.info(\"Using RandomSubsetTrainingSampler......\")\n logger.info(f\"Randomly sample {self._size_subset} data from the original {self._size} data\")\n\n def _infinite_indices(self):\n g = torch.Generator()\n g.manual_seed(self._seed) # self._seed equals seed_shuffle from __init__()\n while True:\n if self._shuffle:\n # generate a random permutation to shuffle self._indexes_subset\n randperm = torch.randperm(self._size_subset, generator=g)\n yield from self._indexes_subset[randperm].tolist()\n else:\n yield from self._indexes_subset.tolist()" }, { "identifier": "RepeatFactorTrainingSampler", "path": "detectron2/data/samplers/distributed_sampler.py", "snippet": "class RepeatFactorTrainingSampler(Sampler):\n \"\"\"\n Similar to TrainingSampler, but a sample may appear more times than others based\n on its \"repeat factor\". 
This is suitable for training on class imbalanced datasets like LVIS.\n \"\"\"\n\n def __init__(self, repeat_factors, *, shuffle=True, seed=None):\n \"\"\"\n Args:\n repeat_factors (Tensor): a float vector, the repeat factor for each indice. When it's\n full of ones, it is equivalent to ``TrainingSampler(len(repeat_factors), ...)``.\n shuffle (bool): whether to shuffle the indices or not\n seed (int): the initial seed of the shuffle. Must be the same\n across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n \"\"\"\n self._shuffle = shuffle\n if seed is None:\n seed = comm.shared_random_seed()\n self._seed = int(seed)\n\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n\n # Split into whole number (_int_part) and fractional (_frac_part) parts.\n self._int_part = torch.trunc(repeat_factors)\n self._frac_part = repeat_factors - self._int_part\n\n @staticmethod\n def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh):\n \"\"\"\n Compute (fractional) per-image repeat factors based on category frequency.\n The repeat factor for an image is a function of the frequency of the rarest\n category labeled in that image. The \"frequency of category c\" in [0, 1] is defined\n as the fraction of images in the training set (without repeats) in which category c\n appears.\n See :paper:`lvis` (>= v2) Appendix B.2.\n\n Args:\n dataset_dicts (list[dict]): annotations in Detectron2 dataset format.\n repeat_thresh (float): frequency threshold below which data is repeated.\n If the frequency is half of `repeat_thresh`, the image will be\n repeated twice.\n\n Returns:\n torch.Tensor:\n the i-th element is the repeat factor for the dataset image at index i.\n \"\"\"\n # 1. For each category c, compute the fraction of images that contain it: f(c)\n category_freq = defaultdict(int)\n for dataset_dict in dataset_dicts: # For each image (without repeats)\n cat_ids = {ann[\"category_id\"] for ann in dataset_dict[\"annotations\"]}\n for cat_id in cat_ids:\n category_freq[cat_id] += 1\n num_images = len(dataset_dicts)\n for k, v in category_freq.items():\n category_freq[k] = v / num_images\n\n # 2. For each category c, compute the category-level repeat factor:\n # r(c) = max(1, sqrt(t / f(c)))\n category_rep = {\n cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq))\n for cat_id, cat_freq in category_freq.items()\n }\n\n # 3. For each image I, compute the image-level repeat factor:\n # r(I) = max_{c in I} r(c)\n rep_factors = []\n for dataset_dict in dataset_dicts:\n cat_ids = {ann[\"category_id\"] for ann in dataset_dict[\"annotations\"]}\n rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0)\n rep_factors.append(rep_factor)\n\n return torch.tensor(rep_factors, dtype=torch.float32)\n\n def _get_epoch_indices(self, generator):\n \"\"\"\n Create a list of dataset indices (with repeats) to use for one epoch.\n\n Args:\n generator (torch.Generator): pseudo random number generator used for\n stochastic rounding.\n\n Returns:\n torch.Tensor: list of dataset indices to use in one epoch. 
Each index\n is repeated based on its calculated repeat factor.\n \"\"\"\n # Since repeat factors are fractional, we use stochastic rounding so\n # that the target repeat factor is achieved in expectation over the\n # course of training\n rands = torch.rand(len(self._frac_part), generator=generator)\n rep_factors = self._int_part + (rands < self._frac_part).float()\n # Construct a list of indices in which we repeat images as specified\n indices = []\n for dataset_index, rep_factor in enumerate(rep_factors):\n indices.extend([dataset_index] * int(rep_factor.item()))\n return torch.tensor(indices, dtype=torch.int64)\n\n def __iter__(self):\n start = self._rank\n yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)\n\n def _infinite_indices(self):\n g = torch.Generator()\n g.manual_seed(self._seed)\n while True:\n # Sample indices with repeats determined by stochastic rounding; each\n # \"epoch\" may have a slightly different size due to the rounding.\n indices = self._get_epoch_indices(g)\n if self._shuffle:\n randperm = torch.randperm(len(indices), generator=g)\n yield from indices[randperm].tolist()\n else:\n yield from indices.tolist()" }, { "identifier": "TrainingSampler", "path": "detectron2/data/samplers/distributed_sampler.py", "snippet": "class TrainingSampler(Sampler):\n \"\"\"\n In training, we only care about the \"infinite stream\" of training data.\n So this sampler produces an infinite stream of indices and\n all workers cooperate to correctly shuffle the indices and sample different indices.\n\n The samplers in each worker effectively produces `indices[worker_id::num_workers]`\n where `indices` is an infinite stream of indices consisting of\n `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)\n or `range(size) + range(size) + ...` (if shuffle is False)\n\n Note that this sampler does not shard based on pytorch DataLoader worker id.\n A sampler passed to pytorch DataLoader is used only with map-style dataset\n and will not be executed inside workers.\n But if this sampler is used in a way that it gets execute inside a dataloader\n worker, then extra work needs to be done to shard its outputs based on worker id.\n This is required so that workers don't produce identical data.\n :class:`ToIterableDataset` implements this logic.\n This note is true for all samplers in detectron2.\n \"\"\"\n\n def __init__(self, size: int, shuffle: bool = True, seed: Optional[int] = None):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n shuffle (bool): whether to shuffle the indices or not\n seed (int): the initial seed of the shuffle. Must be the same\n across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n \"\"\"\n if not isinstance(size, int):\n raise TypeError(f\"TrainingSampler(size=) expects an int. Got type {type(size)}.\")\n if size <= 0:\n raise ValueError(f\"TrainingSampler(size=) expects a positive int. 
Got {size}.\")\n self._size = size\n self._shuffle = shuffle\n if seed is None:\n seed = comm.shared_random_seed()\n self._seed = int(seed)\n\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n\n def __iter__(self):\n start = self._rank\n yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)\n\n def _infinite_indices(self):\n g = torch.Generator()\n g.manual_seed(self._seed)\n while True:\n if self._shuffle:\n yield from torch.randperm(self._size, generator=g).tolist()\n else:\n yield from torch.arange(self._size).tolist()" } ]
import itertools
import logging
import numpy as np
import operator
import pickle
import torch
import torch.utils.data as torchdata
from collections import OrderedDict, defaultdict
from typing import Any, Callable, Dict, List, Optional, Union
from tabulate import tabulate
from termcolor import colored
from detectron2.config import configurable
from detectron2.structures import BoxMode
from detectron2.utils.comm import get_world_size
from detectron2.utils.env import seed_all_rng
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import _log_api_usage, log_first_n
from .catalog import DatasetCatalog, MetadataCatalog
from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset
from .dataset_mapper import DatasetMapper
from .detection_utils import check_metadata_consistency
from .samplers import (
    InferenceSampler,
    RandomSubsetTrainingSampler,
    RepeatFactorTrainingSampler,
    TrainingSampler,
)
11,729
Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. proposal_file (str): file path of pre-computed proposals, in pkl format. Returns: list[dict]: the same format as dataset_dicts, but added proposal field. """ logger = logging.getLogger(__name__) logger.info("Loading proposals from: {}".format(proposal_file)) with PathManager.open(proposal_file, "rb") as f: proposals = pickle.load(f, encoding="latin1") # Rename the key names in D1 proposal files rename_keys = {"indexes": "ids", "scores": "objectness_logits"} for key in rename_keys: if key in proposals: proposals[rename_keys[key]] = proposals.pop(key) # Fetch the indexes of all proposals that are in the dataset # Convert image_id to str since they could be int. img_ids = set({str(record["image_id"]) for record in dataset_dicts}) id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids} # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS' bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS for record in dataset_dicts: # Get the index of the proposal i = id_to_index[str(record["image_id"])] boxes = proposals["boxes"][i] objectness_logits = proposals["objectness_logits"][i] # Sort the proposals in descending order of the scores inds = objectness_logits.argsort()[::-1] record["proposal_boxes"] = boxes[inds] record["proposal_objectness_logits"] = objectness_logits[inds] record["proposal_bbox_mode"] = bbox_mode return dataset_dicts def print_instances_class_histogram(dataset_dicts, class_names): """ Args: dataset_dicts (list[dict]): list of dataset dicts. class_names (list[str]): list of class names (zero-indexed). """ num_classes = len(class_names) hist_bins = np.arange(num_classes + 1) histogram = np.zeros((num_classes,), dtype=int) for entry in dataset_dicts: annos = entry["annotations"] classes = np.asarray( [x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=int ) if len(classes): assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}" assert ( classes.max() < num_classes ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes" histogram += np.histogram(classes, bins=hist_bins)[0] N_COLS = min(6, len(class_names) * 2) def short_name(x): # make long class names shorter. useful for lvis if len(x) > 13: return x[:11] + ".." return x data = list( itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)]) ) total_num_instances = sum(data[1::2]) data.extend([None] * (N_COLS - (len(data) % N_COLS))) if num_classes > 1: data.extend(["total", total_num_instances]) data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)]) table = tabulate( data, headers=["category", "#instances"] * (N_COLS // 2), tablefmt="pipe", numalign="left", stralign="center", ) log_first_n( logging.INFO, "Distribution of instances among all {} categories:\n".format(num_classes) + colored(table, "cyan"), key="message", ) def get_detection_dataset_dicts( names, filter_empty=True, min_keypoints=0, proposal_files=None, check_consistency=True, ): """ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation. Args: names (str or list[str]): a dataset name or a list of dataset names filter_empty (bool): whether to filter out images without instance annotations min_keypoints (int): filter out images with fewer keypoints than `min_keypoints`. Set to 0 to do nothing. 
proposal_files (list[str]): if given, a list of object proposal files that match each dataset in `names`. check_consistency (bool): whether to check if datasets have consistent metadata. Returns: list[dict]: a list of dicts following the standard dataset dict format. """ if isinstance(names, str): names = [names] assert len(names), names
# Copyright (c) Facebook, Inc. and its affiliates. """ This file contains the default logic to build a dataloader for training or testing. """ __all__ = [ "build_batch_data_loader", "build_detection_train_loader", "build_detection_test_loader", "get_detection_dataset_dicts", "load_proposals_into_dataset", "print_instances_class_histogram", ] def filter_images_with_only_crowd_annotations(dataset_dicts): """ Filter out images with none annotations or only crowd annotations (i.e., images without non-crowd annotations). A common training-time preprocessing on COCO dataset. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format, but filtered. """ num_before = len(dataset_dicts) def valid(anns): for ann in anns: if ann.get("iscrowd", 0) == 0: return True return False dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with no usable annotations. {} images left.".format( num_before - num_after, num_after ) ) return dataset_dicts def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image): """ Filter out images with too few number of keypoints. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format as dataset_dicts, but filtered. """ num_before = len(dataset_dicts) def visible_keypoints_in_image(dic): # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility annotations = dic["annotations"] return sum( (np.array(ann["keypoints"][2::3]) > 0).sum() for ann in annotations if "keypoints" in ann ) dataset_dicts = [ x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image ] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with fewer than {} keypoints.".format( num_before - num_after, min_keypoints_per_image ) ) return dataset_dicts def load_proposals_into_dataset(dataset_dicts, proposal_file): """ Load precomputed object proposals into the dataset. The proposal file should be a pickled dict with the following keys: - "ids": list[int] or list[str], the image ids - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores corresponding to the boxes. - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. proposal_file (str): file path of pre-computed proposals, in pkl format. Returns: list[dict]: the same format as dataset_dicts, but added proposal field. """ logger = logging.getLogger(__name__) logger.info("Loading proposals from: {}".format(proposal_file)) with PathManager.open(proposal_file, "rb") as f: proposals = pickle.load(f, encoding="latin1") # Rename the key names in D1 proposal files rename_keys = {"indexes": "ids", "scores": "objectness_logits"} for key in rename_keys: if key in proposals: proposals[rename_keys[key]] = proposals.pop(key) # Fetch the indexes of all proposals that are in the dataset # Convert image_id to str since they could be int. 
img_ids = set({str(record["image_id"]) for record in dataset_dicts}) id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids} # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS' bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS for record in dataset_dicts: # Get the index of the proposal i = id_to_index[str(record["image_id"])] boxes = proposals["boxes"][i] objectness_logits = proposals["objectness_logits"][i] # Sort the proposals in descending order of the scores inds = objectness_logits.argsort()[::-1] record["proposal_boxes"] = boxes[inds] record["proposal_objectness_logits"] = objectness_logits[inds] record["proposal_bbox_mode"] = bbox_mode return dataset_dicts def print_instances_class_histogram(dataset_dicts, class_names): """ Args: dataset_dicts (list[dict]): list of dataset dicts. class_names (list[str]): list of class names (zero-indexed). """ num_classes = len(class_names) hist_bins = np.arange(num_classes + 1) histogram = np.zeros((num_classes,), dtype=int) for entry in dataset_dicts: annos = entry["annotations"] classes = np.asarray( [x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=int ) if len(classes): assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}" assert ( classes.max() < num_classes ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes" histogram += np.histogram(classes, bins=hist_bins)[0] N_COLS = min(6, len(class_names) * 2) def short_name(x): # make long class names shorter. useful for lvis if len(x) > 13: return x[:11] + ".." return x data = list( itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)]) ) total_num_instances = sum(data[1::2]) data.extend([None] * (N_COLS - (len(data) % N_COLS))) if num_classes > 1: data.extend(["total", total_num_instances]) data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)]) table = tabulate( data, headers=["category", "#instances"] * (N_COLS // 2), tablefmt="pipe", numalign="left", stralign="center", ) log_first_n( logging.INFO, "Distribution of instances among all {} categories:\n".format(num_classes) + colored(table, "cyan"), key="message", ) def get_detection_dataset_dicts( names, filter_empty=True, min_keypoints=0, proposal_files=None, check_consistency=True, ): """ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation. Args: names (str or list[str]): a dataset name or a list of dataset names filter_empty (bool): whether to filter out images without instance annotations min_keypoints (int): filter out images with fewer keypoints than `min_keypoints`. Set to 0 to do nothing. proposal_files (list[str]): if given, a list of object proposal files that match each dataset in `names`. check_consistency (bool): whether to check if datasets have consistent metadata. Returns: list[dict]: a list of dicts following the standard dataset dict format. """ if isinstance(names, str): names = [names] assert len(names), names
available_datasets = DatasetCatalog.keys()
7
2023-12-05 01:13:31+00:00
16k
upfusion3d/upfusion
control_net/ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "control_net/ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "control_net/ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "control_net/ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "control_net/ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "control_net/ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "control_net/ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "control_net/ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "control_net/ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "control_net/ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n 
self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "control_net/ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "control_net/ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "control_net/ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "control_net/ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "control_net/ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "control_net/ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "control_net/ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "control_net/ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n 
super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule,\n disable_tdqm=kwargs.get(\"disable_tdqm\", False),\n cfg_type=kwargs.get(\"cfg_type\", None)\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None, disable_tdqm=False, cfg_type=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps, disable=disable_tdqm)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n raise RuntimeError(\"not supported since this may mess up the new cfg logic\")\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim_v2(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold, cfg_type=cfg_type)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if (isinstance(c[k], list)) and (type(c[k][0]) is not PerspectiveCameras):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n elif (isinstance(c[k], list)) and ((type(c[k][0]) is PerspectiveCameras) or (c[k][0] is None)):\n c_in[k] = unconditional_conditioning[k] + c[k]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if 
self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def p_sample_ddim_v2(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None, cfg_type=None):\n # NOTE: v2 is a custom version so that modifications can be made more easily\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n if not isinstance(c, dict):\n raise RuntimeError(\"Not supported!\")\n\n # For cfg_type \"legacy\" or \"F1\"\n if isinstance(unconditional_conditioning, dict):\n c_in = dict()\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n for k in c:\n if (isinstance(c[k], list)) and (type(c[k][0]) is not PerspectiveCameras):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n elif (isinstance(c[k], list)) and ((type(c[k][0]) is PerspectiveCameras) or (c[k][0] is None)):\n c_in[k] = unconditional_conditioning[k] + c[k]\n elif (isinstance(c[k], torch.Tensor)):\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]], dim=0)\n else:\n raise RuntimeError(\"Not supported!\")\n\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n elif isinstance(unconditional_conditioning, list):\n raise ValueError\n\n else:\n raise RuntimeError(\"Not supported!\")\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, 
model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n raise RuntimeError(\"Function supported since the new cfg logic is not incorporated here\")\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(\n self, x_latent, cond, t_start, cfg_type=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None\n ):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # 
print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps, disable=True)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim_v2(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, \n cfg_type=cfg_type, unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning\n )\n if callback: callback(i)\n return x_dec" } ]
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from control_net.ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from control_net.ldm.modules.ema import LitEma from control_net.ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from control_net.ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from control_net.ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from control_net.ldm.models.diffusion.ddim import DDIMSampler
11,165
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit
if reset_ema: assert exists(ckpt_path)
1
2023-12-12 00:49:11+00:00
16k
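Illustrative aside (not part of the record above): the DDIM sampler snippets in this record's context — p_sample_ddim and p_sample_ddim_v2 — all perform the same classifier-free guidance combination followed by a single DDIM update. The sketch below restates just that arithmetic in a minimal, runnable form; the toy noise model, tensor shapes, and schedule values are assumptions chosen for the demo, not values taken from the record.

# Minimal sketch of CFG + one DDIM step (eps parameterization), mirroring the
# arithmetic in the p_sample_ddim / p_sample_ddim_v2 snippets quoted above.
import torch

def toy_eps_model(x, t, cond):
    # Stand-in for model.apply_model(x, t, c); a real model predicts noise.
    return torch.tanh(x + cond)

def cfg_ddim_step(x, t, cond, uncond, guidance_scale, a_t, a_prev, sigma_t):
    # Classifier-free guidance: batch the unconditional and conditional passes,
    # then combine as e_t = e_uncond + s * (e_cond - e_uncond).
    x_in = torch.cat([x, x])
    t_in = torch.cat([t, t])
    c_in = torch.cat([uncond, cond])
    e_uncond, e_cond = toy_eps_model(x_in, t_in, c_in).chunk(2)
    e_t = e_uncond + guidance_scale * (e_cond - e_uncond)

    # DDIM update:
    #   pred_x0 = (x - sqrt(1 - a_t) * e_t) / sqrt(a_t)
    #   dir_xt  = sqrt(1 - a_prev - sigma_t^2) * e_t
    #   x_prev  = sqrt(a_prev) * pred_x0 + dir_xt + sigma_t * noise
    pred_x0 = (x - (1.0 - a_t).sqrt() * e_t) / a_t.sqrt()
    dir_xt = (1.0 - a_prev - sigma_t ** 2).sqrt() * e_t
    noise = sigma_t * torch.randn_like(x)
    x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
    return x_prev, pred_x0

# Toy usage with assumed shapes and schedule values.
x = torch.randn(2, 4, 8, 8)
t = torch.full((2,), 500, dtype=torch.long)
cond, uncond = torch.randn_like(x), torch.zeros_like(x)
a_t, a_prev, sigma_t = torch.tensor(0.5), torch.tensor(0.6), torch.tensor(0.0)
x_prev, pred_x0 = cfg_ddim_step(x, t, cond, uncond, 7.5, a_t, a_prev, sigma_t)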
modelscope/normal-depth-diffusion
scripts/t2i.py
[ { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n\n def __init__(self, model, schedule='linear', **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device('cuda'):\n attr = attr.to(torch.device('cuda'))\n setattr(self, name, attr)\n\n def make_schedule(self,\n ddim_num_steps,\n ddim_discretize='uniform',\n ddim_eta=0.,\n verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[\n 0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model\n .device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev',\n to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod',\n to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod',\n to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod',\n to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod',\n to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod',\n to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas',\n np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) *\n (1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps',\n sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(\n f'Warning: Got {cbs} conditionings but batch-size is {batch_size}'\n )\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}'\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n **kwargs):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(\n 0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[\n 0]\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b, ), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n **kwargs):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat(\n [unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n elif isinstance(c[k], torch.Tensor):\n c_in[k] = torch.cat(\n [unconditional_conditioning[k], c[k]])\n else:\n assert c[k] == unconditional_conditioning[k]\n c_in[k] = c[k]\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(\n torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in,\n c_in).chunk(2)\n # model_t = self.model.apply_model(x, t, c, **kwargs)\n # model_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)\n model_output = model_uncond + unconditional_guidance_scale * (\n model_t - model_uncond)\n\n if self.model.parameterization == 'v':\n print('using v!')\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == 'eps', 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c,\n **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1),\n sqrt_one_minus_alphas[index],\n device=device)\n\n # current prediction for x_0\n if self.model.parameterization != 'v':\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = 
self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device,\n repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape)\n * noise)\n\n @torch.no_grad()\n def decode(self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n **kwargs):\n\n timesteps = np.arange(self.ddpm_num_timesteps\n ) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0], ),\n step,\n device=x_latent.device,\n dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n return x_dec" }, { "identifier": "DPMSolverSampler", "path": "ldm/models/diffusion/dpm_solver/sampler.py", "snippet": "class DPMSolverSampler(object):\n\n def __init__(self, model, **kwargs):\n super().__init__()\n self.model = model\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.\n device)\n self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device('cuda'):\n attr = attr.to(torch.device('cuda'))\n setattr(self, name, attr)\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(\n f'Warning: Got {cbs} conditionings but batch-size is {batch_size}'\n )\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}'\n )\n\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n\n # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}')\n\n device = self.model.betas.device\n if x_T is None:\n img = torch.randn(size, device=device)\n else:\n img = x_T\n\n ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)\n\n model_fn = model_wrapper(\n lambda x, t, c: self.model.apply_model(x, t, c),\n ns,\n model_type='noise',\n guidance_type='classifier-free',\n condition=conditioning,\n unconditional_condition=unconditional_conditioning,\n guidance_scale=unconditional_guidance_scale,\n )\n\n dpm_solver = DPM_Solver(\n model_fn, ns, predict_x0=True, thresholding=False)\n x = dpm_solver.sample(\n img,\n steps=S,\n skip_type='time_uniform',\n method='multistep',\n order=2,\n lower_order_final=True)\n\n return x.to(device), None" }, { "identifier": "PLMSSampler", "path": "ldm/models/diffusion/plms.py", "snippet": "class PLMSSampler(object):\n\n def __init__(self, model, schedule='linear', **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device('cuda'):\n attr = attr.to(torch.device('cuda'))\n setattr(self, name, attr)\n\n def make_schedule(self,\n ddim_num_steps,\n ddim_discretize='uniform',\n ddim_eta=0.,\n verbose=True):\n if ddim_eta != 0:\n raise ValueError('ddim_eta must be 0 for PLMS')\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[\n 0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model\n .device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev',\n to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod',\n to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod',\n to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod',\n to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod',\n to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod',\n to_torch(np.sqrt(1. 
/ alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas',\n np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) *\n (1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps',\n sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(\n f'Warning: Got {cbs} conditionings but batch-size is {batch_size}'\n )\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}'\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for PLMS sampling is {size}')\n\n samples, intermediates = self.plms_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def plms_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = list(reversed(range(\n 0, timesteps))) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[\n 0]\n print(f'Running PLMS Sampling with {total_steps} 
timesteps')\n\n iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)\n old_eps = []\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b, ), step, device=device, dtype=torch.long)\n ts_next = torch.full((b, ),\n time_range[min(i + 1,\n len(time_range) - 1)],\n device=device,\n dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_plms(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n old_eps=old_eps,\n t_next=ts_next)\n img, pred_x0, e_t = outs\n old_eps.append(e_t)\n if len(old_eps) >= 4:\n old_eps.pop(0)\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_plms(self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n old_eps=None,\n t_next=None):\n b, *_, device = *x.shape, x.device\n\n def get_model_output(x, t):\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in,\n c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (\n e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == 'eps'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c,\n **corrector_kwargs)\n\n return e_t\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n\n def get_x_prev_and_pred_x0(e_t, index):\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1),\n alphas_prev[index],\n device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1),\n sqrt_one_minus_alphas[index],\n device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device,\n repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n e_t = get_model_output(x, t)\n if len(old_eps) == 0:\n # Pseudo Improved Euler (2nd order)\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)\n e_t_next = get_model_output(x_prev, t_next)\n e_t_prime = (e_t + e_t_next) / 2\n elif len(old_eps) == 1:\n # 2nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (3 * e_t - old_eps[-1]) / 2\n elif len(old_eps) == 2:\n # 3nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12\n elif len(old_eps) >= 3:\n # 4nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2]\n - 9 * old_eps[-3]) / 24\n\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)\n\n return x_prev, pred_x0, e_t" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not 'target' in config:\n\n print(config)\n if config == '__is_first_stage__':\n return None\n elif config == '__is_unconditional__':\n return None\n raise KeyError('Expected key `target` to instantiate.')\n return get_obj_from_str(config['target'])(**config.get('params', dict()))" }, { "identifier": "build_model", "path": "model_zoo.py", "snippet": "def build_model(model_name,\n ckpt_path=None,\n cache_dir=None,\n return_cfg=False,\n strict=True):\n if not model_name in PRETRAINED_MODELS:\n raise RuntimeError(\n f'Model name {model_name} is not a pre-trained model. Available models are:\\n- ' + \\\n '\\n- '.join(PRETRAINED_MODELS.keys())\n )\n model_info = PRETRAINED_MODELS[model_name]\n\n # Instiantiate the model\n print(f\"Loading model from config: {model_info['config']}\")\n config_file = os.path.join(REPO_DIR, model_info['config'])\n assert os.path.exists(config_file)\n\n config = OmegaConf.load(config_file)\n\n # loading from ema_model\n model = instantiate_from_config(config.model)\n if ckpt_path.endswith('_ema.ckpt'):\n ema_ckpt_path = ckpt_path\n else:\n ema_ckpt_path = os.path.splitext(ckpt_path)[0] + '_ema.ckpt'\n\n # model_ckpt = torch.load(ckpt_path, map_location='cpu')['state_dict']\n # model_ckpt = extract_ema(model, model_ckpt)\n print(ema_ckpt_path)\n if os.path.exists(ema_ckpt_path):\n print(f'load from ema_ckpt:{ema_ckpt_path}')\n ckpt_path = ema_ckpt_path\n model_ckpt = torch.load(ckpt_path, map_location='cpu')['state_dict']\n else:\n model_ckpt = torch.load(ckpt_path, map_location='cpu')\n model_ckpt = extract_ema(model, model_ckpt['state_dict'])\n torch.save({'state_dict': model_ckpt}, ema_ckpt_path)\n\n model.load_state_dict(model_ckpt, strict=strict)\n\n if not return_cfg:\n return model\n else:\n return model, config" }, { "identifier": "map_2_16bit", "path": "utils/color_transfer.py", "snippet": "def map_2_16bit(x):\n x = (np.clip(x, 0, 1.) * 65535).astype(np.uint16)\n\n low_x = np.zeros_like(x)\n low_x[x < 256] = x[x < 256]\n high_x = x >> 8\n\n return np.concatenate(\n [np.zeros_like(low_x[..., None]), high_x[..., None], low_x[..., None]],\n axis=-1).astype(np.uint8)" }, { "identifier": "map_16bit_2_8", "path": "utils/color_transfer.py", "snippet": "def map_16bit_2_8(x):\n\n x = x.astype(np.uint16)\n ret_v = x[..., 1] << 8 + x[..., 0]\n\n return ret_v / 65535." 
}, { "identifier": "split_rgbd", "path": "utils/color_transfer.py", "snippet": "def split_rgbd(x, is_bgr=False):\n '''\n x: np.uint8\n '''\n\n rgb, depth = x[..., :3], x[..., 3:]\n if is_bgr:\n rgb = rgb[..., ::-1]\n\n depth = (map_16bit_2_8(depth) * 255).astype(np.uint8)\n depth = np.repeat(depth[..., None], 3, axis=-1)\n rgbd = np.concatenate([rgb, depth], axis=1)\n\n return rgbd" }, { "identifier": "split_rgbd_only_tensor", "path": "utils/color_transfer.py", "snippet": "def split_rgbd_only_tensor(x_tensor):\n\n # depth is from [0 1]\n rgb, depth = x_tensor[:, :3], x_tensor[:, 3]\n depth_v = repeat(depth[:, None], 'b 1 h w -> b 3 h w')\n\n return torch.cat([rgb, depth_v], dim=1)" }, { "identifier": "split_rgbd_tensor", "path": "utils/color_transfer.py", "snippet": "def split_rgbd_tensor(x_tensor):\n\n # depth is from [0 1]\n rgb, depth = torch.split(x_tensor, 3, dim=1)\n depth = depth * 255\n depth_v = depth[:, 1] * 255 + depth[:, 0]\n depth_v = depth_v / 65535\n depth_v = repeat(depth_v[:, None], 'b 1 h w -> b 3 h w')\n\n return torch.cat([rgb, depth_v], dim=1)" } ]
import argparse import glob import os import pdb import sys import time import cv2 import numpy as np import torch from contextlib import contextmanager, nullcontext from itertools import islice from diffusers.pipelines.stable_diffusion.safety_checker import \ StableDiffusionSafetyChecker from einops import rearrange, repeat from imwatermark import WatermarkEncoder from ldm.models.diffusion.ddim import DDIMSampler from ldm.models.diffusion.dpm_solver import DPMSolverSampler from ldm.models.diffusion.plms import PLMSSampler from ldm.util import instantiate_from_config from model_zoo import build_model from omegaconf import OmegaConf from PIL import Image from pytorch_lightning import seed_everything from torch import autocast from torchvision.utils import make_grid from tqdm import tqdm, trange from transformers import AutoFeatureExtractor from utils.color_transfer import (map_2_16bit, map_16bit_2_8, split_rgbd, split_rgbd_only_tensor, split_rgbd_tensor)
10,863
default=42, help='the seed (for reproducible sampling)', ) parser.add_argument( '--precision', type=str, help='evaluate at this precision', choices=['full', 'autocast'], default='autocast') opt = parser.parse_args() seed_everything(opt.seed) ckpt_name = os.path.splitext(os.path.basename(opt.ckpt))[0] outdir = os.path.join(opt.save_dir, ckpt_name) os.makedirs(outdir, exist_ok=True) outpath = outdir # config = OmegaConf.load(f"{opt.config}") # model = load_model_from_config(config, f"{opt.ckpt}") model = build_model('nd', opt.ckpt, strict=False) device = torch.device( 'cuda') if torch.cuda.is_available() else torch.device('cpu') model = model.to(device) if opt.dpm_solver: sampler = DPMSolverSampler(model) elif opt.plms: sampler = PLMSSampler(model) else: sampler = DDIMSampler(model) print( 'Creating invisible watermark encoder (see https://github.com/ShieldMnt/invisible-watermark)...' ) wm = 'StableDiffusionV1' wm_encoder = WatermarkEncoder() wm_encoder.set_watermark('bytes', wm.encode('utf-8')) batch_size = opt.n_samples n_rows = opt.n_rows if opt.n_rows > 0 else batch_size if not opt.from_file: prompt = opt.prompt assert prompt is not None data = [batch_size * [prompt]] else: print(f'reading prompts from {opt.from_file}') with open(opt.from_file, 'r') as f: data = f.read().splitlines() data = list(chunk(data, batch_size)) if opt.prompt is None: prompts = [ 'a close up of a sheet of pizza on a table.', 'A picture of some lemons on a table.', 'A little girl with a pink bow in her hair eating broccoli.', 'A highly detailed stone bust of Theodoros Kolokotronis', ] else: prompts = [opt.prompt] sub = '' for prompt_id, prompt in enumerate(prompts): if prompt[-1] == '.': prompt = prompt[:-1] data = [batch_size * [prompt + sub]] save_path = os.path.join(outpath, 'normal-depth') os.makedirs(save_path, exist_ok=True) base_count = prompt_id grid_count = prompt_id start_code = None if opt.fixed_code: start_code = torch.randn( [opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f], device=device) precision_scope = autocast if opt.precision == 'autocast' else nullcontext with torch.no_grad(): with precision_scope('cuda'): tic = time.time() all_samples = list() for n in trange(opt.n_iter, desc='Sampling'): for prompts in tqdm(data, desc='data'): uc = None if opt.scale != 1.0: # uc = model.get_learned_conditioning(batch_size * [""]) uc = model.get_learned_conditioning( batch_size * [NEGATIVE_PROMPTS]) if isinstance(prompts, tuple): prompts = list(prompts) c = model.get_learned_conditioning(prompts) shape = [opt.C, opt.H // opt.f, opt.W // opt.f] samples_ddim, _ = sampler.sample( S=50, conditioning=c, batch_size=opt.n_samples, shape=shape, verbose=False, unconditional_guidance_scale=opt.scale, unconditional_conditioning=uc, eta=opt.ddim_eta, x_T=start_code) ''' rgbd= (samples_ddim[0]).permute(1,2,0) rgb = rgbd[...,:3] depth = rgbd[...,3] cv2.imwrite('rgb.png',((rgb.clamp(-1,1).detach().cpu().numpy()+1)[...,::-1] /2 * 255).astype(np.uint8)) cv2.imwrite('depth.png', ((depth.clamp(-1,1).detach().cpu().numpy()+1) /2 * 255).astype(np.uint8)) ''' x_samples_ddim = model.decode_first_stage(samples_ddim) x_samples_ddim = torch.clamp( (x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
sys.path.append('./') # load safety model ''' safety_model_id = "CompVis/stable-diffusion-safety-checker" safety_feature_extractor = AutoFeatureExtractor.from_pretrained(safety_model_id) safety_checker = StableDiffusionSafetyChecker.from_pretrained(safety_model_id) ''' NEGATIVE_PROMPTS = 'ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, deformed, body out of frame, blurry, bad anatomy, blurred, watermark, grainy, signature, cut off, draft.' def chunk(it, size): it = iter(it) return iter(lambda: tuple(islice(it, size)), ()) def numpy_to_pil(images): """ Convert a numpy image or a batch of images to a PIL image. """ if images.ndim == 3: images = images[None, ...] images = (images * 255).round().astype('uint8') pil_images = [Image.fromarray(image) for image in images] return pil_images def load_model_from_config(config, ckpt, verbose=False): print(f'Loading model from {ckpt}') pl_sd = torch.load(ckpt, map_location='cpu') if 'global_step' in pl_sd: print(f"Global Step: {pl_sd['global_step']}") sd = pl_sd['state_dict'] model = instantiate_from_config(config.model) m, u = model.load_state_dict(sd, strict=False) if len(m) > 0 and verbose: print('missing keys:') print(m) if len(u) > 0 and verbose: print('unexpected keys:') print(u) model.cuda() model.eval() return model def put_watermark(img, wm_encoder=None): if wm_encoder is not None: img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) img = wm_encoder.encode(img, 'dwtDct') img = Image.fromarray(img[:, :, ::-1]) return img def load_replacement(x): try: hwc = x.shape y = Image.open('assets/rick.jpeg').convert('RGB').resize( (hwc[1], hwc[0])) y = (np.array(y) / 255.0).astype(x.dtype) assert y.shape == x.shape return y except Exception: return x def check_safety(x_image): return x_image, False def main(): parser = argparse.ArgumentParser() parser.add_argument( '--prompt', type=str, nargs='?', default=None, help='the prompt to render') parser.add_argument( '--save_dir', type=str, nargs='?', help='dir to write results to', default='outputs/txt2img-samples') parser.add_argument( '--skip_grid', action='store_true', help= 'do not save a grid, only individual samples. Helpful when evaluating lots of samples', ) parser.add_argument( '--skip_save', action='store_true', help='do not save individual samples. For speed measurements.', ) parser.add_argument( '--ddim_steps', type=int, default=50, help='number of ddim sampling steps', ) parser.add_argument( '--plms', action='store_true', help='use plms sampling', ) parser.add_argument( '--dpm_solver', action='store_true', help='use dpm_solver sampling', ) parser.add_argument( '--laion400m', action='store_true', help='uses the LAION400M model', ) parser.add_argument( '--fixed_code', action='store_true', help='if enabled, uses the same starting code across samples ', ) parser.add_argument( '--ddim_eta', type=float, default=0.0, help='ddim eta (eta=0.0 corresponds to deterministic sampling', ) parser.add_argument( '--n_iter', type=int, default=1, help='sample this often', ) parser.add_argument( '--H', type=int, default=512, help='image height, in pixel space', ) parser.add_argument( '--W', type=int, default=512, help='image width, in pixel space', ) parser.add_argument( '--C', type=int, default=4, help='latent channels', ) parser.add_argument( '--f', type=int, default=8, help='downsampling factor', ) parser.add_argument( '--n_samples', type=int, default=1, help= 'how many samples to produce for each given prompt. A.k.a. 
batch size', ) parser.add_argument( '--n_rows', type=int, default=0, help='rows in the grid (default: n_samples)', ) parser.add_argument( '--scale', type=float, default=7.5, help= 'unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))', ) parser.add_argument( '--from-file', type=str, help='if specified, load prompts from this file', ) parser.add_argument( '--config', type=str, default='./configs/inference/nd/nd-1.5-inference.yaml', help='path to config which constructs model', ) parser.add_argument( '--ckpt', type=str, default='models/ldm/txt2depth/last.ckpt', help='path to checkpoint of model', ) parser.add_argument( '--seed', type=int, default=42, help='the seed (for reproducible sampling)', ) parser.add_argument( '--precision', type=str, help='evaluate at this precision', choices=['full', 'autocast'], default='autocast') opt = parser.parse_args() seed_everything(opt.seed) ckpt_name = os.path.splitext(os.path.basename(opt.ckpt))[0] outdir = os.path.join(opt.save_dir, ckpt_name) os.makedirs(outdir, exist_ok=True) outpath = outdir # config = OmegaConf.load(f"{opt.config}") # model = load_model_from_config(config, f"{opt.ckpt}") model = build_model('nd', opt.ckpt, strict=False) device = torch.device( 'cuda') if torch.cuda.is_available() else torch.device('cpu') model = model.to(device) if opt.dpm_solver: sampler = DPMSolverSampler(model) elif opt.plms: sampler = PLMSSampler(model) else: sampler = DDIMSampler(model) print( 'Creating invisible watermark encoder (see https://github.com/ShieldMnt/invisible-watermark)...' ) wm = 'StableDiffusionV1' wm_encoder = WatermarkEncoder() wm_encoder.set_watermark('bytes', wm.encode('utf-8')) batch_size = opt.n_samples n_rows = opt.n_rows if opt.n_rows > 0 else batch_size if not opt.from_file: prompt = opt.prompt assert prompt is not None data = [batch_size * [prompt]] else: print(f'reading prompts from {opt.from_file}') with open(opt.from_file, 'r') as f: data = f.read().splitlines() data = list(chunk(data, batch_size)) if opt.prompt is None: prompts = [ 'a close up of a sheet of pizza on a table.', 'A picture of some lemons on a table.', 'A little girl with a pink bow in her hair eating broccoli.', 'A highly detailed stone bust of Theodoros Kolokotronis', ] else: prompts = [opt.prompt] sub = '' for prompt_id, prompt in enumerate(prompts): if prompt[-1] == '.': prompt = prompt[:-1] data = [batch_size * [prompt + sub]] save_path = os.path.join(outpath, 'normal-depth') os.makedirs(save_path, exist_ok=True) base_count = prompt_id grid_count = prompt_id start_code = None if opt.fixed_code: start_code = torch.randn( [opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f], device=device) precision_scope = autocast if opt.precision == 'autocast' else nullcontext with torch.no_grad(): with precision_scope('cuda'): tic = time.time() all_samples = list() for n in trange(opt.n_iter, desc='Sampling'): for prompts in tqdm(data, desc='data'): uc = None if opt.scale != 1.0: # uc = model.get_learned_conditioning(batch_size * [""]) uc = model.get_learned_conditioning( batch_size * [NEGATIVE_PROMPTS]) if isinstance(prompts, tuple): prompts = list(prompts) c = model.get_learned_conditioning(prompts) shape = [opt.C, opt.H // opt.f, opt.W // opt.f] samples_ddim, _ = sampler.sample( S=50, conditioning=c, batch_size=opt.n_samples, shape=shape, verbose=False, unconditional_guidance_scale=opt.scale, unconditional_conditioning=uc, eta=opt.ddim_eta, x_T=start_code) ''' rgbd= (samples_ddim[0]).permute(1,2,0) rgb = rgbd[...,:3] depth = rgbd[...,3] 
cv2.imwrite('rgb.png',((rgb.clamp(-1,1).detach().cpu().numpy()+1)[...,::-1] /2 * 255).astype(np.uint8)) cv2.imwrite('depth.png', ((depth.clamp(-1,1).detach().cpu().numpy()+1) /2 * 255).astype(np.uint8)) ''' x_samples_ddim = model.decode_first_stage(samples_ddim) x_samples_ddim = torch.clamp( (x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
x_samples_ddim = split_rgbd_only_tensor(x_samples_ddim)
8
2023-12-06 07:29:34+00:00
16k
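Illustrative aside (not part of the record above): this record's next_line calls split_rgbd_only_tensor on the decoded 4-channel RGBD output, and the helper is quoted in the record's context. The self-contained sketch below restates that helper and checks its effect on an assumed toy batch, showing why the result has six channels (RGB plus the depth map repeated three times so it can be saved like an image).

# Standalone restatement of the split_rgbd_only_tensor helper quoted in this
# record's context, applied to an assumed random batch for demonstration.
import torch
from einops import repeat

def split_rgbd_only_tensor(x_tensor):
    # Channels 0..2 are RGB, channel 3 is a depth map in [0, 1];
    # the depth channel is tiled to 3 channels for image-style saving.
    rgb, depth = x_tensor[:, :3], x_tensor[:, 3]
    depth_v = repeat(depth[:, None], 'b 1 h w -> b 3 h w')
    return torch.cat([rgb, depth_v], dim=1)

x = torch.rand(2, 4, 64, 64)           # assumed decoded batch: B x 4 x H x W
out = split_rgbd_only_tensor(x)
assert out.shape == (2, 6, 64, 64)     # RGB (3 channels) + repeated depth (3)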
nox-410/tvm.tl
python/tvm/topi/arm_cpu/conv2d_gemm.py
[ { "identifier": "get_const_tuple", "path": "python/tvm/topi/utils.py", "snippet": "def get_const_tuple(in_tuple):\n \"\"\"Verifies input tuple is IntImm or Var, returns tuple of int or Var.\n\n Parameters\n ----------\n in_tuple : tuple of Expr\n The input.\n\n Returns\n -------\n out_tuple : tuple of int\n The output.\n \"\"\"\n ret = []\n ana = None\n for elem in in_tuple:\n if isinstance(elem, (tvm.tir.Var, tvm.tir.expr.Any)):\n ret.append(elem)\n elif not isinstance(elem, (tvm.tir.IntImm, int)):\n ana = tvm.arith.Analyzer() if ana is None else ana\n elem = ana.simplify(elem)\n if not isinstance(elem, tvm.tir.IntImm):\n ret.append(elem)\n else:\n ret.append(get_const_int(elem))\n else:\n ret.append(get_const_int(elem))\n return tuple(ret)" }, { "identifier": "get_const_int", "path": "python/tvm/topi/utils.py", "snippet": "def get_const_int(expr):\n \"\"\"Verifies expr is integer and get the constant value.\n\n Parameters\n ----------\n expr : tvm.Expr or int\n The input expression.\n\n Returns\n -------\n out_value : int\n The output.\n \"\"\"\n if isinstance(expr, Integral):\n return expr\n if not isinstance(expr, tvm.tir.IntImm):\n ana = tvm.arith.Analyzer()\n expr = ana.simplify(expr)\n if not isinstance(expr, tvm.tir.IntImm):\n raise ValueError(\"Expect value to be constant int\")\n return int(expr.value)" }, { "identifier": "get_pad_tuple", "path": "python/tvm/topi/nn/utils.py", "snippet": "def get_pad_tuple(padding, kernel):\n \"\"\"Common code to get the pad option\n\n Parameters\n ----------\n padding : int or str\n Padding size, or ['VALID', 'SAME']\n\n kernel : tuple of int\n Conv kernel size\n\n Returns\n -------\n pad_top : int\n Padding size on top\n\n pad_left : int\n Padding size on left\n\n pad_down : int\n Padding size on down.\n\n pad_right : int\n Padding size on right.\n \"\"\"\n # compute the padding size\n if isinstance(padding, (tuple, list)):\n if len(padding) == 2:\n pad_h = padding[0] * 2\n pad_w = padding[1] * 2\n elif len(padding) == 4:\n return padding[0], padding[1], padding[2], padding[3]\n else:\n raise ValueError(\"Size of padding can only be 2 or 4\")\n elif isinstance(padding, int):\n pad_h = pad_w = padding * 2\n elif padding == \"VALID\":\n pad_h = 0\n pad_w = 0\n elif padding == \"SAME\":\n pad_h = kernel[0] - 1\n pad_w = kernel[1] - 1\n else:\n raise ValueError(f\"Unknown padding option {padding}\")\n pad_top = (pad_h + 1) // 2\n pad_left = (pad_w + 1) // 2\n return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left" }, { "identifier": "gemm_4x4_int8_int8_int32", "path": "python/tvm/topi/arm_cpu/tensor_intrin.py", "snippet": "def gemm_4x4_int8_int8_int32(M, N, K, unroll, in_type):\n \"\"\"\n Int8 4x4 matrix multiplication and accumulation using a sequence of\n umull -> uadalp -> umull2 -> uadalp instructions. This function\n takes two arrays of int8 data type A[4][K] and B[4][K], and produces\n a 4x4 matrix which is equal to A*B'.\n\n The pseudo code is as follows.\n\n .. 
code-block:: c\n\n void gemm_4x4_int8_int8_int32(int8 A[4][K], int8 B[4][K], int32 C[4][4]){\n for (int i = 0; i < 4; i++){\n for (int j = 0; j < 4; j++){\n for (int k = 0; k < K; k++){\n C[i][j] += A[i][k] * B[j][k]\n }\n }\n }\n\n Notes:\n * The tiling strategy is picked to maximize register usage.\n\n Parameters\n ----------\n M : int\n rows of the matrix A\n N : int\n columns of the matrix B\n K : int\n columns of matrix A\n unroll : bool\n Unroll the loop accumulation if True\n in_type : str, {'uint8', 'int8'}\n\n Returns\n -------\n intrin : TensorIntrin\n The ARM uint8/int8 TensorIntrin that can be used in tensorizing schedule\n \"\"\"\n assert in_type in [\"uint8\", \"int8\"]\n A = te.placeholder((K // 16, te.var(\"m\"), 16), dtype=in_type, name=\"A\")\n B = te.placeholder((K // 16, te.var(\"n\"), 16), dtype=in_type, name=\"B\")\n dtype_vec = in_type + \"x16\"\n idxm = tvm.tir.indexmod\n\n k = te.reduce_axis((0, K), \"k\")\n C = te.compute(\n (te.var(\"m\"), te.var(\"n\")),\n lambda x, y: te.sum(\n A[k // 16, x, idxm(k, 16)].astype(\"int32\") * B[k // 16, y, idxm(k, 16)].astype(\"int32\"),\n axis=k,\n ),\n name=\"C\",\n )\n\n a_buffer = tvm.tir.decl_buffer(\n A.shape,\n dtype=in_type,\n name=\"a_buffer\",\n offset_factor=1,\n strides=[te.var(\"sa_1\"), te.var(\"sa_2\"), 1],\n )\n\n b_buffer = tvm.tir.decl_buffer(\n B.shape,\n dtype=in_type,\n name=\"b_buffer\",\n offset_factor=1,\n strides=[te.var(\"sb_1\"), te.var(\"sb_2\"), 1],\n )\n\n c_buffer = tvm.tir.decl_buffer(\n C.shape, dtype=\"int32\", name=\"c_buffer\", offset_factor=1, strides=[te.var(\"sc\"), 1]\n )\n\n # Intrinsics used in the following algorithm\n umull_intrin = \"llvm.aarch64.neon.umull\" if in_type == \"uint8\" else \"llvm.aarch64.neon.smull\"\n uaddlp_intrin = \"llvm.aarch64.neon.uaddlp\" if in_type == \"uint8\" else \"llvm.aarch64.neon.saddlp\"\n addp_intrin = \"llvm.aarch64.neon.addp\"\n\n def uadalp(a, b):\n \"\"\"Add pair and accumulate\n\n Parameters:\n ----------\n a: int16x8 vector\n b: int16x8 vector\n\n Returns:\n --------\n return a int32x4 vector\n\n Pseudocode:\n ----------\n a += (b0+b1, b2+b3, b4+b5, b6+b7)\n \"\"\"\n\n return a + tvm.tir.call_llvm_pure_intrin(\n \"int32x4\", uaddlp_intrin, tvm.tir.const(1, \"uint32\"), b\n )\n\n def umull(a, b):\n \"\"\"Multiply long (higher part)\n\n Parameters:\n ----------\n a: int8x16 vector\n b: int8x16 vector\n\n Returns:\n --------\n return a int16x8 vector\n\n Pseudocode:\n ----------\n c = (a0*b0, a1*b1, a2*b2, a3*b3, a4*b4, a5*b5, a6*b6, a7*b7)\n \"\"\"\n a_high = tvm.tir.call_intrin(\"int8x8\", \"tir.vectorhigh\", a)\n b_high = tvm.tir.call_intrin(\"int8x8\", \"tir.vectorhigh\", b)\n c = tvm.tir.call_llvm_pure_intrin(\n \"int16x8\", umull_intrin, tvm.tir.const(2, \"uint32\"), a_high, b_high\n )\n return c\n\n def umull2(a, b):\n \"\"\"Multiply long (lower part)\n\n Parameters:\n ----------\n a: int8x16 vector\n b: int8x16 vector\n\n Returns:\n --------\n return a int16x8 vector\n\n Pseudocode:\n ----------\n c = (a8*b8, a9*b9, a10*b10, a11*b11, a12*b12, a13*b13, a14*b14, a15*b15)\n \"\"\"\n a_low = tvm.tir.call_intrin(\"int8x8\", \"tir.vectorlow\", a)\n b_low = tvm.tir.call_intrin(\"int8x8\", \"tir.vectorlow\", b)\n c = tvm.tir.call_llvm_pure_intrin(\n \"int16x8\", umull_intrin, tvm.tir.const(2, \"uint32\"), a_low, b_low\n )\n return c\n\n def addp(a, b):\n \"\"\"Add two vectors in pairs\n\n Parameters:\n ----------\n a: int32x4 vector\n b: int32x4 vector\n\n Returns:\n --------\n return a int32x4 vector\n\n Pseudocode:\n ----------\n c = (a0+a1, a2+a3, 
b0+b1, b0+b3)\n \"\"\"\n return tvm.tir.call_llvm_pure_intrin(\n \"int32x4\", addp_intrin, tvm.tir.const(2, \"uint32\"), a, b\n )\n\n def accumulation_loop(M, N, ins, acc, tile_idx):\n \"\"\"Internal tile accumulation. This function\n takes two arrays of int8 data type A[tile_idx][4][16] and B[tile_idx][4][16], produces\n a 4x4 matrix which is equal to A*B' and accumulates into C[4][4]\n\n The pseudo code is as follows.\n\n .. code-block:: c\n\n void gemm_4x4_int8_int8_int32(int8 A[tile_idx][4][K],\n int8 B[tile_idx][4][K],\n int32 C[4][4]){\n for (int i = 0; i < 4; i++){\n for (int j = 0; j < 4; j++){\n for (int k = 0; k < 16; k++){\n C[i][j] += A[tile_idx][i][k] * B[tile_idx][j][k]\n }\n }\n }\n\n Notes:\n * The tiling strategy is picked to maximize register usage.\n\n Parameters:\n ----------\n M : int\n Number of total rows of the output matrix\n N : int\n Number of total columns of the output matrix\n ins : list of tvm.tir.buffer\n Input buffers\n acc : tvm.tir.ir_builder.BufferVar\n Bank of register accumulators\n tiled_idx : int\n Index of a sub-tile of A and B in A[tile_idx][:][:] and B[tile_idx][:][:].\n Please note that 0 <= tile_idx <= K//16\n\n \"\"\"\n a0 = ins[0].vload([tile_idx, 0, 0], dtype_vec)\n a1 = tvm.tir.const(0, \"int8x16\")\n if M > 1:\n a1 = ins[0].vload([tile_idx, 1, 0], dtype_vec)\n a2 = tvm.tir.const(0, \"int8x16\")\n if M > 2:\n a2 = ins[0].vload([tile_idx, 2, 0], dtype_vec)\n a3 = tvm.tir.const(0, \"int8x16\")\n if M > 3:\n a3 = ins[0].vload([tile_idx, 3, 0], dtype_vec)\n\n b0 = ins[1].vload([tile_idx, 0, 0], dtype_vec)\n b1 = tvm.tir.const(0, \"int8x16\")\n if N > 1:\n b1 = ins[1].vload([tile_idx, 1, 0], dtype_vec)\n b2 = tvm.tir.const(0, \"int8x16\")\n if N > 2:\n b2 = ins[1].vload([tile_idx, 2, 0], dtype_vec)\n b3 = tvm.tir.const(0, \"int8x16\")\n if N > 3:\n b3 = ins[1].vload([tile_idx, 3, 0], dtype_vec)\n\n # First half\n # Lower part of a0 * {b0,b1,b2,b3}\n d00 = umull(a0, b0)\n d01 = umull(a0, b1)\n d02 = umull(a0, b2)\n d03 = umull(a0, b3)\n\n # Lower part of a1 * {b0,b1,b2,b3}\n d10 = umull(a1, b0)\n d11 = umull(a1, b1)\n d12 = umull(a1, b2)\n d13 = umull(a1, b3)\n\n # Accumulate\n acc[0] = uadalp(acc[0], d00)\n acc[1] = uadalp(acc[1], d01)\n acc[2] = uadalp(acc[2], d02)\n acc[3] = uadalp(acc[3], d03)\n acc[4] = uadalp(acc[4], d10)\n acc[5] = uadalp(acc[5], d11)\n acc[6] = uadalp(acc[6], d12)\n acc[7] = uadalp(acc[7], d13)\n\n # Higher part of a0 * {b0,b1,b2,b3}\n d00 = umull2(a0, b0)\n d01 = umull2(a0, b1)\n d02 = umull2(a0, b2)\n d03 = umull2(a0, b3)\n\n # Higher part of a1 * {b0,b1,b2,b3}\n d10 = umull2(a1, b0)\n d11 = umull2(a1, b1)\n d12 = umull2(a1, b2)\n d13 = umull2(a1, b3)\n\n # Accumulate again\n acc[0] = uadalp(acc[0], d00)\n acc[1] = uadalp(acc[1], d01)\n acc[2] = uadalp(acc[2], d02)\n acc[3] = uadalp(acc[3], d03)\n acc[4] = uadalp(acc[4], d10)\n acc[5] = uadalp(acc[5], d11)\n acc[6] = uadalp(acc[6], d12)\n acc[7] = uadalp(acc[7], d13)\n\n # Second half\n # Lower part of a2 * {b0,b1,b2,b3}\n d00 = umull(a2, b0)\n d01 = umull(a2, b1)\n d02 = umull(a2, b2)\n d03 = umull(a2, b3)\n\n # Lower part of a3 * {b0,b1,b2,b3}\n d10 = umull(a3, b0)\n d11 = umull(a3, b1)\n d12 = umull(a3, b2)\n d13 = umull(a3, b3)\n\n # Accumulate\n acc[8] = uadalp(acc[8], d00)\n acc[9] = uadalp(acc[9], d01)\n acc[10] = uadalp(acc[10], d02)\n acc[11] = uadalp(acc[11], d03)\n acc[12] = uadalp(acc[12], d10)\n acc[13] = uadalp(acc[13], d11)\n acc[14] = uadalp(acc[14], d12)\n acc[15] = uadalp(acc[15], d13)\n\n # Higher part of a2 * {b0,b1,b2,b3}\n d00 = umull2(a2, b0)\n d01 
= umull2(a2, b1)\n d02 = umull2(a2, b2)\n d03 = umull2(a2, b3)\n\n # Lower part of a3 * {b0,b1,b2,b3}\n d10 = umull2(a3, b0)\n d11 = umull2(a3, b1)\n d12 = umull2(a3, b2)\n d13 = umull2(a3, b3)\n\n # Accumulate\n acc[8] = uadalp(acc[8], d00)\n acc[9] = uadalp(acc[9], d01)\n acc[10] = uadalp(acc[10], d02)\n acc[11] = uadalp(acc[11], d03)\n acc[12] = uadalp(acc[12], d10)\n acc[13] = uadalp(acc[13], d11)\n acc[14] = uadalp(acc[14], d12)\n acc[15] = uadalp(acc[15], d13)\n\n def _intrin_func(ins, outs):\n def _instr():\n ib = tvm.tir.ir_builder.create()\n # Allocate a local buffer (possibly translates to registers)\n acc = ib.allocate(\"int32x4\", 16, name=\"accs\", scope=\"local\")\n m = outs[0].shape[0]\n n = outs[0].shape[1]\n # Initialization\n for i in range(0, 16):\n acc[i] = tvm.tir.const(0, \"int32x4\")\n\n if unroll:\n for i in range(0, int(K // 16)):\n accumulation_loop(M, N, ins, acc, i)\n else:\n with ib.for_range(0, K // 16, name=\"i\") as i:\n accumulation_loop(M, N, ins, acc, i)\n\n # Final accumulations\n # acc[4*r + c] contains the partial accumulations of element C[r][c]\n #\n # In particular:\n # acc[4*r] contains the partial sums of a[r,0:K].*b[0,0:K] -> (a,b,c,d)\n # acc[4*r+1] contains the partial sums of a[r, 0:K].*b[1,0:K] -> (e,f,g,h)\n # acc[4*r+2] contains the partial sums of a[r, 0:K].*b[2,0:K] -> (i,j,k,l)\n # acc[4*r+3] contains the partial sums of a[r, 0:K].*b[3,0:K] -> (m,n,o,p)\n #\n # Please note that 0<= r, c < 4\n\n acc[0] = addp(acc[0], acc[1]) # (a+b, c+d, e+f, g+h)\n acc[1] = addp(acc[2], acc[3]) # (i+j, k+l, m+n, o+p)\n acc[0] = addp(acc[0], acc[1]) # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)\n\n acc[4] = addp(acc[4], acc[5]) # (a+b, c+d, e+f, g+h)\n acc[5] = addp(acc[6], acc[7]) # (i+j, k+l, m+n, o+p)\n acc[4] = addp(acc[4], acc[5]) # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)\n\n acc[8] = addp(acc[8], acc[9]) # (a+b, c+d, e+f, g+h)\n acc[9] = addp(acc[10], acc[11]) # (i+j, k+l, m+n, o+p)\n acc[8] = addp(acc[8], acc[9]) # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)\n\n acc[12] = addp(acc[12], acc[13]) # (a+b, c+d, e+f, g+h)\n acc[13] = addp(acc[14], acc[15]) # (i+j, k+l, m+n, o+p)\n acc[12] = addp(acc[12], acc[13]) # (a+b+c+d, e+f+g+h, i+j+k+l, m+n+o+p)\n\n # Store the result\n if N > 3:\n out_0 = acc[0]\n out_1 = acc[4]\n out_2 = acc[8]\n out_3 = acc[12]\n elif N > 2:\n out_0 = tvm.tir.call_intrin(\"int32x3\", \"tir.reinterpret\", acc[0])\n out_1 = tvm.tir.call_intrin(\"int32x3\", \"tir.reinterpret\", acc[4])\n out_2 = tvm.tir.call_intrin(\"int32x3\", \"tir.reinterpret\", acc[8])\n out_3 = tvm.tir.call_intrin(\"int32x3\", \"tir.reinterpret\", acc[12])\n elif N > 1:\n out_0 = tvm.tir.call_intrin(\"int32x2\", \"tir.reinterpret\", acc[0])\n out_1 = tvm.tir.call_intrin(\"int32x2\", \"tir.reinterpret\", acc[4])\n out_2 = tvm.tir.call_intrin(\"int32x2\", \"tir.reinterpret\", acc[8])\n out_3 = tvm.tir.call_intrin(\"int32x2\", \"tir.reinterpret\", acc[12])\n else:\n out_0 = tvm.tir.call_intrin(\"int32\", \"tir.reinterpret\", acc[0])\n out_1 = tvm.tir.call_intrin(\"int32\", \"tir.reinterpret\", acc[4])\n out_2 = tvm.tir.call_intrin(\"int32\", \"tir.reinterpret\", acc[8])\n out_3 = tvm.tir.call_intrin(\"int32\", \"tir.reinterpret\", acc[12])\n\n ib.emit(outs[0].vstore([0, 0], out_0))\n if M > 1:\n ib.emit(outs[0].vstore([1, 0], out_1))\n if M > 2:\n ib.emit(outs[0].vstore([2, 0], out_2))\n if M > 3:\n ib.emit(outs[0].vstore([3, 0], out_3))\n return ib.get()\n\n # body, reset, update\n return _instr()\n\n buffer_params = {\"offset_factor\": 1}\n return te.decl_tensor_intrin(\n 
C.op,\n _intrin_func,\n binds={A: a_buffer, B: b_buffer, C: c_buffer},\n default_buffer_params=buffer_params,\n )" }, { "identifier": "gemm_acc_4x4_int8_int8_int32", "path": "python/tvm/topi/arm_cpu/tensor_intrin.py", "snippet": "def gemm_acc_4x4_int8_int8_int32(dtype):\n \"\"\"\n Int8 4x4 matrix multiplication and accumulation using sdot/udot\n instructions. This function takes two arrays of int8 datatype\n -- A[4][4] and B[4][4] and produces a 4x4 matrix\n which is equal to A*B'.\n\n The pseudo code is as follows.\n\n .. code-block:: c\n\n void gemm_acc_4x4_int8_int8_int32(int8 A[4][4], int8 B[4][4], int32 C[4][4]){\n for (int i = 0; i < 4; i++){\n for (int j = 0; j < 4; j++){\n for (int k = 0; k < 4; k++){\n C[i][j] += A[i][k] * B[j][k]\n }\n }\n }\n\n Notes:\n * The tiling strategy is picked to maximize register usage.\n\n Parameters\n ----------\n dtype : str, {\"uint8\", \"int8\"}\n Whether it works on unsigned int or signed int\n\n Returns\n -------\n intrin : TensorIntrin\n The Arm TensorIntrin that can be used in tensorizing schedule\n \"\"\"\n assert dtype in [\"uint8\", \"int8\"]\n # This needs to be a variable number of \"rows\" since TVM\n # \"thinks\" I only need to compute one row because of\n # padding\n A = te.placeholder((te.var(\"rows\"), 4), dtype, name=\"A\")\n B = te.placeholder((4, 4), dtype, name=\"B\")\n dtype_vec = dtype + \"x16\"\n\n k = te.reduce_axis((0, 4), name=\"k\")\n C = te.compute(\n (te.var(\"rows\"), 4),\n lambda i, j: te.sum(A[i, k].astype(\"int32\") * B[j, k].astype(\"int32\"), axis=k),\n name=\"C\",\n )\n\n aa_buffer = tvm.tir.decl_buffer(\n A.shape, dtype, name=\"aa_buffer\", offset_factor=1, strides=[te.var(\"sa\"), 1]\n )\n bb_buffer = tvm.tir.decl_buffer(\n B.shape, dtype, name=\"bb_buffer\", offset_factor=1, strides=[te.var(\"sb\"), 1]\n )\n cc_buffer = tvm.tir.decl_buffer(\n C.shape, dtype=\"int32\", name=\"cc_buffer\", offset_factor=1, strides=[te.var(\"sc\"), 1]\n )\n\n llvm_intrin = \"llvm.aarch64.neon.sdot\" if dtype == \"int8\" else \"llvm.aarch64.neon.udot\"\n\n def _intrin_func(ins, outs):\n def _instr(index):\n ib = tvm.tir.ir_builder.create()\n if index == 1:\n for i in range(0, 4):\n ib.emit(outs[0].vstore([i, 0], tvm.tir.const(0, \"int32x4\")))\n return ib.get()\n # Load all the elements of tile A.\n # vec_a = [a, b, c, d,\n # e, f, g, h,\n # l, m, n, o,\n # p, q, r, s];\n vec_a = ins[0].vload([0, 0], dtype_vec)\n\n # Replicate 4 times the i-th row of A. For instance,\n # vec_a[0] = [a, b, c, d,\n # a, b, c, d,\n # a, b, c, d,\n # a, b, c, d,];\n vec_aa = [select_word(vec_a, i, dtype_vec) for i in range(0, 4)]\n\n # Load all the elements of B. Remember that B\n # is transposed:\n # vec_b = [0, 4, 8, 12,\n # 1, 5, 9, 13,\n # 2, 6, 10, 14,\n # 3, 7, 11, 15,];\n vec_b = ins[1].vload([0, 0], dtype_vec)\n\n # Execute the dot product\n for i in range(0, 4):\n vec_c = outs[0].vload([i, 0], \"int32x4\")\n # Compute the product between the i-th row of A\n # and all the rows of B. 
Remember that sdot/udot\n # subdive the input vectors in 16 elements\n # and then take the dot product among each group.\n # The result is stored in a int32x4 register\n #\n # For instance, for i=0, we have:\n # sdot(vec_aa[0], vec_b) = [a*0+b*4+c*8+d*12,\n # a*1+b*5+c*9+d*13,\n # a*2+b*6+c*10+d*14,\n # a*3+b*7+c*11+d*15]\n vdot = tvm.tir.call_llvm_intrin(\n \"int32x4\", llvm_intrin, tvm.tir.const(3, \"uint32\"), vec_c, vec_b, vec_aa[i]\n )\n\n # Store the result\n ib.emit(outs[0].vstore([i, 0], vdot))\n\n return ib.get()\n\n # body, reset, update\n return _instr(0), _instr(1), _instr(2)\n\n buffer_params = {\"offset_factor\": 1}\n return te.decl_tensor_intrin(\n C.op,\n _intrin_func,\n binds={A: aa_buffer, B: bb_buffer, C: cc_buffer},\n default_buffer_params=buffer_params,\n )" }, { "identifier": "gemm_acc_nx16_int8_int8_int32", "path": "python/tvm/topi/arm_cpu/tensor_intrin.py", "snippet": "def gemm_acc_nx16_int8_int8_int32(dtype, rows):\n \"\"\"\n Int8 nx16 matrix multiplication and accumulation using sdot/udot instructions\n This function takes two arrays of int8 datatype -- A[n][4] and\n B[4][16] and produces a rowsx16 matrix which is equal to A*B'\n The pseudo code is as follows.\n\n .. code-block:: c\n\n void mmla_nx16_int8_int8_int32(int8 A[n][16], int8 B[4][16][4], int32 output[n][16]){\n for (int i = 0; i < n; i++){\n for (int j = 0; j < 16; j++){\n for (int k = 0; k < 16; k++){\n out[i][j] += A[i][k] * B[k//4][j][k%4]\n }\n }\n }\n }\n\n Notes:\n * The tile size of B is 16x4. Since the reduction variable k moves between 0 and 16\n we need 4 tiles of B to compute a single row of the output. The first 4 values of\n k will be fetched from B[0][j][k], the second batch of 4 from B[1][j][k] and so on\n * The tiling strategy is picked to maximize register usage.\n\n Parameters\n ----------\n dtype : str, {\"uint8\", \"int8\"}\n Whether it works on unsigned int or signed int\n rows : int\n Number of the output rows \"n\"\n\n Returns\n -------\n intrin : TensorIntrin\n The Arm TensorIntrin that can be used in tensorizing schedule\n \"\"\"\n assert dtype in [\"uint8\", \"int8\"]\n A = te.placeholder((rows, 16), dtype, name=\"A\")\n B = te.placeholder((4, 16, 4), dtype, name=\"B\")\n dtype_vec = dtype + \"x16\"\n idxm = tvm.tir.indexmod\n k = te.reduce_axis((0, 16), name=\"k\")\n C = te.compute(\n (rows, 16),\n lambda i, j: te.sum(\n A[i, k].astype(\"int32\") * B[k // 4, j, idxm(k, 4)].astype(\"int32\"), axis=k\n ),\n name=\"C\",\n )\n\n aa_buffer = tvm.tir.decl_buffer(\n A.shape, dtype, name=\"aa_buffer\", offset_factor=1, strides=[te.var(\"sa\"), 1]\n )\n bb_buffer = tvm.tir.decl_buffer(\n B.shape, dtype, name=\"bb_buffer\", offset_factor=1, strides=[te.var(\"sb0\"), te.var(\"sb1\"), 1]\n )\n cc_buffer = tvm.tir.decl_buffer(\n C.shape, dtype=\"int32\", name=\"cc_buffer\", offset_factor=1, strides=[te.var(\"sc\"), 1]\n )\n\n llvm_intrin = \"llvm.aarch64.neon.sdot\" if dtype == \"int8\" else \"llvm.aarch64.neon.udot\"\n\n def _intrin_func(ins, outs):\n def _instr(index):\n ib = tvm.tir.ir_builder.create()\n if index == 1:\n for i in range(0, rows):\n ib.emit(outs[0].vstore([i, 0], tvm.tir.const(0, \"int32x16\")))\n return ib.get()\n # Iterate on the number of rows of the output\n for k in range(0, rows):\n # Load 16 elements of A\n # vec_a = [a, b, c, d, e, f, g, h, l, m, n, o, p, q, r, s];\n vec_a = ins[0].vload([k, 0], dtype_vec)\n\n # Iterate over each of the 4 rowsx4 tiles of the output\n for j in range(0, 4):\n # Accumulate over each of the 4 (16x4) tiles contained in B\n for i in 
range(0, 4):\n # Replicate a single 4-element group of A (A[k, i:i+4])\n vec_aa = select_word(vec_a, i, dtype_vec)\n\n # Load 4 rows (each rows with 4 elements) from B (B[i:i+4, j:j+4])\n # vec_b = [0, 16, 32, 48,\n # 1, 17, 33, 49,\n # 2, 18, 34, 50,\n # 3, 19, 35, 51,];\n vec_b = ins[1].vload([i, 4 * j, 0], dtype_vec)\n\n # Accumulate in the correct part of the output\n vec_c = outs[0].vload([k, 4 * j], \"int32x4\")\n\n # Compute the dot product between the rowsx4 tile\n # from A and the 4x4 tile from B\n #\n # For instance, for i=0, we have:\n # sdot(vec_aa[0], vec_b) = [a*0+b*16+c*32+d*48,\n # a*1+b*17+c*33+d*49,\n # a*2+b*18+c*34+d*50,\n # a*3+b*19+c*35+d*51]\n vdot = tvm.tir.call_llvm_intrin(\n \"int32x4\", llvm_intrin, tvm.tir.const(3, \"uint32\"), vec_c, vec_b, vec_aa\n )\n ib.emit(outs[0].vstore([k, 4 * j], vdot))\n return ib.get()\n\n # body, reset, update\n return _instr(0), _instr(1), _instr(2)\n\n buffer_params = {\"offset_factor\": 1}\n return te.decl_tensor_intrin(\n C.op,\n _intrin_func,\n binds={A: aa_buffer, B: bb_buffer, C: cc_buffer},\n default_buffer_params=buffer_params,\n )" }, { "identifier": "gemm_acc_2x2_int8_int8_int32", "path": "python/tvm/topi/arm_cpu/tensor_intrin.py", "snippet": "def gemm_acc_2x2_int8_int8_int32(dtype):\n \"\"\"\n Int8 2x2 matrix multiplication using smmla/ummla instructions\n This function takes two arrays of int8 datatype -- A[2][8] and\n B[2][8] and produces a 2x2 matrix which is equal to A*B'\n The pseudo code is as follows.\n\n .. code-block:: c\n\n void mmla_2x2_int8_int8_int32(int8 A[2][8], int8 B[2][8], int32 C[2][2]){\n for (int i = 0; i < 2; i++){\n for (int j = 0; j < 2; j++){\n for (int k = 0; k < 8; k++){\n C[i][j] += A[i][k] * B[j][k]\n }\n }\n }\n\n Parameters\n ----------\n dtype : str, {\"uint8\", \"int8\"}\n Whether it works on unsigned int or signed int\n\n Returns\n -------\n intrin : TensorIntrin\n The Arm TensorIntrin that can be used in tensorizing schedule\n \"\"\"\n assert dtype in [\"uint8\", \"int8\"]\n A = te.placeholder((2, 8), dtype, name=\"A\")\n B = te.placeholder((2, 8), dtype, name=\"B\")\n dtype_vec = dtype + \"x16\"\n\n k = te.reduce_axis((0, 8), name=\"k\")\n C = te.compute(\n (2, 2),\n lambda i, j: te.sum(A[i, k].astype(\"int32\") * B[j, k].astype(\"int32\"), axis=k),\n name=\"C\",\n )\n\n aa_buffer = tvm.tir.decl_buffer(\n A.shape, dtype, name=\"aa_buffer\", offset_factor=1, strides=[te.var(\"sa\"), 1]\n )\n bb_buffer = tvm.tir.decl_buffer(\n B.shape, dtype, name=\"bb_buffer\", offset_factor=1, strides=[te.var(\"sb\"), 1]\n )\n cc_buffer = tvm.tir.decl_buffer(\n C.shape, dtype=\"int32\", name=\"cc_buffer\", offset_factor=1, strides=[te.var(\"sc\"), 1]\n )\n\n llvm_intrin = \"llvm.aarch64.neon.smmla\" if dtype == \"int8\" else \"llvm.aarch64.neon.ummla\"\n\n def _intrin_func(ins, outs):\n def _instr(index):\n ib = tvm.tir.ir_builder.create()\n if index == 1:\n ib.emit(outs[0].vstore([0, 0], tvm.tir.const(0, \"int32x4\")))\n return ib.get()\n # Load in vec_a the two rows of A\n # vec_a = [a, b, c, d, e, f, g, h;\n # i, j, k, l, m, n, o, p,]\n vec_a = ins[0].vload([0, 0], dtype_vec)\n # Load in vec_b the two rows of B\n # vec_b = [0, 2, 4, 6, 8, 10, 12, 14;\n # 1, 3, 5, 7, 9, 11, 13, 14,]\n vec_b = ins[1].vload([0, 0], dtype_vec)\n\n # Execute the matrix multiplication via (s/u)mmla:\n # vec_c = [a*0 + b*2 + c*4 + d*6 +e*8 + f*10 + g*12 + h*14;\n # a*1 + b*3 + c*5 + d*7 +e*9 + f*11 + g*13 + h*15;\n # i*0 + j*2 + k*4 + l*6 +m*8 + n*10 + o*12 + p*14;\n # i*1 + j*3 + k*5 + l*7 +m*9 + n*11 + o*13 + p*15]\n 
vec_c = outs[0].vload([0, 0], \"int32x4\")\n vmmla = tvm.tir.call_llvm_intrin(\n \"int32x4\", llvm_intrin, tvm.tir.const(3, \"uint32\"), vec_c, vec_a, vec_b\n )\n # Store the result\n ib.emit(outs[0].vstore([0, 0], vmmla))\n return ib.get()\n\n # body, reset, update\n return _instr(0), _instr(1), _instr(2)\n\n buffer_params = {\"offset_factor\": 1}\n return te.decl_tensor_intrin(\n C.op,\n _intrin_func,\n binds={A: aa_buffer, B: bb_buffer, C: cc_buffer},\n default_buffer_params=buffer_params,\n )" } ]
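The context snippets above all document ARM tensor intrinsics that compute the same small-tile contract, C[i][j] += sum_k A[i][k] * B[j][k], with int8 inputs accumulated into int32 and B stored transposed. The sketch below is an illustrative NumPy reference for that contract only; it is not taken from the repository and does not model the register-level tiling.

import numpy as np

def gemm_tile_int8_ref(A: np.ndarray, B: np.ndarray) -> np.ndarray:
    # A: (M, K) int8, B: (N, K) int8 (B laid out transposed, as in the
    # intrinsics' pseudo code); result: (M, N) int32 accumulator tile.
    M, K = A.shape
    N, _ = B.shape
    C = np.zeros((M, N), dtype=np.int32)
    for i in range(M):
        for j in range(N):
            for k in range(K):
                C[i, j] += np.int32(A[i, k]) * np.int32(B[j, k])
    return C

rng = np.random.default_rng(0)
A = rng.integers(-128, 127, size=(4, 16), dtype=np.int8)
B = rng.integers(-128, 127, size=(4, 16), dtype=np.int8)
# The scalar loop agrees with a widened matrix product against B transposed.
assert np.array_equal(gemm_tile_int8_ref(A, B),
                      A.astype(np.int32) @ B.astype(np.int32).T)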
import tvm from tvm.target import Target from tvm import te from tvm.topi import nn from tvm.autotvm.task.space import AnnotateEntity, ReorderEntity, OtherOptionEntity from ..utils import get_const_tuple, get_const_int from ..nn.utils import get_pad_tuple from .tensor_intrin import ( gemm_4x4_int8_int8_int32, gemm_acc_4x4_int8_int8_int32, gemm_acc_nx16_int8_int8_int32, gemm_acc_2x2_int8_int8_int32, )
13,310
) zero = tvm.tir.const(0) else: # No need to pack/unpack, execute GEMM directly C = te.compute( (batches, M_padded, N_padded), lambda b, x, y: te.sum( A[b, x, k].astype("int32") * B_interleaved_t[ y // tile_rows_B, k // tile_cols_B, idxm(y, tile_rows_B), idxm(k, tile_cols_B) ].astype("int32"), axis=k, ), name="C", ) # We need to ensure that infer bound pass does not remove the padding # which is necessary for the tensorizations to work. So we need to # add a dummy reference to the padding area of the result zero = ( tvm.tir.const(1, C.dtype) * C[0, M_padded - 1, N_padded - 1] - tvm.tir.const(1, C.dtype) * C[0, M_padded - 1, N_padded - 1] ) # Reshape the result into a convolution output out_shape = (batches, OH, OW, OC) out = te.compute( out_shape, lambda b, x, y, z: (C(b, y + OW * x, z) + zero).astype(out_dtype), name="conv2d_gemm_output", ) return out def schedule_conv2d_gemm_interleaved(cfg, s, out, final_out): """Schedule the conv2d_gemm interleaved strategy""" C = out.op.input_tensors[0] C_interleaved = C.op.input_tensors[0] A_interleaved = C_interleaved.op.input_tensors[0] # Input transform A_interleaved_input = A_interleaved.op.input_tensors[0] if A_interleaved_input.op.name == "A_padded_K" or A_interleaved_input.op.name == "A_padded_M": s[A_interleaved_input].compute_at(s[A_interleaved], A_interleaved.op.axis[3]) s[A_interleaved_input].vectorize(A_interleaved_input.op.axis[2]) s[A_interleaved_input].compute_inline() data_im2col = A_interleaved_input.op.input_tensors[0] else: data_im2col = A_interleaved_input b, m, n = data_im2col.op.axis if data_im2col.op.name == "data_im2col": n_size = data_im2col.shape[2] if n_size % 16 == 0: split_factor = 16 else: split_factor = 8 n_outer, n_inner = s[data_im2col].split(n, split_factor) s[data_im2col].unroll(n_outer) s[data_im2col].vectorize(n_inner) b_m_fused = s[data_im2col].fuse(b, m) s[data_im2col].parallel(b_m_fused) else: s[data_im2col].compute_inline() # Computation(through tensorize) b, xo, yo, xi, yi = C_interleaved.op.axis[0:5] outer_gemm, inner_gemm = cfg["reorder_gemm"].apply(s, C_interleaved, [xo, yo]) b_outer_gemm_fused = s[C_interleaved].fuse(b, outer_gemm) s[C_interleaved].parallel(b_outer_gemm_fused) s[A_interleaved].compute_at(s[C_interleaved], b_outer_gemm_fused) _, _, _, outer_A_interleaved, inner_A_interleaved = A_interleaved.op.axis cfg["A_interleaved_unroll_vec"].apply( s, A_interleaved, [outer_A_interleaved, inner_A_interleaved] ) in_type = A_interleaved.dtype out_type = C.dtype k = C_interleaved.op.reduce_axis[0] _, M, N = C.shape if in_type in ["int8", "uint8"]: target = Target.current(allow_none=False) if target.features.has_matmul_i8: gemm_acc = gemm_acc_2x2_int8_int8_int32(in_type) xi_inner, yi_inner = C_interleaved.op.axis[-2:] k_outer, k_inner = s[C_interleaved].split(k, 8) s[C_interleaved].reorder( b_outer_gemm_fused, inner_gemm, k_outer, xi, yi, xi_inner, yi_inner, k_inner ) s[C_interleaved].tensorize(xi_inner, gemm_acc) s[C_interleaved].unroll(xi) s[C_interleaved].unroll(yi) elif target.features.has_dotprod: gemm_acc = gemm_acc_4x4_int8_int8_int32(in_type) xi_outer, yi_outer, xi_inner, yi_inner = s[C_interleaved].tile( xi, yi, x_factor=8, y_factor=4 ) k_outer, k_inner = s[C_interleaved].split(k, 4) xi_inner_outer, xi_inner_inner = s[C_interleaved].split(xi_inner, 4) s[C_interleaved].reorder( b_outer_gemm_fused, inner_gemm, xi_outer, yi_outer, k_outer, xi_inner_outer, xi_inner_inner, yi_inner, k_inner, ) s[C_interleaved].tensorize(xi_inner_inner, gemm_acc) s[C_interleaved].unroll(xi_inner_outer) elif 
target.features.has_asimd: s[C_interleaved].reorder(yi, xi) K = A_interleaved_input.shape[2] assert in_type in ["int8", "uint8"], "Only int8 and uint8 gemm are supported" unroll = cfg["gemm_quantized_unroll"].val
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, unused-variable, too-many-locals # pylint: disable=unused-argument, redefined-builtin """GEMM Convolution schedule on ARM""" def configure_knobs(cfg, M, K, target): """Configure auto-tuning knobs for the interleaved strategy""" x, y = cfg.axis(M // 4), cfg.axis(K // 16) cfg.define_reorder("reorder_gemm", [x, y], policy="candidate", candidate=[[x, y], [y, x]]) outer_loop, inner_loop = cfg.axis(4), cfg.axis(16) cfg.define_annotate( "A_interleaved_unroll_vec", [outer_loop, inner_loop], policy="try_unroll_vec" ) # Fallback configuration if cfg.is_fallback: cfg["reorder_gemm"] = ReorderEntity([0, 1]) cfg["A_interleaved_unroll_vec"] = AnnotateEntity(["unroll", "vec"]) if not target.features.has_dotprod: cfg.define_knob("gemm_quantized_unroll", [True, False]) if cfg.is_fallback: cfg["gemm_quantized_unroll"] = OtherOptionEntity(False) # Compute function def compute_conv2d_gemm_without_weight_transform( cfg, data, B_interleaved_t, strides, padding, dilation, out_dtype, kernel_size, output_channels, interleave_A, ): """Compute conv2d by transforming the input, executing GEMM and transforming the output back""" batches, IH, IW, IC = get_const_tuple(data.shape) KH, KW = get_const_tuple(kernel_size) OC = get_const_int(output_channels) kernel_area = KH * KW if isinstance(dilation, int): dilation_h = dilation_w = dilation else: dilation_h, dilation_w = get_const_tuple(dilation) dilated_kernel_h = (KH - 1) * dilation_h + 1 dilated_kernel_w = (KW - 1) * dilation_w + 1 pad_top, pad_left, pad_down, pad_right = get_pad_tuple( padding, (dilated_kernel_h, dilated_kernel_w) ) HSTR, WSTR = strides if isinstance(strides, (tuple, list)) else (strides, strides) OH = (IH + pad_top + pad_down - dilated_kernel_h) // HSTR + 1 OW = (IW + pad_left + pad_right - dilated_kernel_w) // WSTR + 1 if pad_top or pad_left: data_pad = nn.pad( data, [0, pad_top, pad_left, 0], [0, pad_down, pad_right, 0], name="data_pad" ) else: data_pad = data # Im2col M = OH * OW K = IC * kernel_area N = OC A_shape = (batches, M, K) if kernel_area == 1: A = tvm.topi.reshape(data_pad, A_shape) else: A = te.compute( A_shape, lambda n, x, y: data_pad[ n, HSTR * (x // OW) + dilation_h * ((y // IC) // KW), WSTR * (x % OW) + dilation_w * ((y // IC) % KW), y % IC, ], name="data_im2col", ) # Pad if necessary N_transformed = B_interleaved_t.shape[0] tile_rows_B = B_interleaved_t.shape[2] tile_cols_B = B_interleaved_t.shape[3] # Select the tiling strategy for A. # The tiling information is chosen to maximize register usage during # the tile computation. 
# # Please refer to: # - https://discuss.tvm.apache.org/t/rfc-improve-quantized-convolution-performance-for-armv8-architectures # pylint: disable=line-too-long # - https://discuss.tvm.apache.org/t/rfc-accelerate-quantized-convolution-through-dot-product # - https://discuss.tvm.apache.org/t/rfc-improve-quantized-convolution-through-mmla-instruction # - Conv2DGemmWeightTransformRel in src/relay/op/nn/convolution.h # In order to have more information # target = Target.current(allow_none=False) if target.features.has_matmul_i8: # If smmla/ummla is enabled, we are loading 8 rows from A. Each row # will contain 8 elements tile_rows_A = 8 tile_cols_A = 8 elif target.features.has_dotprod and interleave_A: # If dot product has been enabled, and we are interleaving A # tile size should be 8x4 tile_rows_A = 8 tile_cols_A = 4 else: # If either there is no dot product or if we are using a native strategy # tile size should be 4x16 tile_rows_A = 4 tile_cols_A = 16 pad_M = 0 pad_K = 0 if M % tile_rows_A != 0: pad_M = tile_rows_A - (M % tile_rows_A) if K % tile_cols_A != 0: pad_K = tile_cols_A - (K % tile_cols_A) M_padded = M + pad_M K_padded = K + pad_K N_padded = N_transformed * tile_rows_B pad_before = (0, 0, 0) pad_after = (0, pad_M, pad_K) if pad_K != 0: A = nn.pad(A, pad_before=pad_before, pad_after=pad_after, name="A_padded_K") elif pad_M != 0: A = nn.pad(A, pad_before=pad_before, pad_after=pad_after, name="A_padded_M") idxm = tvm.tir.indexmod k = te.reduce_axis((0, K_padded), "k") if interleave_A: # Configuration space configure_knobs(cfg, M_padded, K_padded, target) # Pack the input data A_interleaved = te.compute( (batches, M_padded // tile_rows_A, K_padded // tile_cols_A, tile_rows_A, tile_cols_A), lambda b, x, y, z, w: A[b, z + tile_rows_A * x, w + tile_cols_A * y], name="A_interleaved", ) target = Target.current(allow_none=False) if target.features.has_matmul_i8: # Execute GEMM. In the case of mmla, we need to enforce the tiling # from the compute. This is because mmla is doing a tiled computation # as well. So we have a big 8x12 tile, with small 2x2 sub-tiles # generated by mmla. 
In theory we could make the tile 2x2 and # fuse and split during scheduling, but this would not work # because of possible padding C_interleaved = te.compute( ( batches, M_padded // tile_rows_A, N_transformed, tile_rows_A // 2, tile_rows_B // 2, 2, 2, ), lambda b, x, y, w, z, s, t: te.sum( A_interleaved[b, x, k // tile_cols_A, 2 * w + s, idxm(k, tile_cols_A)].astype( "int32" ) * B_interleaved_t[y, k // tile_cols_B, 2 * z + t, idxm(k, tile_cols_B)].astype( "int32" ), axis=k, ), name="C_interleaved", ) # Ensure the padding needed for tensorize does not get removed during tir passes # by adding a dummy reference to the specific padded area of the result zero = ( tvm.tir.const(1, C_interleaved.dtype) * C_interleaved[ batches - 1, M // tile_rows_A, N_transformed - 1, idxm(M, tile_rows_A) // 2, tile_rows_B // 2 - 1, 1, 1, ] - tvm.tir.const(1, C_interleaved.dtype) * C_interleaved[ batches - 1, M // tile_rows_A, N_transformed - 1, idxm(M, tile_rows_A) // 2, tile_rows_B // 2 - 1, 1, 1, ] ) # Unpack the result C = te.compute( (batches, M, N), lambda b, x, y: ( C_interleaved[ b, x // tile_rows_A, y // tile_rows_B, idxm(x, tile_rows_A) // 2, idxm(y, tile_rows_B) // 2, idxm(idxm(x, tile_rows_A), 2), idxm(idxm(y, tile_rows_B), 2), ] + zero ).astype(out_dtype), name="C", ) else: # Execute GEMM C_interleaved = te.compute( (batches, M_padded // tile_rows_A, N_transformed, tile_rows_A, tile_rows_B), lambda b, x, y, w, z: te.sum( A_interleaved[b, x, k // tile_cols_A, w, idxm(k, tile_cols_A)].astype("int32") * B_interleaved_t[y, k // tile_cols_B, z, idxm(k, tile_cols_B)].astype("int32"), axis=k, ), name="C_interleaved", ) # Unpack the result C = te.compute( (batches, M, N), lambda b, x, y: C_interleaved[ b, x // tile_rows_A, y // tile_rows_B, idxm(x, tile_rows_A), idxm(y, tile_rows_B), ].astype(out_dtype), name="C", ) zero = tvm.tir.const(0) else: # No need to pack/unpack, execute GEMM directly C = te.compute( (batches, M_padded, N_padded), lambda b, x, y: te.sum( A[b, x, k].astype("int32") * B_interleaved_t[ y // tile_rows_B, k // tile_cols_B, idxm(y, tile_rows_B), idxm(k, tile_cols_B) ].astype("int32"), axis=k, ), name="C", ) # We need to ensure that infer bound pass does not remove the padding # which is necessary for the tensorizations to work. 
So we need to # add a dummy reference to the padding area of the result zero = ( tvm.tir.const(1, C.dtype) * C[0, M_padded - 1, N_padded - 1] - tvm.tir.const(1, C.dtype) * C[0, M_padded - 1, N_padded - 1] ) # Reshape the result into a convolution output out_shape = (batches, OH, OW, OC) out = te.compute( out_shape, lambda b, x, y, z: (C(b, y + OW * x, z) + zero).astype(out_dtype), name="conv2d_gemm_output", ) return out def schedule_conv2d_gemm_interleaved(cfg, s, out, final_out): """Schedule the conv2d_gemm interleaved strategy""" C = out.op.input_tensors[0] C_interleaved = C.op.input_tensors[0] A_interleaved = C_interleaved.op.input_tensors[0] # Input transform A_interleaved_input = A_interleaved.op.input_tensors[0] if A_interleaved_input.op.name == "A_padded_K" or A_interleaved_input.op.name == "A_padded_M": s[A_interleaved_input].compute_at(s[A_interleaved], A_interleaved.op.axis[3]) s[A_interleaved_input].vectorize(A_interleaved_input.op.axis[2]) s[A_interleaved_input].compute_inline() data_im2col = A_interleaved_input.op.input_tensors[0] else: data_im2col = A_interleaved_input b, m, n = data_im2col.op.axis if data_im2col.op.name == "data_im2col": n_size = data_im2col.shape[2] if n_size % 16 == 0: split_factor = 16 else: split_factor = 8 n_outer, n_inner = s[data_im2col].split(n, split_factor) s[data_im2col].unroll(n_outer) s[data_im2col].vectorize(n_inner) b_m_fused = s[data_im2col].fuse(b, m) s[data_im2col].parallel(b_m_fused) else: s[data_im2col].compute_inline() # Computation(through tensorize) b, xo, yo, xi, yi = C_interleaved.op.axis[0:5] outer_gemm, inner_gemm = cfg["reorder_gemm"].apply(s, C_interleaved, [xo, yo]) b_outer_gemm_fused = s[C_interleaved].fuse(b, outer_gemm) s[C_interleaved].parallel(b_outer_gemm_fused) s[A_interleaved].compute_at(s[C_interleaved], b_outer_gemm_fused) _, _, _, outer_A_interleaved, inner_A_interleaved = A_interleaved.op.axis cfg["A_interleaved_unroll_vec"].apply( s, A_interleaved, [outer_A_interleaved, inner_A_interleaved] ) in_type = A_interleaved.dtype out_type = C.dtype k = C_interleaved.op.reduce_axis[0] _, M, N = C.shape if in_type in ["int8", "uint8"]: target = Target.current(allow_none=False) if target.features.has_matmul_i8: gemm_acc = gemm_acc_2x2_int8_int8_int32(in_type) xi_inner, yi_inner = C_interleaved.op.axis[-2:] k_outer, k_inner = s[C_interleaved].split(k, 8) s[C_interleaved].reorder( b_outer_gemm_fused, inner_gemm, k_outer, xi, yi, xi_inner, yi_inner, k_inner ) s[C_interleaved].tensorize(xi_inner, gemm_acc) s[C_interleaved].unroll(xi) s[C_interleaved].unroll(yi) elif target.features.has_dotprod: gemm_acc = gemm_acc_4x4_int8_int8_int32(in_type) xi_outer, yi_outer, xi_inner, yi_inner = s[C_interleaved].tile( xi, yi, x_factor=8, y_factor=4 ) k_outer, k_inner = s[C_interleaved].split(k, 4) xi_inner_outer, xi_inner_inner = s[C_interleaved].split(xi_inner, 4) s[C_interleaved].reorder( b_outer_gemm_fused, inner_gemm, xi_outer, yi_outer, k_outer, xi_inner_outer, xi_inner_inner, yi_inner, k_inner, ) s[C_interleaved].tensorize(xi_inner_inner, gemm_acc) s[C_interleaved].unroll(xi_inner_outer) elif target.features.has_asimd: s[C_interleaved].reorder(yi, xi) K = A_interleaved_input.shape[2] assert in_type in ["int8", "uint8"], "Only int8 and uint8 gemm are supported" unroll = cfg["gemm_quantized_unroll"].val
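The code payload above lowers conv2d to GEMM by building an im2col matrix (the data_im2col compute) and multiplying it with the flattened, pre-transformed weights. Below is a simplified illustrative sketch of that lowering, assuming NHWC layout, stride 1, and no padding or dilation; it shows the index mapping only and is not the TVM implementation itself.

import numpy as np

def conv2d_im2col_ref(data: np.ndarray, weight: np.ndarray) -> np.ndarray:
    # data: (N, H, W, IC), weight: (KH, KW, IC, OC)
    N, H, W, IC = data.shape
    KH, KW, _, OC = weight.shape
    OH, OW = H - KH + 1, W - KW + 1
    M, K = OH * OW, KH * KW * IC
    # Im2col: every output pixel becomes one row of length KH*KW*IC,
    # with the channel index varying fastest (matching the y % IC term above).
    A = np.zeros((N, M, K), dtype=data.dtype)
    for x in range(M):
        oh, ow = divmod(x, OW)
        patch = data[:, oh:oh + KH, ow:ow + KW, :]   # (N, KH, KW, IC)
        A[:, x, :] = patch.reshape(N, K)
    B = weight.reshape(K, OC)                        # flattened kernel
    C = A @ B                                        # batched GEMM
    return C.reshape(N, OH, OW, OC)                  # back to a conv output

x = np.random.rand(1, 8, 8, 3).astype(np.float32)
w = np.random.rand(3, 3, 3, 4).astype(np.float32)
print(conv2d_im2col_ref(x, w).shape)                 # (1, 6, 6, 4)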
gemm = gemm_4x4_int8_int8_int32(M, N, K, unroll, in_type)
3
2023-12-14 02:37:47+00:00
16k
yolain/ComfyUI-Easy-Use
py/easyNodes.py
[ { "identifier": "advanced_encode", "path": "py/adv_encode.py", "snippet": "def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5,\n apply_to_pooled=True):\n tokenized = clip.tokenize(text, return_word_ids=True)\n if isinstance(clip.cond_stage_model, (SDXLClipModel, SDXLRefinerClipModel, SDXLClipG)):\n embs_l = None\n embs_g = None\n pooled = None\n if 'l' in tokenized and isinstance(clip.cond_stage_model, SDXLClipModel):\n embs_l, _ = advanced_encode_from_tokens(tokenized['l'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x, encode_token_weights_l),\n w_max=w_max,\n return_pooled=False)\n if 'g' in tokenized:\n embs_g, pooled = advanced_encode_from_tokens(tokenized['g'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x,\n encode_token_weights_g),\n w_max=w_max,\n return_pooled=True,\n apply_to_pooled=apply_to_pooled)\n return prepareXL(embs_l, embs_g, pooled, clip_balance)\n else:\n return advanced_encode_from_tokens(tokenized['l'],\n token_normalization,\n weight_interpretation,\n lambda x: (clip.encode_from_tokens({'l': x}), None),\n w_max=w_max)" }, { "identifier": "advanced_encode_XL", "path": "py/adv_encode.py", "snippet": "def advanced_encode_XL(clip, text1, text2, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5,\n apply_to_pooled=True):\n tokenized1 = clip.tokenize(text1, return_word_ids=True)\n tokenized2 = clip.tokenize(text2, return_word_ids=True)\n\n embs_l, _ = advanced_encode_from_tokens(tokenized1['l'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x, encode_token_weights_l),\n w_max=w_max,\n return_pooled=False)\n\n embs_g, pooled = advanced_encode_from_tokens(tokenized2['g'],\n token_normalization,\n weight_interpretation,\n lambda x: encode_token_weights(clip, x, encode_token_weights_g),\n w_max=w_max,\n return_pooled=True,\n apply_to_pooled=apply_to_pooled)\n\n gcd_num = gcd(embs_l.shape[1], embs_g.shape[1])\n repeat_l = int((embs_g.shape[1] / gcd_num) * embs_l.shape[1])\n repeat_g = int((embs_l.shape[1] / gcd_num) * embs_g.shape[1])\n\n return prepareXL(embs_l.expand((-1, repeat_l, -1)), embs_g.expand((-1, repeat_g, -1)), pooled, clip_balance)" }, { "identifier": "BASE_RESOLUTIONS", "path": "py/config.py", "snippet": "BASE_RESOLUTIONS = [\n (\"自定义\", \"自定义\"),\n (512, 512),\n (512, 768),\n (768, 512),\n (576, 1024),\n (768, 1024),\n (768, 1280),\n (768, 1344),\n (768, 1536),\n (816, 1920),\n (832, 1152),\n (896, 1152),\n (896, 1088),\n (1024, 1024),\n (1024, 576),\n (1024, 768),\n (1080, 1920),\n (1440, 2560),\n (1088, 896),\n (1152, 832),\n (1152, 896),\n (1280, 768),\n (1344, 768),\n (1536, 640),\n (1536, 768),\n (1920, 816),\n (1920, 1080),\n (2560, 1440),\n]" }, { "identifier": "log_node_info", "path": "py/log.py", "snippet": "def log_node_info(node_name, message=None):\n \"\"\"Logs an info message.\"\"\"\n _log_node(COLORS_FG[\"CYAN\"], node_name, message)" }, { "identifier": "log_node_error", "path": "py/log.py", "snippet": "def log_node_error(node_name, message=None):\n \"\"\"Logs an warn message.\"\"\"\n _log_node(COLORS_FG[\"RED\"], node_name, message)" }, { "identifier": "log_node_warn", "path": "py/log.py", "snippet": "def log_node_warn(node_name, message=None):\n \"\"\"Logs an warn message.\"\"\"\n _log_node(COLORS_FG[\"YELLOW\"], node_name, message)" }, { "identifier": "log_node_success", "path": "py/log.py", "snippet": "def log_node_success(node_name, 
message=None):\n \"\"\"Logs a success message.\"\"\"\n _log_node(COLORS_FG[\"GREEN\"], node_name, message)" }, { "identifier": "process_with_loras", "path": "py/wildcards.py", "snippet": "def process_with_loras(wildcard_opt, model, clip, title=\"Positive\", seed=None, can_load_lora=True, pipe_lora_stack=[]):\n lora_name_cache = []\n\n pass1 = process(wildcard_opt, seed)\n loras = extract_lora_values(pass1)\n pass2 = remove_lora_tags(pass1)\n\n has_noodle_key = True if \"__\" in wildcard_opt else False\n has_loras = True if loras != [] else False\n show_wildcard_prompt = True if has_noodle_key or has_loras else False\n\n for lora_name, model_weight, clip_weight, lbw, lbw_a, lbw_b in loras:\n if (lora_name.split('.')[-1]) not in folder_paths.supported_pt_extensions:\n lora_name = lora_name+\".safetensors\"\n\n lora_name = resolve_lora_name(lora_name_cache, lora_name)\n\n path = folder_paths.get_full_path(\"loras\", lora_name)\n\n if path is not None:\n print(f\"LORA: {lora_name}: {model_weight}, {clip_weight}, LBW={lbw}, A={lbw_a}, B={lbw_b}\")\n\n def default_lora():\n return nodes.LoraLoader().load_lora(model, clip, lora_name, model_weight, clip_weight)\n\n if lbw is not None:\n cls = nodes.NODE_CLASS_MAPPINGS['LoraLoaderBlockWeight //Inspire']\n if can_load_lora:\n model, clip, _ = cls().doit(model, clip, lora_name, model_weight, clip_weight, False, 0, lbw_a, lbw_b, \"\", lbw)\n pipe_lora_stack.append({\n \"lora_name\": lora_name, \"model\": model, \"clip\": clip, \"lora_model_strength\": model_weight,\n \"lora_clip_strength\": clip_weight,\n \"lbw_a\": lbw_a,\n \"lbw_b\": lbw_b,\n \"lbw\": lbw\n })\n else:\n pipe_lora_stack.append({\"lora_name\": lora_name, \"model\": model, \"clip\": clip, \"lora_model_strength\": model_weight, \"lora_clip_strength\": clip_weight})\n if can_load_lora:\n model, clip = default_lora()\n else:\n print(f\"LORA NOT FOUND: {lora_name}\")\n\n # print(f\"{title}: {pass2}\")\n # print(f'{title}_decode:', pass1)\n\n return model, clip, pass2, pass1, show_wildcard_prompt, pipe_lora_stack" }, { "identifier": "get_wildcard_list", "path": "py/wildcards.py", "snippet": "def get_wildcard_list():\n return [f\"__{x}__\" for x in easy_wildcard_dict.keys()]" }, { "identifier": "sample_dpmpp_2s_ancestral", "path": "py/gradual_latent_hires_fix.py", "snippet": "@torch.no_grad()\ndef sample_dpmpp_2s_ancestral(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n eta=1.0,\n s_noise=1.0,\n noise_sampler=None,\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n \"\"\"Ancestral sampling with DPM-Solver++(2S) second-order steps.\"\"\"\n extra_args = {} if extra_args is None else extra_args\n s_in = x.new_ones([x.shape[0]])\n sigma_fn = lambda t: t.neg().exp()\n t_fn = lambda sigma: sigma.log().neg()\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)\n if callback is not None:\n callback({\"x\": x, \"i\": i, 
\"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n if sigma_down == 0:\n # Euler method\n d = to_d(x, sigmas[i], denoised)\n dt = sigma_down - sigmas[i]\n x = x + d * dt\n else:\n # DPM-Solver++(2S)\n t, t_next = t_fn(sigmas[i]), t_fn(sigma_down)\n r = 1 / 2\n h = t_next - t\n s = t + r * h\n x_2 = (sigma_fn(s) / sigma_fn(t)) * x - (-h * r).expm1() * denoised\n denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args)\n x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised_2\n # Noise addition\n if sigmas[i + 1] > 0:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n noise_sampler = default_noise_sampler(x)\n noise = noise_sampler(sigmas[i], sigmas[i + 1])\n x = x + noise * sigma_up * s_noise\n return x" }, { "identifier": "sample_dpmpp_2m_sde", "path": "py/gradual_latent_hires_fix.py", "snippet": "@torch.no_grad()\ndef sample_dpmpp_2m_sde(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n eta=1.0,\n s_noise=1.0,\n noise_sampler=None,\n solver_type=\"midpoint\",\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n \"\"\"DPM-Solver++(2M) SDE.\"\"\"\n\n if solver_type not in {\"heun\", \"midpoint\"}:\n raise ValueError(\"solver_type must be 'heun' or 'midpoint'\")\n\n seed = extra_args.get(\"seed\", None)\n sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()\n extra_args = {} if extra_args is None else extra_args\n s_in = x.new_ones([x.shape[0]])\n\n old_denoised = None\n h_last = None\n h = None\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n if callback is not None:\n callback({\"x\": x, \"i\": i, \"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n if sigmas[i + 1] == 0:\n # Denoising step\n x = denoised\n else:\n # DPM-Solver++(2M) SDE\n t, s = -sigmas[i].log(), -sigmas[i + 1].log()\n h = s - t\n eta_h = eta * h\n\n x = sigmas[i + 1] / sigmas[i] * (-eta_h).exp() * x + (-h - eta_h).expm1().neg() * denoised\n\n if old_denoised is not None:\n r = h_last / h\n if solver_type == \"heun\":\n x = x + ((-h - eta_h).expm1().neg() / (-h - eta_h) + 1) * (1 / r) * (denoised - old_denoised)\n elif solver_type == \"midpoint\":\n x = x + 0.5 * (-h - eta_h).expm1().neg() * (1 / r) * (denoised - old_denoised)\n\n if eta:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n denoised = None # 次ステップとサイズがあわないのでとりあえずNoneにしておく。\n noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True)\n x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * 
sigmas[i + 1] * (-2 * eta_h).expm1().neg().sqrt() * s_noise\n\n old_denoised = denoised\n h_last = h\n return x" }, { "identifier": "sample_lcm", "path": "py/gradual_latent_hires_fix.py", "snippet": "@torch.no_grad()\ndef sample_lcm(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n noise_sampler=None,\n eta=None,\n s_noise=None,\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n extra_args = {} if extra_args is None else extra_args\n s_in = x.new_ones([x.shape[0]])\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n if callback is not None:\n callback({\"x\": x, \"i\": i, \"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n\n x = denoised\n if sigmas[i + 1] > 0:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n noise_sampler = default_noise_sampler(x)\n x += sigmas[i + 1] * noise_sampler(sigmas[i], sigmas[i + 1])\n\n return x" }, { "identifier": "sample_euler_ancestral", "path": "py/gradual_latent_hires_fix.py", "snippet": "@torch.no_grad()\ndef sample_euler_ancestral(\n model,\n x,\n sigmas,\n extra_args=None,\n callback=None,\n disable=None,\n eta=1.0,\n s_noise=1.0,\n noise_sampler=None,\n upscale_ratio=2.0,\n start_step=5,\n end_step=15,\n upscale_n_step=3,\n unsharp_kernel_size=3,\n unsharp_sigma=0.5,\n unsharp_strength=0.0,\n):\n \"\"\"Ancestral sampling with Euler method steps.\"\"\"\n extra_args = {} if extra_args is None else extra_args\n noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler\n s_in = x.new_ones([x.shape[0]])\n\n # make upscale info\n upscale_steps = []\n step = start_step - 1\n while step < end_step - 1:\n upscale_steps.append(step)\n step += upscale_n_step\n height, width = x.shape[2:]\n upscale_shapes = [\n (int(height * (((upscale_ratio - 1) / i) + 1)), int(width * (((upscale_ratio - 1) / i) + 1)))\n for i in reversed(range(1, len(upscale_steps) + 1))\n ]\n upscale_info = {k: v for k, v in zip(upscale_steps, upscale_shapes)}\n\n for i in trange(len(sigmas) - 1, disable=disable):\n denoised = model(x, sigmas[i] * s_in, **extra_args)\n sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)\n if callback is not None:\n callback({\"x\": x, \"i\": i, \"sigma\": sigmas[i], \"sigma_hat\": sigmas[i], \"denoised\": denoised})\n d = to_d(x, sigmas[i], denoised)\n # Euler method\n dt = sigma_down - sigmas[i]\n x = x + d * dt\n if sigmas[i + 1] > 0:\n # Resize\n if i in upscale_info:\n x = torch.nn.functional.interpolate(x, size=upscale_info[i], mode=\"bicubic\", align_corners=False)\n if unsharp_strength > 0:\n blurred = gaussian_blur(x, kernel_size=unsharp_kernel_size, sigma=unsharp_sigma)\n x = x + unsharp_strength * (x - blurred)\n\n noise_sampler = 
default_noise_sampler(x)\n noise = noise_sampler(sigmas[i], sigmas[i + 1])\n x = x + noise * sigma_up * s_noise\n return x" }, { "identifier": "DynThresh", "path": "py/dynthres_core.py", "snippet": "class DynThresh:\n\n Modes = [\"Constant\", \"Linear Down\", \"Cosine Down\", \"Half Cosine Down\", \"Linear Up\", \"Cosine Up\", \"Half Cosine Up\", \"Power Up\", \"Power Down\", \"Linear Repeating\", \"Cosine Repeating\", \"Sawtooth\"]\n Startpoints = [\"MEAN\", \"ZERO\"]\n Variabilities = [\"AD\", \"STD\"]\n\n def __init__(self, mimic_scale, threshold_percentile, mimic_mode, mimic_scale_min, cfg_mode, cfg_scale_min, sched_val, experiment_mode, max_steps, separate_feature_channels, scaling_startpoint, variability_measure, interpolate_phi):\n self.mimic_scale = mimic_scale\n self.threshold_percentile = threshold_percentile\n self.mimic_mode = mimic_mode\n self.cfg_mode = cfg_mode\n self.max_steps = max_steps\n self.cfg_scale_min = cfg_scale_min\n self.mimic_scale_min = mimic_scale_min\n self.experiment_mode = experiment_mode\n self.sched_val = sched_val\n self.sep_feat_channels = separate_feature_channels\n self.scaling_startpoint = scaling_startpoint\n self.variability_measure = variability_measure\n self.interpolate_phi = interpolate_phi\n\n def interpret_scale(self, scale, mode, min):\n scale -= min\n max = self.max_steps - 1\n frac = self.step / max\n if mode == \"Constant\":\n pass\n elif mode == \"Linear Down\":\n scale *= 1.0 - frac\n elif mode == \"Half Cosine Down\":\n scale *= math.cos(frac)\n elif mode == \"Cosine Down\":\n scale *= math.cos(frac * 1.5707)\n elif mode == \"Linear Up\":\n scale *= frac\n elif mode == \"Half Cosine Up\":\n scale *= 1.0 - math.cos(frac)\n elif mode == \"Cosine Up\":\n scale *= 1.0 - math.cos(frac * 1.5707)\n elif mode == \"Power Up\":\n scale *= math.pow(frac, self.sched_val)\n elif mode == \"Power Down\":\n scale *= 1.0 - math.pow(frac, self.sched_val)\n elif mode == \"Linear Repeating\":\n portion = (frac * self.sched_val) % 1.0\n scale *= (0.5 - portion) * 2 if portion < 0.5 else (portion - 0.5) * 2\n elif mode == \"Cosine Repeating\":\n scale *= math.cos(frac * 6.28318 * self.sched_val) * 0.5 + 0.5\n elif mode == \"Sawtooth\":\n scale *= (frac * self.sched_val) % 1.0\n scale += min\n return scale\n\n def dynthresh(self, cond, uncond, cfg_scale, weights):\n mimic_scale = self.interpret_scale(self.mimic_scale, self.mimic_mode, self.mimic_scale_min)\n cfg_scale = self.interpret_scale(cfg_scale, self.cfg_mode, self.cfg_scale_min)\n # uncond shape is (batch, 4, height, width)\n conds_per_batch = cond.shape[0] / uncond.shape[0]\n assert conds_per_batch == int(conds_per_batch), \"Expected # of conds per batch to be constant across batches\"\n cond_stacked = cond.reshape((-1, int(conds_per_batch)) + uncond.shape[1:])\n\n ### Normal first part of the CFG Scale logic, basically\n diff = cond_stacked - uncond.unsqueeze(1)\n if weights is not None:\n diff = diff * weights\n relative = diff.sum(1)\n\n ### Get the normal result for both mimic and normal scale\n mim_target = uncond + relative * mimic_scale\n cfg_target = uncond + relative * cfg_scale\n ### If we weren't doing mimic scale, we'd just return cfg_target here\n\n ### Now recenter the values relative to their average rather than absolute, to allow scaling from average\n mim_flattened = mim_target.flatten(2)\n cfg_flattened = cfg_target.flatten(2)\n mim_means = mim_flattened.mean(dim=2).unsqueeze(2)\n cfg_means = cfg_flattened.mean(dim=2).unsqueeze(2)\n mim_centered = mim_flattened - mim_means\n 
cfg_centered = cfg_flattened - cfg_means\n\n if self.sep_feat_channels:\n if self.variability_measure == 'STD':\n mim_scaleref = mim_centered.std(dim=2).unsqueeze(2)\n cfg_scaleref = cfg_centered.std(dim=2).unsqueeze(2)\n else: # 'AD'\n mim_scaleref = mim_centered.abs().max(dim=2).values.unsqueeze(2)\n cfg_scaleref = torch.quantile(cfg_centered.abs(), self.threshold_percentile, dim=2).unsqueeze(2)\n\n else:\n if self.variability_measure == 'STD':\n mim_scaleref = mim_centered.std()\n cfg_scaleref = cfg_centered.std()\n else: # 'AD'\n mim_scaleref = mim_centered.abs().max()\n cfg_scaleref = torch.quantile(cfg_centered.abs(), self.threshold_percentile)\n\n if self.scaling_startpoint == 'ZERO':\n scaling_factor = mim_scaleref / cfg_scaleref\n result = cfg_flattened * scaling_factor\n\n else: # 'MEAN'\n if self.variability_measure == 'STD':\n cfg_renormalized = (cfg_centered / cfg_scaleref) * mim_scaleref\n else: # 'AD'\n ### Get the maximum value of all datapoints (with an optional threshold percentile on the uncond)\n max_scaleref = torch.maximum(mim_scaleref, cfg_scaleref)\n ### Clamp to the max\n cfg_clamped = cfg_centered.clamp(-max_scaleref, max_scaleref)\n ### Now shrink from the max to normalize and grow to the mimic scale (instead of the CFG scale)\n cfg_renormalized = (cfg_clamped / max_scaleref) * mim_scaleref\n\n ### Now add it back onto the averages to get into real scale again and return\n result = cfg_renormalized + cfg_means\n\n actual_res = result.unflatten(2, mim_target.shape[2:])\n\n if self.interpolate_phi != 1.0:\n actual_res = actual_res * self.interpolate_phi + cfg_target * (1.0 - self.interpolate_phi)\n\n if self.experiment_mode == 1:\n num = actual_res.cpu().numpy()\n for y in range(0, 64):\n for x in range (0, 64):\n if num[0][0][y][x] > 1.0:\n num[0][1][y][x] *= 0.5\n if num[0][1][y][x] > 1.0:\n num[0][1][y][x] *= 0.5\n if num[0][2][y][x] > 1.5:\n num[0][2][y][x] *= 0.5\n actual_res = torch.from_numpy(num).to(device=uncond.device)\n elif self.experiment_mode == 2:\n num = actual_res.cpu().numpy()\n for y in range(0, 64):\n for x in range (0, 64):\n over_scale = False\n for z in range(0, 4):\n if abs(num[0][z][y][x]) > 1.5:\n over_scale = True\n if over_scale:\n for z in range(0, 4):\n num[0][z][y][x] *= 0.7\n actual_res = torch.from_numpy(num).to(device=uncond.device)\n elif self.experiment_mode == 3:\n coefs = torch.tensor([\n # R G B W\n [0.298, 0.207, 0.208, 0.0], # L1\n [0.187, 0.286, 0.173, 0.0], # L2\n [-0.158, 0.189, 0.264, 0.0], # L3\n [-0.184, -0.271, -0.473, 1.0], # L4\n ], device=uncond.device)\n res_rgb = torch.einsum(\"laxy,ab -> lbxy\", actual_res, coefs)\n max_r, max_g, max_b, max_w = res_rgb[0][0].max(), res_rgb[0][1].max(), res_rgb[0][2].max(), res_rgb[0][3].max()\n max_rgb = max(max_r, max_g, max_b)\n print(f\"test max = r={max_r}, g={max_g}, b={max_b}, w={max_w}, rgb={max_rgb}\")\n if self.step / (self.max_steps - 1) > 0.2:\n if max_rgb < 2.0 and max_w < 3.0:\n res_rgb /= max_rgb / 2.4\n else:\n if max_rgb > 2.4 and max_w > 3.0:\n res_rgb /= max_rgb / 2.4\n actual_res = torch.einsum(\"laxy,ab -> lbxy\", res_rgb, coefs.inverse())\n\n return actual_res" } ]
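The gradual_latent_hires_fix samplers quoted above all build the same step-to-resolution schedule (the upscale_steps / upscale_shapes / upscale_info block) before looping over sigmas. The standalone sketch below reproduces just that schedule construction with the defaults visible in those signatures (upscale_ratio=2.0, start_step=5, end_step=15, upscale_n_step=3); the 64x64 latent size is an illustrative assumption.

def build_upscale_info(height, width, upscale_ratio=2.0,
                       start_step=5, end_step=15, upscale_n_step=3):
    # 0-based sampler step indices at which the latent gets resized.
    upscale_steps = []
    step = start_step - 1
    while step < end_step - 1:
        upscale_steps.append(step)
        step += upscale_n_step
    # Target shapes grow toward the full upscale_ratio, with the final
    # listed step reaching height*ratio x width*ratio.
    upscale_shapes = [
        (int(height * (((upscale_ratio - 1) / i) + 1)),
         int(width * (((upscale_ratio - 1) / i) + 1)))
        for i in reversed(range(1, len(upscale_steps) + 1))
    ]
    return dict(zip(upscale_steps, upscale_shapes))

print(build_upscale_info(64, 64))
# {4: (80, 80), 7: (85, 85), 10: (96, 96), 13: (128, 128)}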
import sys import os import re import json import time import math import torch import psutil import random import datetime import comfy.sd import comfy.utils import numpy as np import folder_paths import comfy.samplers import comfy.controlnet import latent_preview import comfy.model_base import comfy.model_management from pathlib import Path from comfy.sd import CLIP, VAE from comfy.cli_args import args from urllib.request import urlopen from collections import defaultdict from PIL.PngImagePlugin import PngInfo from PIL import Image, ImageDraw, ImageFont from comfy.model_patcher import ModelPatcher from comfy_extras.chainner_models import model_loading from typing import Dict, List, Optional, Tuple, Union, Any from .adv_encode import advanced_encode, advanced_encode_XL from server import PromptServer from nodes import VAELoader, MAX_RESOLUTION, RepeatLatentBatch, NODE_CLASS_MAPPINGS as ALL_NODE_CLASS_MAPPINGS, ConditioningSetMask from comfy_extras.nodes_mask import LatentCompositeMasked from .config import BASE_RESOLUTIONS from .log import log_node_info, log_node_error, log_node_warn, log_node_success from .wildcards import process_with_loras, get_wildcard_list from comfy_extras.nodes_stable3d import camera_embeddings from .gradual_latent_hires_fix import sample_dpmpp_2s_ancestral, sample_dpmpp_2m_sde, sample_lcm, sample_euler_ancestral from .dynthres_core import DynThresh
10886
self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image( plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise, x_value, y_value) self.num += 1 else: # ttNl(f'{CC.GREY}X: {x_value_label}').t(f'Plot Values {self.num}/{self.total} ->').p() self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image( plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise, x_value) self.num += 1 # Rearrange latent array to match preview image grid self.latents_plot = self.rearrange_tensors(self.latents_plot, self.num_cols, self.num_rows) # Concatenate the tensors along the first dimension (dim=0) self.latents_plot = torch.cat(self.latents_plot, dim=0) return self.latents_plot def plot_images_and_labels(self): # Calculate the background dimensions bg_width, bg_height, x_offset_initial, y_offset = self.calculate_background_dimensions() # Create the white background image background = Image.new('RGBA', (int(bg_width), int(bg_height)), color=(255, 255, 255, 255)) output_image = [] for row_index in range(self.num_rows): x_offset = x_offset_initial for col_index in range(self.num_cols): index = col_index * self.num_rows + row_index img = self.image_list[index] output_image.append(sampler.pil2tensor(img)) background.paste(img, (x_offset, y_offset)) # Handle X label if row_index == 0 and self.x_type != "None": label_bg = self.create_label(img, self.x_label[col_index], int(48 * img.width / 512)) label_y = (y_offset - label_bg.height) // 2 background.alpha_composite(label_bg, (x_offset, label_y)) # Handle Y label if col_index == 0 and self.y_type != "None": label_bg = self.create_label(img, self.y_label[row_index], int(48 * img.height / 512), False) label_bg = label_bg.rotate(90, expand=True) label_x = (x_offset - label_bg.width) // 2 label_y = y_offset + (img.height - label_bg.height) // 2 background.alpha_composite(label_bg, (label_x, label_y)) x_offset += img.width + self.grid_spacing y_offset += img.height + self.grid_spacing return (sampler.pil2tensor(background), output_image) easyCache = easyLoader() sampler = easySampler() def check_link_to_clip(node_id, clip_id, visited=None, node=None): """Check if a given node links directly or indirectly to a loader node.""" if visited is None: visited = set() if node_id in visited: return False visited.add(node_id) if "pipe" in node["inputs"]: link_ids = node["inputs"]["pipe"] for id in link_ids: if id != 0 and id == str(clip_id): return True return False def find_nearest_steps(clip_id, prompt): """Find the nearest KSampler or preSampling node that references the given id.""" for id in prompt: node = prompt[id] if "Sampler" in node["class_type"] or "sampler" in node["class_type"] or "Sampling" in node["class_type"]: # Check if this KSampler node directly or indirectly references the given CLIPTextEncode node if check_link_to_clip(id, clip_id, None, node): steps = node["inputs"]["steps"] if "steps" in node["inputs"] else 1 return steps return 1 def find_wildcards_seed(text, prompt): if "__" in text: for i in prompt: if "wildcards" in prompt[i]['class_type'] and text == prompt[i]['inputs']['text']: return prompt[i]['inputs']['seed_num'] if "seed_num" in prompt[i]['inputs'] else None else: return None class easySave: def __init__(self, my_unique_id=0, prompt=None, extra_pnginfo=None, number_padding=5, overwrite_existing=False, 
output_dir=folder_paths.get_temp_directory()): self.number_padding = int(number_padding) if number_padding not in [None, "None", 0] else None self.overwrite_existing = overwrite_existing self.my_unique_id = my_unique_id self.prompt = prompt self.extra_pnginfo = extra_pnginfo self.type = 'temp' self.output_dir = output_dir if self.output_dir != folder_paths.get_temp_directory(): self.output_dir = self.folder_parser(self.output_dir, self.prompt, self.my_unique_id) if not os.path.exists(self.output_dir): self._create_directory(self.output_dir) @staticmethod def _create_directory(folder: str): """Try to create the directory and log the status.""" log_node_warn("", f"Folder {folder} does not exist. Attempting to create...") if not os.path.exists(folder): try: os.makedirs(folder) log_node_success("",f"{folder} Created Successfully") except OSError:
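In the grid-assembly code above, results are stored column-major (index = col_index * num_rows + row_index, i.e. every Y value for the first X value, then every Y value for the next X value), while the preview image is pasted row by row, so the latent list is reordered with rearrange_tensors before concatenation. A minimal standalone sketch of that reordering, with illustrative names:

def rearrange_column_major_to_row_major(items, num_cols, num_rows):
    """Reorder a column-major flat list (index = col * num_rows + row)
    into row-major order (index = row * num_cols + col)."""
    return [items[col * num_rows + row]
            for row in range(num_rows)
            for col in range(num_cols)]

# 2 columns x 3 rows, filled column by column:
column_major = ["x0y0", "x0y1", "x0y2", "x1y0", "x1y1", "x1y2"]
row_major = rearrange_column_major_to_row_major(column_major, num_cols=2, num_rows=3)
# row_major == ["x0y0", "x1y0", "x0y1", "x1y1", "x0y2", "x1y2"]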
# 加载器 class easyLoader: def __init__(self): self.loaded_objects = { "ckpt": defaultdict(tuple), # {ckpt_name: (model, ...)} "clip": defaultdict(tuple), "clip_vision": defaultdict(tuple), "bvae": defaultdict(tuple), "vae": defaultdict(object), "lora": defaultdict(dict), # {lora_name: {UID: (model_lora, clip_lora)}} } self.memory_threshold = self.determine_memory_threshold(0.7) def clean_values(self, values: str): original_values = values.split("; ") cleaned_values = [] for value in original_values: cleaned_value = value.strip(';').strip() if cleaned_value == "": continue try: cleaned_value = int(cleaned_value) except ValueError: try: cleaned_value = float(cleaned_value) except ValueError: pass cleaned_values.append(cleaned_value) return cleaned_values def clear_unused_objects(self, desired_names: set, object_type: str): keys = set(self.loaded_objects[object_type].keys()) for key in keys - desired_names: del self.loaded_objects[object_type][key] def get_input_value(self, entry, key): val = entry["inputs"][key] return val if isinstance(val, str) else val[0] def process_pipe_loader(self, entry, desired_ckpt_names, desired_vae_names, desired_lora_names, desired_lora_settings, num_loras=3, suffix=""): for idx in range(1, num_loras + 1): lora_name_key = f"{suffix}lora{idx}_name" desired_lora_names.add(self.get_input_value(entry, lora_name_key)) setting = f'{self.get_input_value(entry, lora_name_key)};{entry["inputs"][f"{suffix}lora{idx}_model_strength"]};{entry["inputs"][f"{suffix}lora{idx}_clip_strength"]}' desired_lora_settings.add(setting) desired_ckpt_names.add(self.get_input_value(entry, f"{suffix}ckpt_name")) desired_vae_names.add(self.get_input_value(entry, f"{suffix}vae_name")) def update_loaded_objects(self, prompt): desired_ckpt_names = set() desired_vae_names = set() desired_lora_names = set() desired_lora_settings = set() for entry in prompt.values(): class_type = entry["class_type"] if class_type == "easy a1111Loader" or class_type == "easy comfyLoader": lora_name = self.get_input_value(entry, "lora_name") desired_lora_names.add(lora_name) setting = f'{lora_name};{entry["inputs"]["lora_model_strength"]};{entry["inputs"]["lora_clip_strength"]}' desired_lora_settings.add(setting) desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name")) desired_vae_names.add(self.get_input_value(entry, "vae_name")) elif class_type == "easy zero123Loader" or class_type == 'easy svdLoader': desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name")) desired_vae_names.add(self.get_input_value(entry, "vae_name")) elif class_type == "easy XYInputs: ModelMergeBlocks": desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name_1")) desired_ckpt_names.add(self.get_input_value(entry, "ckpt_name_2")) vae_use = self.get_input_value(entry, "vae_use") if vae_use != 'Use Model 1' and vae_use != 'Use Model 2': desired_vae_names.add(vae_use) object_types = ["ckpt", "clip", "bvae", "vae", "lora"] for object_type in object_types: desired_names = desired_ckpt_names if object_type in ["ckpt", "clip", "bvae"] else desired_vae_names if object_type == "vae" else desired_lora_names self.clear_unused_objects(desired_names, object_type) def add_to_cache(self, obj_type, key, value): """ Add an item to the cache with the current timestamp. """ timestamped_value = (value, time.time()) self.loaded_objects[obj_type][key] = timestamped_value def determine_memory_threshold(self, percentage=0.8): """ Determines the memory threshold as a percentage of the total available memory. 
Args: - percentage (float): The fraction of total memory to use as the threshold. Should be a value between 0 and 1. Default is 0.8 (80%). Returns: - memory_threshold (int): Memory threshold in bytes. """ total_memory = psutil.virtual_memory().total memory_threshold = total_memory * percentage return memory_threshold def get_memory_usage(self): """ Returns the memory usage of the current process in bytes. """ process = psutil.Process(os.getpid()) return process.memory_info().rss def eviction_based_on_memory(self): """ Evicts objects from cache based on memory usage and priority. """ current_memory = self.get_memory_usage() if current_memory < self.memory_threshold: return eviction_order = ["vae", "lora", "bvae", "clip", "ckpt"] for obj_type in eviction_order: if current_memory < self.memory_threshold: break # Sort items based on age (using the timestamp) items = list(self.loaded_objects[obj_type].items()) items.sort(key=lambda x: x[1][1]) # Sorting by timestamp for item in items: if current_memory < self.memory_threshold: break del self.loaded_objects[obj_type][item[0]] current_memory = self.get_memory_usage() def load_checkpoint(self, ckpt_name, config_name=None, load_vision=False): cache_name = ckpt_name if config_name not in [None, "Default"]: cache_name = ckpt_name + "_" + config_name if cache_name in self.loaded_objects["ckpt"]: cache_out = self.loaded_objects["clip_vision"][cache_name][0] if load_vision else self.loaded_objects["clip"][cache_name][0] return self.loaded_objects["ckpt"][cache_name][0], cache_out, self.loaded_objects["bvae"][cache_name][0] ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name) output_clip = False if load_vision else True output_clipvision = True if load_vision else False if config_name not in [None, "Default"]: config_path = folder_paths.get_full_path("configs", config_name) loaded_ckpt = comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=output_clip, output_clipvision=output_clipvision, embedding_directory=folder_paths.get_folder_paths("embeddings")) else: loaded_ckpt = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=output_clip, output_clipvision=output_clipvision, embedding_directory=folder_paths.get_folder_paths("embeddings")) self.add_to_cache("ckpt", cache_name, loaded_ckpt[0]) self.add_to_cache("bvae", cache_name, loaded_ckpt[2]) if load_vision: out = loaded_ckpt[3] self.add_to_cache("clip_vision", cache_name, out) else: out = loaded_ckpt[1] self.add_to_cache("clip", cache_name, loaded_ckpt[1]) self.eviction_based_on_memory() return loaded_ckpt[0], out, loaded_ckpt[2] def load_vae(self, vae_name): if vae_name in self.loaded_objects["vae"]: return self.loaded_objects["vae"][vae_name][0] vae_path = folder_paths.get_full_path("vae", vae_name) sd = comfy.utils.load_torch_file(vae_path) loaded_vae = comfy.sd.VAE(sd=sd) self.add_to_cache("vae", vae_name, loaded_vae) self.eviction_based_on_memory() return loaded_vae def load_lora(self, lora_name, model, clip, strength_model, strength_clip): model_hash = str(model)[44:-1] clip_hash = str(clip)[25:-1] unique_id = f'{model_hash};{clip_hash};{lora_name};{strength_model};{strength_clip}' if unique_id in self.loaded_objects["lora"] and unique_id in self.loaded_objects["lora"][lora_name]: return self.loaded_objects["lora"][unique_id][0] lora_path = folder_paths.get_full_path("loras", lora_name) lora = comfy.utils.load_torch_file(lora_path, safe_load=True) model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, 
strength_clip) self.add_to_cache("lora", unique_id, (model_lora, clip_lora)) self.eviction_based_on_memory() return model_lora, clip_lora # 采样器 class easySampler: def __init__(self): self.last_helds: dict[str, list] = { "results": [], "pipe_line": [], } @staticmethod def tensor2pil(image: torch.Tensor) -> Image.Image: """Convert a torch tensor to a PIL image.""" return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) @staticmethod def pil2tensor(image: Image.Image) -> torch.Tensor: """Convert a PIL image to a torch tensor.""" return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) @staticmethod def enforce_mul_of_64(d): d = int(d) if d <= 7: d = 8 leftover = d % 8 # 8 is the number of pixels per byte if leftover != 0: # if the number of pixels is not a multiple of 8 if (leftover < 4): # if the number of pixels is less than 4 d -= leftover # remove the leftover pixels else: # if the number of pixels is more than 4 d += 8 - leftover # add the leftover pixels return int(d) @staticmethod def safe_split(to_split: str, delimiter: str) -> List[str]: """Split the input string and return a list of non-empty parts.""" parts = to_split.split(delimiter) parts = [part for part in parts if part not in ('', ' ', ' ')] while len(parts) < 2: parts.append('None') return parts def common_ksampler(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, preview_latent=True, disable_pbar=False): device = comfy.model_management.get_torch_device() latent_image = latent["samples"] if disable_noise: noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") else: batch_inds = latent["batch_index"] if "batch_index" in latent else None noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds) noise_mask = None if "noise_mask" in latent: noise_mask = latent["noise_mask"] preview_format = "JPEG" if preview_format not in ["JPEG", "PNG"]: preview_format = "JPEG" previewer = False if preview_latent: previewer = latent_preview.get_previewer(device, model.model.latent_format) pbar = comfy.utils.ProgressBar(steps) def callback(step, x0, x, total_steps): preview_bytes = None if previewer: preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0) pbar.update_absolute(step + 1, total_steps, preview_bytes) samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) out = latent.copy() out["samples"] = samples return out def custom_ksampler(self, model, seed, steps, cfg, _sampler, sigmas, positive, negative, latent, disable_noise=False, preview_latent=True, disable_pbar=False): device = comfy.model_management.get_torch_device() latent_image = latent["samples"] if disable_noise: noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") else: batch_inds = latent["batch_index"] if "batch_index" in latent else None noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds) noise_mask = None if "noise_mask" in latent: noise_mask = latent["noise_mask"] preview_format = "JPEG" if preview_format not in ["JPEG", "PNG"]: preview_format = "JPEG" previewer = False if 
preview_latent: previewer = latent_preview.get_previewer(device, model.model.latent_format) pbar = comfy.utils.ProgressBar(steps) def callback(step, x0, x, total_steps): preview_bytes = None if previewer: preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0) pbar.update_absolute(step + 1, total_steps, preview_bytes) samples = comfy.sample.sample_custom(model, noise, cfg, _sampler, sigmas, positive, negative, latent_image, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) out = latent.copy() out["samples"] = samples return out def get_value_by_id(self, key: str, my_unique_id: Any) -> Optional[Any]: """Retrieve value by its associated ID.""" try: for value, id_ in self.last_helds[key]: if id_ == my_unique_id: return value except KeyError: return None def update_value_by_id(self, key: str, my_unique_id: Any, new_value: Any) -> Union[bool, None]: """Update the value associated with a given ID. Return True if updated, False if appended, None if key doesn't exist.""" try: for i, (value, id_) in enumerate(self.last_helds[key]): if id_ == my_unique_id: self.last_helds[key][i] = (new_value, id_) return True self.last_helds[key].append((new_value, my_unique_id)) return False except KeyError: return False def upscale(self, samples, upscale_method, scale_by, crop): s = samples.copy() width = self.enforce_mul_of_64(round(samples["samples"].shape[3] * scale_by)) height = self.enforce_mul_of_64(round(samples["samples"].shape[2] * scale_by)) if (width > MAX_RESOLUTION): width = MAX_RESOLUTION if (height > MAX_RESOLUTION): height = MAX_RESOLUTION s["samples"] = comfy.utils.common_upscale(samples["samples"], width, height, upscale_method, crop) return (s,) def handle_upscale(self, samples: dict, upscale_method: str, factor: float, crop: bool) -> dict: """Upscale the samples if the upscale_method is not set to 'None'.""" if upscale_method != "None": samples = self.upscale(samples, upscale_method, factor, crop)[0] return samples def init_state(self, my_unique_id: Any, key: str, default: Any) -> Any: """Initialize the state by either fetching the stored value or setting a default.""" value = self.get_value_by_id(key, my_unique_id) if value is not None: return value return default def get_output(self, pipe: dict,) -> Tuple: """Return a tuple of various elements fetched from the input pipe dictionary.""" return ( pipe, pipe.get("images"), pipe.get("model"), pipe.get("positive"), pipe.get("negative"), pipe.get("samples"), pipe.get("vae"), pipe.get("clip"), pipe.get("seed"), ) def get_output_sdxl(self, sdxl_pipe: dict) -> Tuple: """Return a tuple of various elements fetched from the input sdxl_pipe dictionary.""" return ( sdxl_pipe, sdxl_pipe.get("model"), sdxl_pipe.get("positive"), sdxl_pipe.get("negative"), sdxl_pipe.get("vae"), sdxl_pipe.get("refiner_model"), sdxl_pipe.get("refiner_positive"), sdxl_pipe.get("refiner_negative"), sdxl_pipe.get("refiner_vae"), sdxl_pipe.get("samples"), sdxl_pipe.get("clip"), sdxl_pipe.get("images"), sdxl_pipe.get("seed") ) # XY图表 class easyXYPlot: def __init__(self, xyPlotData, save_prefix, image_output, prompt, extra_pnginfo, my_unique_id): self.x_node_type, self.x_type = easySampler.safe_split(xyPlotData.get("x_axis"), ': ') self.y_node_type, self.y_type = easySampler.safe_split(xyPlotData.get("y_axis"), ': ') self.x_values = xyPlotData.get("x_vals") if self.x_type != "None" else [] self.y_values = xyPlotData.get("y_vals") if self.y_type != "None" else [] self.grid_spacing = xyPlotData.get("grid_spacing") self.latent_id = 0 
self.output_individuals = xyPlotData.get("output_individuals") self.x_label, self.y_label = [], [] self.max_width, self.max_height = 0, 0 self.latents_plot = [] self.image_list = [] self.num_cols = len(self.x_values) if len(self.x_values) > 0 else 1 self.num_rows = len(self.y_values) if len(self.y_values) > 0 else 1 self.total = self.num_cols * self.num_rows self.num = 0 self.save_prefix = save_prefix self.image_output = image_output self.prompt = prompt self.extra_pnginfo = extra_pnginfo self.my_unique_id = my_unique_id # Helper Functions @staticmethod def define_variable(plot_image_vars, value_type, value, index): plot_image_vars[value_type] = value if value_type in ["seed", "Seeds++ Batch"]: value_label = f"{value}" else: value_label = f"{value_type}: {value}" if "ControlNet" in value_type: if "," in value: line = value.split(',') value_label = f"{value_type}: {line[2]}" if value_type in ["ModelMergeBlocks"]: if ":" in value: line = value.split(':') value_label = f"{line[0]}" elif len(value) > 16: value_label = f"ModelMergeBlocks {index + 1}" else: value_label = f"MMB: {value}" if value_type in ["Positive Prompt S/R"]: value_label = f"pos prompt {index + 1}" if index>0 else f"pos prompt" if value_type in ["Negative Prompt S/R"]: value_label = f"neg prompt {index + 1}" if index>0 else f"neg prompt" if value_type in ["steps", "cfg", "denoise", "clip_skip", "lora_model_strength", "lora_clip_strength"]: value_label = f"{value_type}: {value}" if value_type == "positive": value_label = f"pos prompt {index + 1}" elif value_type == "negative": value_label = f"neg prompt {index + 1}" return plot_image_vars, value_label @staticmethod def get_font(font_size): return ImageFont.truetype(str(Path(os.path.join(Path(__file__).parent.parent, 'resources/OpenSans-Medium.ttf'))), font_size) @staticmethod def update_label(label, value, num_items): if len(label) < num_items: return [*label, value] return label @staticmethod def rearrange_tensors(latent, num_cols, num_rows): new_latent = [] for i in range(num_rows): for j in range(num_cols): index = j * num_rows + i new_latent.append(latent[index]) return new_latent def calculate_background_dimensions(self): border_size = int((self.max_width // 8) * 1.5) if self.y_type != "None" or self.x_type != "None" else 0 bg_width = self.num_cols * (self.max_width + self.grid_spacing) - self.grid_spacing + border_size * ( self.y_type != "None") bg_height = self.num_rows * (self.max_height + self.grid_spacing) - self.grid_spacing + border_size * ( self.x_type != "None") x_offset_initial = border_size if self.y_type != "None" else 0 y_offset = border_size if self.x_type != "None" else 0 return bg_width, bg_height, x_offset_initial, y_offset def adjust_font_size(self, text, initial_font_size, label_width): font = self.get_font(initial_font_size) text_width, _ = font.getsize(text) scaling_factor = 0.9 if text_width > (label_width * scaling_factor): return int(initial_font_size * (label_width / text_width) * scaling_factor) else: return initial_font_size def create_label(self, img, text, initial_font_size, is_x_label=True, max_font_size=70, min_font_size=10): label_width = img.width if is_x_label else img.height # Adjust font size font_size = self.adjust_font_size(text, initial_font_size, label_width) font_size = min(max_font_size, font_size) # Ensure font isn't too large font_size = max(min_font_size, font_size) # Ensure font isn't too small label_height = int(font_size * 1.5) if is_x_label else font_size label_bg = Image.new('RGBA', (label_width, label_height), color=(255, 
255, 255, 0)) d = ImageDraw.Draw(label_bg) font = self.get_font(font_size) # Check if text will fit, if not insert ellipsis and reduce text if d.textsize(text, font=font)[0] > label_width: while d.textsize(text + '...', font=font)[0] > label_width and len(text) > 0: text = text[:-1] text = text + '...' # Compute text width and height for multi-line text text_lines = text.split('\n') text_widths, text_heights = zip(*[d.textsize(line, font=font) for line in text_lines]) max_text_width = max(text_widths) total_text_height = sum(text_heights) # Compute position for each line of text lines_positions = [] current_y = 0 for line, line_width, line_height in zip(text_lines, text_widths, text_heights): text_x = (label_width - line_width) // 2 text_y = current_y + (label_height - total_text_height) // 2 current_y += line_height lines_positions.append((line, (text_x, text_y))) # Draw each line of text for line, (text_x, text_y) in lines_positions: d.text((text_x, text_y), line, fill='black', font=font) return label_bg def sample_plot_image(self, plot_image_vars, samples, preview_latent, latents_plot, image_list, disable_noise, start_step, last_step, force_full_denoise, x_value=None, y_value=None): model, clip, vae, positive, negative, seed, steps, cfg = None, None, None, None, None, None, None, None sampler_name, scheduler, denoise = None, None, None # 高级用法 if plot_image_vars["x_node_type"] == "advanced" or plot_image_vars["y_node_type"] == "advanced": if self.x_type == "Seeds++ Batch" or self.y_type == "Seeds++ Batch": seed = int(x_value) if self.x_type == "Seeds++ Batch" else int(y_value) if self.x_type == "Steps" or self.y_type == "Steps": steps = int(x_value) if self.x_type == "Steps" else int(y_value) if self.x_type == "StartStep" or self.y_type == "StartStep": start_step = int(x_value) if self.x_type == "StartStep" else int(y_value) if self.x_type == "EndStep" or self.y_type == "EndStep": last_step = int(x_value) if self.x_type == "EndStep" else int(y_value) if self.x_type == "CFG Scale" or self.y_type == "CFG Scale": cfg = float(x_value) if self.x_type == "CFG Scale" else float(y_value) if self.x_type == "Sampler" or self.y_type == "Sampler" or self.y_type == "Sampler & Scheduler": sampler_name = float(x_value) if self.x_type == "Sampler" or self.x_type == "Sampler & Scheduler" else float(y_value) if self.x_type == "Scheduler" or self.y_type == "Scheduler" or self.y_type == "Sampler & Scheduler": scheduler = float(x_value) if self.x_type == "Scheduler" or self.x_type == "Sampler & Scheduler" else float(y_value) if self.x_type == "Denoise" or self.y_type == "Denoise": denoise = float(x_value) if self.x_type == "Denoise" else float(y_value) # 模型叠加 if self.x_type == "ModelMergeBlocks" or self.y_type == "ModelMergeBlocks": ckpt_name_1, ckpt_name_2 = plot_image_vars['models'] model1, clip1, vae1 = easyCache.load_checkpoint(ckpt_name_1) model2, clip2, vae2 = easyCache.load_checkpoint(ckpt_name_2) xy_values = x_value if self.x_type == "ModelMergeBlocks" else y_value if ":" in xy_values: xy_line = xy_values.split(':') xy_values = xy_line[1] xy_arrs = xy_values.split(',') # ModelMergeBlocks if len(xy_arrs) == 3: input, middle, out = xy_arrs kwargs = { "input": input, "middle": middle, "out": out } elif len(xy_arrs) == 30: kwargs = {} kwargs["time_embed."] = xy_arrs[0] kwargs["label_emb."] = xy_arrs[1] for i in range(12): kwargs["input_blocks.{}.".format(i)] = xy_arrs[2+i] for i in range(3): kwargs["middle_block.{}.".format(i)] = xy_arrs[14+i] for i in range(12): kwargs["output_blocks.{}.".format(i)] = 
xy_arrs[17+i] kwargs["out."] = xy_arrs[29] else: raise Exception("ModelMergeBlocks weight length error") default_ratio = next(iter(kwargs.values())) m = model1.clone() kp = model2.get_key_patches("diffusion_model.") for k in kp: ratio = float(default_ratio) k_unet = k[len("diffusion_model."):] last_arg_size = 0 for arg in kwargs: if k_unet.startswith(arg) and last_arg_size < len(arg): ratio = float(kwargs[arg]) last_arg_size = len(arg) m.add_patches({k: kp[k]}, 1.0 - ratio, ratio) vae_use = plot_image_vars['vae_use'] clip = clip2 if vae_use == 'Use Model 2' else clip1 if vae_use == 'Use Model 2': vae = vae2 elif vae_use == 'Use Model 1': vae = vae1 else: (vae,) = VAELoader().load_vae(vae_use) model = m # 如果存在lora_stack叠加lora optional_lora_stack = plot_image_vars['lora_stack'] if optional_lora_stack is not None and optional_lora_stack != []: for lora in optional_lora_stack: lora_name = lora["lora_name"] model = model if model is not None else lora["model"] clip = clip if clip is not None else lora["clip"] lora_model_strength = lora["lora_model_strength"] lora_clip_strength = lora["lora_clip_strength"] if "lbw" in lora: lbw = lora["lbw"] lbw_a = lora["lbw_a"] lbw_b = lora["lbw_b"] cls = ALL_NODE_CLASS_MAPPINGS['LoraLoaderBlockWeight //Inspire'] model, clip, _ = cls().doit(model, clip, lora_name, lora_model_strength, lora_clip_strength, False, 0, lbw_a, lbw_b, "", lbw) model, clip = easyCache.load_lora(lora_name, model, clip, lora_model_strength, lora_clip_strength) # 处理clip clip = clip.clone() if plot_image_vars['clip_skip'] != 0: clip.clip_layer(plot_image_vars['clip_skip']) # 提示词 if "Positive" in self.x_type or "Positive" in self.y_type: if self.x_type == 'Positive Prompt S/R' or self.y_type == 'Positive Prompt S/R': positive = x_value if self.x_type == "Positive Prompt S/R" else y_value if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] clip = clip if clip is not None else plot_image_vars["clip"] positive, = cls().encode(clip, positive, "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception( f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: clip = clip if clip is not None else plot_image_vars["clip"] positive, positive_pooled = advanced_encode(clip, positive, plot_image_vars['positive_token_normalization'], plot_image_vars[ 'positive_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") positive = [[positive, {"pooled_output": positive_pooled}]] if "Negative" in self.x_type or "Negative" in self.y_type: if self.x_type == 'Negative Prompt S/R' or self.y_type == 'Negative Prompt S/R': negative = x_value if self.x_type == "Negative Prompt S/R" else y_value if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] clip = clip if clip is not None else plot_image_vars["clip"] negative, = cls().encode(clip, negative, "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception( f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: clip = clip if clip is not None else plot_image_vars["clip"] negative, negative_pooled = advanced_encode(clip, negative, plot_image_vars['negative_token_normalization'], plot_image_vars[ 'negative_weight_interpretation'], w_max=1.0, 
apply_to_pooled="enable") negative = [[negative, {"pooled_output": negative_pooled}]] # ControlNet if "ControlNet" in self.x_type or "ControlNet" in self.y_type: _pipe = { "model": model if model is not None else plot_image_vars["model"], "positive": positive if positive is not None else plot_image_vars["positive_cond"], "negative": negative if negative is not None else plot_image_vars["negative_cond"], "vae": vae if vae is not None else plot_image_vars['vae'], "clip": clip if clip is not None else plot_image_vars['clip'], "samples": None, "images": None, "loader_settings": {} } cnet = plot_image_vars["cnet"] if "cnet" in plot_image_vars else None if cnet: strength, start_percent, end_percent = x_value.split(',') if "ControlNet" in self.x_type else y_value.split(',') strength = float(strength) start_percent = float(start_percent) end_percent = float(end_percent) for index, item in enumerate(cnet): control_net_names = item[0] image = item[1] for idx, control_net_name in enumerate(control_net_names): # print(control_net_name) _pipe, = controlnetAdvanced().controlnetApply(_pipe, image, control_net_name, None, strength, start_percent, end_percent) positive = _pipe['positive'] negative = _pipe['negative'] del _pipe # 简单用法 if plot_image_vars["x_node_type"] == "loader" or plot_image_vars["y_node_type"] == "loader": model, clip, vae = easyCache.load_checkpoint(plot_image_vars['ckpt_name']) if plot_image_vars['lora_name'] != "None": model, clip = easyCache.load_lora(plot_image_vars['lora_name'], model, clip, plot_image_vars['lora_model_strength'], plot_image_vars['lora_clip_strength']) # Check for custom VAE if plot_image_vars['vae_name'] not in ["Baked-VAE", "Baked VAE"]: vae = easyCache.load_vae(plot_image_vars['vae_name']) # CLIP skip if not clip: raise Exception("No CLIP found") clip = clip.clone() clip.clip_layer(plot_image_vars['clip_skip']) if plot_image_vars['a1111_prompt_style']: if "smZ CLIPTextEncode" in ALL_NODE_CLASS_MAPPINGS: cls = ALL_NODE_CLASS_MAPPINGS['smZ CLIPTextEncode'] steps = plot_image_vars['steps'] positive, = cls().encode(clip, plot_image_vars['positive'], "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) negative, = cls().encode(clip, plot_image_vars['negative'], "A1111", True, True, False, False, 6, 1024, 1024, 0, 0, 1024, 1024, '', '', steps) else: raise Exception(f"[ERROR] To use clip text encode same as webui, you need to install 'smzNodes'") else: positive, positive_pooled = advanced_encode(clip, plot_image_vars['positive'], plot_image_vars['positive_token_normalization'], plot_image_vars['positive_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") positive = [[positive, {"pooled_output": positive_pooled}]] negative, negative_pooled = advanced_encode(clip, plot_image_vars['negative'], plot_image_vars['negative_token_normalization'], plot_image_vars['negative_weight_interpretation'], w_max=1.0, apply_to_pooled="enable") negative = [[negative, {"pooled_output": negative_pooled}]] model = model if model is not None else plot_image_vars["model"] clip = clip if clip is not None else plot_image_vars["clip"] vae = vae if vae is not None else plot_image_vars["vae"] positive = positive if positive is not None else plot_image_vars["positive_cond"] negative = negative if negative is not None else plot_image_vars["negative_cond"] seed = seed if seed is not None else plot_image_vars["seed"] steps = steps if steps is not None else plot_image_vars["steps"] cfg = cfg if cfg is not None else plot_image_vars["cfg"] sampler_name = 
sampler_name if sampler_name is not None else plot_image_vars["sampler_name"] scheduler = scheduler if scheduler is not None else plot_image_vars["scheduler"] denoise = denoise if denoise is not None else plot_image_vars["denoise"] # Sample samples = sampler.common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, samples, denoise=denoise, disable_noise=disable_noise, preview_latent=preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise) # Decode images and store latent = samples["samples"] # Add the latent tensor to the tensors list latents_plot.append(latent) # Decode the image image = vae.decode(latent).cpu() if self.output_individuals in [True, "True"]: easy_save = easySave(self.my_unique_id, self.prompt, self.extra_pnginfo) easy_save.images(image, self.save_prefix, self.image_output, group_id=self.num) # Convert the image from tensor to PIL Image and add it to the list pil_image = easySampler.tensor2pil(image) image_list.append(pil_image) # Update max dimensions self.max_width = max(self.max_width, pil_image.width) self.max_height = max(self.max_height, pil_image.height) # Return the touched variables return image_list, self.max_width, self.max_height, latents_plot # Process Functions def validate_xy_plot(self): if self.x_type == 'None' and self.y_type == 'None': log_node_warn(f'easyKsampler[{self.my_unique_id}]','No Valid Plot Types - Reverting to default sampling...') return False else: return True def get_latent(self, samples): # Extract the 'samples' tensor from the dictionary latent_image_tensor = samples["samples"] # Split the tensor into individual image tensors image_tensors = torch.split(latent_image_tensor, 1, dim=0) # Create a list of dictionaries containing the individual image tensors latent_list = [{'samples': image} for image in image_tensors] # Set latent only to the first latent of batch if self.latent_id >= len(latent_list): log_node_warn(f'easy kSampler[{self.my_unique_id}]',f'The selected latent_id ({self.latent_id}) is out of range.') log_node_warn(f'easy kSampler[{self.my_unique_id}]', f'Automatically setting the latent_id to the last image in the list (index: {len(latent_list) - 1}).') self.latent_id = len(latent_list) - 1 return latent_list[self.latent_id] def get_labels_and_sample(self, plot_image_vars, latent_image, preview_latent, start_step, last_step, force_full_denoise, disable_noise): for x_index, x_value in enumerate(self.x_values): plot_image_vars, x_value_label = self.define_variable(plot_image_vars, self.x_type, x_value, x_index) self.x_label = self.update_label(self.x_label, x_value_label, len(self.x_values)) if self.y_type != 'None': for y_index, y_value in enumerate(self.y_values): plot_image_vars, y_value_label = self.define_variable(plot_image_vars, self.y_type, y_value, y_index) self.y_label = self.update_label(self.y_label, y_value_label, len(self.y_values)) # ttNl(f'{CC.GREY}X: {x_value_label}, Y: {y_value_label}').t( # f'Plot Values {self.num}/{self.total} ->').p() self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image( plot_image_vars, latent_image, preview_latent, self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise, x_value, y_value) self.num += 1 else: # ttNl(f'{CC.GREY}X: {x_value_label}').t(f'Plot Values {self.num}/{self.total} ->').p() self.image_list, self.max_width, self.max_height, self.latents_plot = self.sample_plot_image( plot_image_vars, latent_image, preview_latent, 
self.latents_plot, self.image_list, disable_noise, start_step, last_step, force_full_denoise, x_value) self.num += 1 # Rearrange latent array to match preview image grid self.latents_plot = self.rearrange_tensors(self.latents_plot, self.num_cols, self.num_rows) # Concatenate the tensors along the first dimension (dim=0) self.latents_plot = torch.cat(self.latents_plot, dim=0) return self.latents_plot def plot_images_and_labels(self): # Calculate the background dimensions bg_width, bg_height, x_offset_initial, y_offset = self.calculate_background_dimensions() # Create the white background image background = Image.new('RGBA', (int(bg_width), int(bg_height)), color=(255, 255, 255, 255)) output_image = [] for row_index in range(self.num_rows): x_offset = x_offset_initial for col_index in range(self.num_cols): index = col_index * self.num_rows + row_index img = self.image_list[index] output_image.append(sampler.pil2tensor(img)) background.paste(img, (x_offset, y_offset)) # Handle X label if row_index == 0 and self.x_type != "None": label_bg = self.create_label(img, self.x_label[col_index], int(48 * img.width / 512)) label_y = (y_offset - label_bg.height) // 2 background.alpha_composite(label_bg, (x_offset, label_y)) # Handle Y label if col_index == 0 and self.y_type != "None": label_bg = self.create_label(img, self.y_label[row_index], int(48 * img.height / 512), False) label_bg = label_bg.rotate(90, expand=True) label_x = (x_offset - label_bg.width) // 2 label_y = y_offset + (img.height - label_bg.height) // 2 background.alpha_composite(label_bg, (label_x, label_y)) x_offset += img.width + self.grid_spacing y_offset += img.height + self.grid_spacing return (sampler.pil2tensor(background), output_image) easyCache = easyLoader() sampler = easySampler() def check_link_to_clip(node_id, clip_id, visited=None, node=None): """Check if a given node links directly or indirectly to a loader node.""" if visited is None: visited = set() if node_id in visited: return False visited.add(node_id) if "pipe" in node["inputs"]: link_ids = node["inputs"]["pipe"] for id in link_ids: if id != 0 and id == str(clip_id): return True return False def find_nearest_steps(clip_id, prompt): """Find the nearest KSampler or preSampling node that references the given id.""" for id in prompt: node = prompt[id] if "Sampler" in node["class_type"] or "sampler" in node["class_type"] or "Sampling" in node["class_type"]: # Check if this KSampler node directly or indirectly references the given CLIPTextEncode node if check_link_to_clip(id, clip_id, None, node): steps = node["inputs"]["steps"] if "steps" in node["inputs"] else 1 return steps return 1 def find_wildcards_seed(text, prompt): if "__" in text: for i in prompt: if "wildcards" in prompt[i]['class_type'] and text == prompt[i]['inputs']['text']: return prompt[i]['inputs']['seed_num'] if "seed_num" in prompt[i]['inputs'] else None else: return None class easySave: def __init__(self, my_unique_id=0, prompt=None, extra_pnginfo=None, number_padding=5, overwrite_existing=False, output_dir=folder_paths.get_temp_directory()): self.number_padding = int(number_padding) if number_padding not in [None, "None", 0] else None self.overwrite_existing = overwrite_existing self.my_unique_id = my_unique_id self.prompt = prompt self.extra_pnginfo = extra_pnginfo self.type = 'temp' self.output_dir = output_dir if self.output_dir != folder_paths.get_temp_directory(): self.output_dir = self.folder_parser(self.output_dir, self.prompt, self.my_unique_id) if not os.path.exists(self.output_dir): 
self._create_directory(self.output_dir) @staticmethod def _create_directory(folder: str): """Try to create the directory and log the status.""" log_node_warn("", f"Folder {folder} does not exist. Attempting to create...") if not os.path.exists(folder): try: os.makedirs(folder) log_node_success("",f"{folder} Created Successfully") except OSError:
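The easyLoader above caches checkpoints, VAEs and LoRAs in per-type dicts, stamps each entry with time.time(), and evicts oldest entries first, type by type, whenever the process RSS (read via psutil) exceeds a fraction of total RAM. A compact sketch of that caching pattern; the class and method names are hypothetical, not part of the node pack.

import os
import time
import psutil
from collections import defaultdict

class TimestampedCache:
    """Per-type cache whose entries carry a timestamp; oldest entries are
    evicted once process memory crosses a threshold derived from total RAM."""

    def __init__(self, types=("vae", "lora", "ckpt"), memory_fraction=0.7):
        # Put cheap-to-reload types first: they are evicted before expensive ones.
        self.store = {t: defaultdict(tuple) for t in types}
        self.memory_threshold = psutil.virtual_memory().total * memory_fraction

    def _memory_usage(self):
        return psutil.Process(os.getpid()).memory_info().rss

    def add(self, obj_type, key, value):
        self.store[obj_type][key] = (value, time.time())
        self.evict_if_needed()

    def get(self, obj_type, key):
        entry = self.store[obj_type].get(key)
        return entry[0] if entry else None

    def evict_if_needed(self):
        if self._memory_usage() < self.memory_threshold:
            return
        for obj_type in self.store:
            # sorted() copies the items, so deleting from the dict below is safe.
            for key, _ in sorted(self.store[obj_type].items(), key=lambda kv: kv[1][1]):
                if self._memory_usage() < self.memory_threshold:
                    return
                del self.store[obj_type][key]

cache = TimestampedCache()
cache.add("vae", "example_vae.safetensors", object())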
log_node_error(f"Failed to create folder {folder}")
4
2023-12-10 07:02:36+00:00
16k
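The easySampler helpers in the record above (tensor2pil and pil2tensor) move between ComfyUI-style image tensors (float values in [0, 1], shape (batch, height, width, channels)) and 8-bit PIL images by scaling to and from 0-255. A minimal round-trip sketch under that layout assumption:

import numpy as np
import torch
from PIL import Image

def tensor_to_pil(image: torch.Tensor) -> Image.Image:
    """(1, H, W, C) float tensor in [0, 1] -> 8-bit PIL image."""
    array = np.clip(255.0 * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)
    return Image.fromarray(array)

def pil_to_tensor(image: Image.Image) -> torch.Tensor:
    """PIL image -> (1, H, W, C) float tensor in [0, 1]."""
    return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)

# round trip on a dummy RGB image
img = Image.new("RGB", (64, 64), color=(255, 0, 0))
assert tensor_to_pil(pil_to_tensor(img)).size == (64, 64)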
AIFSH/NativeDancer
nativedancer/third_part/detectron2/data/build.py
[ { "identifier": "configurable", "path": "nativedancer/third_part/detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\n \"\"\"\n Decorate a function or a class's __init__ method so that it can be called\n with a :class:`CfgNode` object using a :func:`from_config` function that translates\n :class:`CfgNode` to arguments.\n\n Examples:\n ::\n # Usage 1: Decorator on __init__:\n class A:\n @configurable\n def __init__(self, a, b=2, c=3):\n pass\n\n @classmethod\n def from_config(cls, cfg): # 'cfg' must be the first argument\n # Returns kwargs to be passed to __init__\n return {\"a\": cfg.A, \"b\": cfg.B}\n\n a1 = A(a=1, b=2) # regular construction\n a2 = A(cfg) # construct with a cfg\n a3 = A(cfg, b=3, c=4) # construct with extra overwrite\n\n # Usage 2: Decorator on any function. Needs an extra from_config argument:\n @configurable(from_config=lambda cfg: {\"a: cfg.A, \"b\": cfg.B})\n def a_func(a, b=2, c=3):\n pass\n\n a1 = a_func(a=1, b=2) # regular call\n a2 = a_func(cfg) # call with a cfg\n a3 = a_func(cfg, b=3, c=4) # call with extra overwrite\n\n Args:\n init_func (callable): a class's ``__init__`` method in usage 1. The\n class must have a ``from_config`` classmethod which takes `cfg` as\n the first argument.\n from_config (callable): the from_config function in usage 2. It must take `cfg`\n as its first argument.\n \"\"\"\n\n if init_func is not None:\n assert (\n inspect.isfunction(init_func)\n and from_config is None\n and init_func.__name__ == \"__init__\"\n ), \"Incorrect use of @configurable. Check API documentation for examples.\"\n\n @functools.wraps(init_func)\n def wrapped(self, *args, **kwargs):\n try:\n from_config_func = type(self).from_config\n except AttributeError as e:\n raise AttributeError(\n \"Class with @configurable must have a 'from_config' classmethod.\"\n ) from e\n if not inspect.ismethod(from_config_func):\n raise TypeError(\"Class with @configurable must have a 'from_config' classmethod.\")\n\n if _called_with_cfg(*args, **kwargs):\n explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)\n init_func(self, **explicit_args)\n else:\n init_func(self, *args, **kwargs)\n\n return wrapped\n\n else:\n if from_config is None:\n return configurable # @configurable() is made equivalent to @configurable\n assert inspect.isfunction(\n from_config\n ), \"from_config argument of configurable must be a function!\"\n\n def wrapper(orig_func):\n @functools.wraps(orig_func)\n def wrapped(*args, **kwargs):\n if _called_with_cfg(*args, **kwargs):\n explicit_args = _get_args_from_config(from_config, *args, **kwargs)\n return orig_func(**explicit_args)\n else:\n return orig_func(*args, **kwargs)\n\n wrapped.from_config = from_config\n return wrapped\n\n return wrapper" }, { "identifier": "BoxMode", "path": "nativedancer/third_part/detectron2/structures/boxes.py", "snippet": "class BoxMode(IntEnum):\n \"\"\"\n Enum of different ways to represent a box.\n \"\"\"\n\n XYXY_ABS = 0\n \"\"\"\n (x0, y0, x1, y1) in absolute floating points coordinates.\n The coordinates in range [0, width or height].\n \"\"\"\n XYWH_ABS = 1\n \"\"\"\n (x0, y0, w, h) in absolute floating points coordinates.\n \"\"\"\n XYXY_REL = 2\n \"\"\"\n Not yet supported!\n (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.\n \"\"\"\n XYWH_REL = 3\n \"\"\"\n Not yet supported!\n (x0, y0, w, h) in range [0, 1]. 
They are relative to the size of the image.\n \"\"\"\n XYWHA_ABS = 4\n \"\"\"\n (xc, yc, w, h, a) in absolute floating points coordinates.\n (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.\n \"\"\"\n\n @staticmethod\n def convert(box: _RawBoxType, from_mode: \"BoxMode\", to_mode: \"BoxMode\") -> _RawBoxType:\n \"\"\"\n Args:\n box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5\n from_mode, to_mode (BoxMode)\n\n Returns:\n The converted box of the same type.\n \"\"\"\n if from_mode == to_mode:\n return box\n\n original_type = type(box)\n is_numpy = isinstance(box, np.ndarray)\n single_box = isinstance(box, (list, tuple))\n if single_box:\n assert len(box) == 4 or len(box) == 5, (\n \"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,\"\n \" where k == 4 or 5\"\n )\n arr = torch.tensor(box)[None, :]\n else:\n # avoid modifying the input box\n if is_numpy:\n arr = torch.from_numpy(np.asarray(box)).clone()\n else:\n arr = box.clone()\n\n assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [\n BoxMode.XYXY_REL,\n BoxMode.XYWH_REL,\n ], \"Relative mode not yet supported!\"\n\n if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:\n assert (\n arr.shape[-1] == 5\n ), \"The last dimension of input shape must be 5 for XYWHA format\"\n original_dtype = arr.dtype\n arr = arr.double()\n\n w = arr[:, 2]\n h = arr[:, 3]\n a = arr[:, 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n new_w = c * w + s * h\n new_h = c * h + s * w\n\n # convert center to top-left corner\n arr[:, 0] -= new_w / 2.0\n arr[:, 1] -= new_h / 2.0\n # bottom-right corner\n arr[:, 2] = arr[:, 0] + new_w\n arr[:, 3] = arr[:, 1] + new_h\n\n arr = arr[:, :4].to(dtype=original_dtype)\n elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:\n original_dtype = arr.dtype\n arr = arr.double()\n arr[:, 0] += arr[:, 2] / 2.0\n arr[:, 1] += arr[:, 3] / 2.0\n angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)\n arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)\n else:\n if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:\n arr[:, 2] += arr[:, 0]\n arr[:, 3] += arr[:, 1]\n elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:\n arr[:, 2] -= arr[:, 0]\n arr[:, 3] -= arr[:, 1]\n else:\n raise NotImplementedError(\n \"Conversion from BoxMode {} to {} is not supported yet\".format(\n from_mode, to_mode\n )\n )\n\n if single_box:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n else:\n return arr" }, { "identifier": "get_world_size", "path": "nativedancer/third_part/detectron2/utils/comm.py", "snippet": "def get_world_size() -> int:\n if not dist.is_available():\n return 1\n if not dist.is_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "seed_all_rng", "path": "nativedancer/third_part/detectron2/utils/env.py", "snippet": "def seed_all_rng(seed=None):\n \"\"\"\n Set the random seed for the RNG in torch, numpy and python.\n\n Args:\n seed (int): if None, will use a strong random seed.\n \"\"\"\n if seed is None:\n seed = (\n os.getpid()\n + int(datetime.now().strftime(\"%S%f\"))\n + int.from_bytes(os.urandom(2), \"big\")\n )\n logger = logging.getLogger(__name__)\n logger.info(\"Using a generated random seed {}\".format(seed))\n np.random.seed(seed)\n torch.manual_seed(seed)\n 
random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)" }, { "identifier": "PathManager", "path": "nativedancer/third_part/detectron2/utils/file_io.py", "snippet": "class Detectron2Handler(PathHandler):\n PREFIX = \"detectron2://\"\n S3_DETECTRON2_PREFIX = \"https://dl.fbaipublicfiles.com/detectron2/\"\n def _get_supported_prefixes(self):\n def _get_local_path(self, path, **kwargs):\n def _open(self, path, mode=\"r\", **kwargs):" }, { "identifier": "_log_api_usage", "path": "nativedancer/third_part/detectron2/utils/logger.py", "snippet": "def _log_api_usage(identifier: str):\n \"\"\"\n Internal function used to log the usage of different detectron2 components\n inside facebook's infra.\n \"\"\"\n torch._C._log_api_usage_once(\"detectron2.\" + identifier)" }, { "identifier": "log_first_n", "path": "nativedancer/third_part/detectron2/utils/logger.py", "snippet": "def log_first_n(lvl, msg, n=1, *, name=None, key=\"caller\"):\n \"\"\"\n Log only for the first n times.\n\n Args:\n lvl (int): the logging level\n msg (str):\n n (int):\n name (str): name of the logger to use. Will use the caller's module by default.\n key (str or tuple[str]): the string(s) can be one of \"caller\" or\n \"message\", which defines how to identify duplicated logs.\n For example, if called with `n=1, key=\"caller\"`, this function\n will only log the first call from the same caller, regardless of\n the message content.\n If called with `n=1, key=\"message\"`, this function will log the\n same content only once, even if they are called from different places.\n If called with `n=1, key=(\"caller\", \"message\")`, this function\n will not log only if the same caller has logged the same message before.\n \"\"\"\n if isinstance(key, str):\n key = (key,)\n assert len(key) > 0\n\n caller_module, caller_key = _find_caller()\n hash_key = ()\n if \"caller\" in key:\n hash_key = hash_key + caller_key\n if \"message\" in key:\n hash_key = hash_key + (msg,)\n\n _LOG_COUNTER[hash_key] += 1\n if _LOG_COUNTER[hash_key] <= n:\n logging.getLogger(name or caller_module).log(lvl, msg)" }, { "identifier": "DatasetCatalog", "path": "nativedancer/third_part/detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove(self, name):\n def __str__(self):\n def __getattr__(self, key):\n def __setattr__(self, key, val):\n def as_dict(self):\n def set(self, **kwargs):\n def get(self, key, default=None):\n def get(self, name):\n def list(self):\n def remove(self, name):\n def __str__(self):\n _RENAMED = {\n \"class_names\": \"thing_classes\",\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\n \"stuff_class_names\": \"stuff_classes\",\n }" }, { "identifier": "AspectRatioGroupedDataset", "path": "nativedancer/third_part/detectron2/data/common.py", "snippet": "class AspectRatioGroupedDataset(data.IterableDataset):\n \"\"\"\n Batch data that have similar aspect ratio together.\n In this implementation, images whose aspect ratio < (or >) 1 will\n be batched together.\n This improves training speed because the images then need less padding\n to form a batch.\n\n It assumes the underlying dataset produces dicts with \"width\" and \"height\" keys.\n It will then produce a list of original dicts with length = batch_size,\n all with similar aspect ratios.\n \"\"\"\n\n def __init__(self, dataset, batch_size):\n \"\"\"\n Args:\n dataset: an 
iterable. Each element must be a dict with keys\n \"width\" and \"height\", which will be used to batch data.\n batch_size (int):\n \"\"\"\n self.dataset = dataset\n self.batch_size = batch_size\n self._buckets = [[] for _ in range(2)]\n # Hard-coded two aspect ratio groups: w > h and w < h.\n # Can add support for more aspect ratio groups, but doesn't seem useful\n\n def __iter__(self):\n for d in self.dataset:\n w, h = d[\"width\"], d[\"height\"]\n bucket_id = 0 if w > h else 1\n bucket = self._buckets[bucket_id]\n bucket.append(d)\n if len(bucket) == self.batch_size:\n data = bucket[:]\n # Clear bucket first, because code after yield is not\n # guaranteed to execute\n del bucket[:]\n yield data" }, { "identifier": "DatasetFromList", "path": "nativedancer/third_part/detectron2/data/common.py", "snippet": "class DatasetFromList(data.Dataset):\n \"\"\"\n Wrap a list to a torch Dataset. It produces elements of the list as data.\n \"\"\"\n\n def __init__(\n self,\n lst: list,\n copy: bool = True,\n serialize: Union[bool, Callable] = True,\n ):\n \"\"\"\n Args:\n lst (list): a list which contains elements to produce.\n copy (bool): whether to deepcopy the element when producing it,\n so that the result can be modified in place without affecting the\n source in the list.\n serialize (bool or callable): whether to serialize the stroage to other\n backend. If `True`, the default serialize method will be used, if given\n a callable, the callable will be used as serialize method.\n \"\"\"\n self._lst = lst\n self._copy = copy\n if not isinstance(serialize, (bool, Callable)):\n raise TypeError(f\"Unsupported type for argument `serailzie`: {serialize}\")\n self._serialize = serialize is not False\n\n if self._serialize:\n serialize_method = (\n serialize\n if isinstance(serialize, Callable)\n else _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD\n )\n logger.info(f\"Serializing the dataset using: {serialize_method}\")\n self._lst = serialize_method(self._lst)\n\n def __len__(self):\n return len(self._lst)\n\n def __getitem__(self, idx):\n if self._copy and not self._serialize:\n return copy.deepcopy(self._lst[idx])\n else:\n return self._lst[idx]" }, { "identifier": "MapDataset", "path": "nativedancer/third_part/detectron2/data/common.py", "snippet": "class MapDataset(data.Dataset):\n \"\"\"\n Map a function over the elements in a dataset.\n \"\"\"\n\n def __init__(self, dataset, map_func):\n \"\"\"\n Args:\n dataset: a dataset where map function is applied. Can be either\n map-style or iterable dataset. When given an iterable dataset,\n the returned object will also be an iterable dataset.\n map_func: a callable which maps the element in dataset. map_func can\n return None to skip the data (e.g. 
in case of errors).\n How None is handled depends on the style of `dataset`.\n If `dataset` is map-style, it randomly tries other elements.\n If `dataset` is iterable, it skips the data and tries the next.\n \"\"\"\n self._dataset = dataset\n self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work\n\n self._rng = random.Random(42)\n self._fallback_candidates = set(range(len(dataset)))\n\n def __new__(cls, dataset, map_func):\n is_iterable = isinstance(dataset, data.IterableDataset)\n if is_iterable:\n return _MapIterableDataset(dataset, map_func)\n else:\n return super().__new__(cls)\n\n def __getnewargs__(self):\n return self._dataset, self._map_func\n\n def __len__(self):\n return len(self._dataset)\n\n def __getitem__(self, idx):\n retry_count = 0\n cur_idx = int(idx)\n\n while True:\n data = self._map_func(self._dataset[cur_idx])\n if data is not None:\n self._fallback_candidates.add(cur_idx)\n return data\n\n # _map_func fails for this idx, use a random new index from the pool\n retry_count += 1\n self._fallback_candidates.discard(cur_idx)\n cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0]\n\n if retry_count >= 3:\n logger = logging.getLogger(__name__)\n logger.warning(\n \"Failed to apply `_map_func` for idx: {}, retry count: {}\".format(\n idx, retry_count\n )\n )" }, { "identifier": "ToIterableDataset", "path": "nativedancer/third_part/detectron2/data/common.py", "snippet": "class ToIterableDataset(data.IterableDataset):\n \"\"\"\n Convert an old indices-based (also called map-style) dataset\n to an iterable-style dataset.\n \"\"\"\n\n def __init__(\n self,\n dataset: data.Dataset,\n sampler: Sampler,\n shard_sampler: bool = True,\n shard_chunk_size: int = 1,\n ):\n \"\"\"\n Args:\n dataset: an old-style dataset with ``__getitem__``\n sampler: a cheap iterable that produces indices to be applied on ``dataset``.\n shard_sampler: whether to shard the sampler based on the current pytorch data loader\n worker id. When an IterableDataset is forked by pytorch's DataLoader into multiple\n workers, it is responsible for sharding its data based on worker id so that workers\n don't produce identical data.\n\n Most samplers (like our TrainingSampler) do not shard based on dataloader worker id\n and this argument should be set to True. But certain samplers may be already\n sharded, in that case this argument should be set to False.\n shard_chunk_size: when sharding the sampler, each worker will\n \"\"\"\n assert not isinstance(dataset, data.IterableDataset), dataset\n assert isinstance(sampler, Sampler), sampler\n self.dataset = dataset\n self.sampler = sampler\n self.shard_sampler = shard_sampler\n self.shard_chunk_size = shard_chunk_size\n\n def __iter__(self):\n if not self.shard_sampler:\n sampler = self.sampler\n else:\n # With map-style dataset, `DataLoader(dataset, sampler)` runs the\n # sampler in main process only. But `DataLoader(ToIterableDataset(dataset, sampler))`\n # will run sampler in every of the N worker. So we should only keep 1/N of the ids on\n # each worker. 
The assumption is that sampler is cheap to iterate so it's fine to\n # discard ids in workers.\n sampler = _shard_iterator_dataloader_worker(self.sampler, self.shard_chunk_size)\n for idx in sampler:\n yield self.dataset[idx]\n\n def __len__(self):\n return len(self.sampler)" }, { "identifier": "DatasetMapper", "path": "nativedancer/third_part/detectron2/data/dataset_mapper.py", "snippet": "class DatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by the model.\n\n This is the default callable to be used to map your dataset dict into training data.\n You may need to follow it to implement your own one for customized logic,\n such as a different way to read or transform images.\n See :doc:`/tutorials/data_loading` for details.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies cropping/geometric transforms to the image and annotations\n 3. Prepare data and annotations to Tensor and :class:`Instances`\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train: bool,\n *,\n augmentations: List[Union[T.Augmentation, T.Transform]],\n image_format: str,\n use_instance_mask: bool = False,\n use_keypoint: bool = False,\n instance_mask_format: str = \"polygon\",\n keypoint_hflip_indices: Optional[np.ndarray] = None,\n precomputed_proposal_topk: Optional[int] = None,\n recompute_boxes: bool = False,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n is_train: whether it's used in training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n use_instance_mask: whether to process instance segmentation annotations, if available\n use_keypoint: whether to process keypoint annotations if available\n instance_mask_format: one of \"polygon\" or \"bitmask\". 
Process instance segmentation\n masks into this format.\n keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`\n precomputed_proposal_topk: if given, will load pre-computed\n proposals from dataset_dict and keep the top k proposals for each image.\n recompute_boxes: whether to overwrite bounding box annotations\n by computing tight bounding boxes from instance mask annotations.\n \"\"\"\n if recompute_boxes:\n assert use_instance_mask, \"recompute_boxes requires instance masks\"\n # fmt: off\n self.is_train = is_train\n self.augmentations = T.AugmentationList(augmentations)\n self.image_format = image_format\n self.use_instance_mask = use_instance_mask\n self.instance_mask_format = instance_mask_format\n self.use_keypoint = use_keypoint\n self.keypoint_hflip_indices = keypoint_hflip_indices\n self.proposal_topk = precomputed_proposal_topk\n self.recompute_boxes = recompute_boxes\n # fmt: on\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[DatasetMapper] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train: bool = True):\n augs = utils.build_augmentation(cfg, is_train)\n if cfg.INPUT.CROP.ENABLED and is_train:\n augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))\n recompute_boxes = cfg.MODEL.MASK_ON\n else:\n recompute_boxes = False\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"use_instance_mask\": cfg.MODEL.MASK_ON,\n \"instance_mask_format\": cfg.INPUT.MASK_FORMAT,\n \"use_keypoint\": cfg.MODEL.KEYPOINT_ON,\n \"recompute_boxes\": recompute_boxes,\n }\n\n if cfg.MODEL.KEYPOINT_ON:\n ret[\"keypoint_hflip_indices\"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)\n\n if cfg.MODEL.LOAD_PROPOSALS:\n ret[\"precomputed_proposal_topk\"] = (\n cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN\n if is_train\n else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST\n )\n return ret\n\n def _transform_annotations(self, dataset_dict, transforms, image_shape):\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n if not self.use_instance_mask:\n anno.pop(\"segmentation\", None)\n if not self.use_keypoint:\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(\n obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices\n )\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n instances = utils.annotations_to_instances(\n annos, image_shape, mask_format=self.instance_mask_format\n )\n\n # After transforms such as cropping are applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). 
The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n if self.recompute_boxes:\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n dataset_dict[\"instances\"] = utils.filter_empty_instances(instances)\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n # USER: Write your own image loading if it's not from a file\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.image_format)\n utils.check_image_size(dataset_dict, image)\n\n # USER: Remove if you don't do semantic/panoptic segmentation.\n if \"sem_seg_file_name\" in dataset_dict:\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\"), \"L\").squeeze(2)\n else:\n sem_seg_gt = None\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n transforms = self.augmentations(aug_input)\n image, sem_seg_gt = aug_input.image, aug_input.sem_seg\n\n image_shape = image.shape[:2] # h, w\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n # USER: Remove if you don't use pre-computed proposals.\n # Most users would not need this feature.\n if self.proposal_topk is not None:\n utils.transform_proposals(\n dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk\n )\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n dataset_dict.pop(\"sem_seg_file_name\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n self._transform_annotations(dataset_dict, transforms, image_shape)\n\n return dataset_dict" }, { "identifier": "check_metadata_consistency", "path": "nativedancer/third_part/detectron2/data/detection_utils.py", "snippet": "def check_metadata_consistency(key, dataset_names):\n \"\"\"\n Check that the datasets have consistent metadata.\n\n Args:\n key (str): a metadata key\n dataset_names (list[str]): a list of dataset names\n\n Raises:\n AttributeError: if the key does not exist in the metadata\n ValueError: if the given datasets do not have the same metadata values defined by key\n \"\"\"\n if len(dataset_names) == 0:\n return\n logger = logging.getLogger(__name__)\n entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]\n for idx, entry in enumerate(entries_per_dataset):\n if entry != entries_per_dataset[0]:\n logger.error(\n \"Metadata '{}' for dataset '{}' is '{}'\".format(key, dataset_names[idx], str(entry))\n )\n logger.error(\n \"Metadata '{}' for dataset '{}' is '{}'\".format(\n key, dataset_names[0], str(entries_per_dataset[0])\n )\n )\n raise ValueError(\"Datasets have different metadata '{}'!\".format(key))" }, { "identifier": "InferenceSampler", "path": "nativedancer/third_part/detectron2/data/samplers/distributed_sampler.py", "snippet": "class InferenceSampler(Sampler):\n \"\"\"\n Produce indices for inference across all workers.\n Inference needs 
to run on the __exact__ set of samples,\n therefore when the total number of samples is not divisible by the number of workers,\n this sampler produces different number of samples on different workers.\n \"\"\"\n\n def __init__(self, size: int):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n \"\"\"\n self._size = size\n assert size > 0\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n self._local_indices = self._get_local_indices(size, self._world_size, self._rank)\n\n @staticmethod\n def _get_local_indices(total_size, world_size, rank):\n shard_size = total_size // world_size\n left = total_size % world_size\n shard_sizes = [shard_size + int(r < left) for r in range(world_size)]\n\n begin = sum(shard_sizes[:rank])\n end = min(sum(shard_sizes[: rank + 1]), total_size)\n return range(begin, end)\n\n def __iter__(self):\n yield from self._local_indices\n\n def __len__(self):\n return len(self._local_indices)" }, { "identifier": "RandomSubsetTrainingSampler", "path": "nativedancer/third_part/detectron2/data/samplers/distributed_sampler.py", "snippet": "class RandomSubsetTrainingSampler(TrainingSampler):\n \"\"\"\n Similar to TrainingSampler, but only sample a random subset of indices.\n This is useful when you want to estimate the accuracy vs data-number curves by\n training the model with different subset_ratio.\n \"\"\"\n\n def __init__(\n self,\n size: int,\n subset_ratio: float,\n shuffle: bool = True,\n seed_shuffle: Optional[int] = None,\n seed_subset: Optional[int] = None,\n ):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n subset_ratio (float): the ratio of subset data to sample from the underlying dataset\n shuffle (bool): whether to shuffle the indices or not\n seed_shuffle (int): the initial seed of the shuffle. Must be the same\n across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n seed_subset (int): the seed to randomize the subset to be sampled.\n Must be the same across all workers. 
If None, will use a random seed shared\n among workers (require synchronization among all workers).\n \"\"\"\n super().__init__(size=size, shuffle=shuffle, seed=seed_shuffle)\n\n assert 0.0 < subset_ratio <= 1.0\n self._size_subset = int(size * subset_ratio)\n assert self._size_subset > 0\n if seed_subset is None:\n seed_subset = comm.shared_random_seed()\n self._seed_subset = int(seed_subset)\n\n # randomly generate the subset indexes to be sampled from\n g = torch.Generator()\n g.manual_seed(self._seed_subset)\n indexes_randperm = torch.randperm(self._size, generator=g)\n self._indexes_subset = indexes_randperm[: self._size_subset]\n\n logger.info(\"Using RandomSubsetTrainingSampler......\")\n logger.info(f\"Randomly sample {self._size_subset} data from the original {self._size} data\")\n\n def _infinite_indices(self):\n g = torch.Generator()\n g.manual_seed(self._seed) # self._seed equals seed_shuffle from __init__()\n while True:\n if self._shuffle:\n # generate a random permutation to shuffle self._indexes_subset\n randperm = torch.randperm(self._size_subset, generator=g)\n yield from self._indexes_subset[randperm].tolist()\n else:\n yield from self._indexes_subset.tolist()" }, { "identifier": "RepeatFactorTrainingSampler", "path": "nativedancer/third_part/detectron2/data/samplers/distributed_sampler.py", "snippet": "class RepeatFactorTrainingSampler(Sampler):\n \"\"\"\n Similar to TrainingSampler, but a sample may appear more times than others based\n on its \"repeat factor\". This is suitable for training on class imbalanced datasets like LVIS.\n \"\"\"\n\n def __init__(self, repeat_factors, *, shuffle=True, seed=None):\n \"\"\"\n Args:\n repeat_factors (Tensor): a float vector, the repeat factor for each indice. When it's\n full of ones, it is equivalent to ``TrainingSampler(len(repeat_factors), ...)``.\n shuffle (bool): whether to shuffle the indices or not\n seed (int): the initial seed of the shuffle. Must be the same\n across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n \"\"\"\n self._shuffle = shuffle\n if seed is None:\n seed = comm.shared_random_seed()\n self._seed = int(seed)\n\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n\n # Split into whole number (_int_part) and fractional (_frac_part) parts.\n self._int_part = torch.trunc(repeat_factors)\n self._frac_part = repeat_factors - self._int_part\n\n @staticmethod\n def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh):\n \"\"\"\n Compute (fractional) per-image repeat factors based on category frequency.\n The repeat factor for an image is a function of the frequency of the rarest\n category labeled in that image. The \"frequency of category c\" in [0, 1] is defined\n as the fraction of images in the training set (without repeats) in which category c\n appears.\n See :paper:`lvis` (>= v2) Appendix B.2.\n\n Args:\n dataset_dicts (list[dict]): annotations in Detectron2 dataset format.\n repeat_thresh (float): frequency threshold below which data is repeated.\n If the frequency is half of `repeat_thresh`, the image will be\n repeated twice.\n\n Returns:\n torch.Tensor:\n the i-th element is the repeat factor for the dataset image at index i.\n \"\"\"\n # 1. 
For each category c, compute the fraction of images that contain it: f(c)\n category_freq = defaultdict(int)\n for dataset_dict in dataset_dicts: # For each image (without repeats)\n cat_ids = {ann[\"category_id\"] for ann in dataset_dict[\"annotations\"]}\n for cat_id in cat_ids:\n category_freq[cat_id] += 1\n num_images = len(dataset_dicts)\n for k, v in category_freq.items():\n category_freq[k] = v / num_images\n\n # 2. For each category c, compute the category-level repeat factor:\n # r(c) = max(1, sqrt(t / f(c)))\n category_rep = {\n cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq))\n for cat_id, cat_freq in category_freq.items()\n }\n\n # 3. For each image I, compute the image-level repeat factor:\n # r(I) = max_{c in I} r(c)\n rep_factors = []\n for dataset_dict in dataset_dicts:\n cat_ids = {ann[\"category_id\"] for ann in dataset_dict[\"annotations\"]}\n rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0)\n rep_factors.append(rep_factor)\n\n return torch.tensor(rep_factors, dtype=torch.float32)\n\n def _get_epoch_indices(self, generator):\n \"\"\"\n Create a list of dataset indices (with repeats) to use for one epoch.\n\n Args:\n generator (torch.Generator): pseudo random number generator used for\n stochastic rounding.\n\n Returns:\n torch.Tensor: list of dataset indices to use in one epoch. Each index\n is repeated based on its calculated repeat factor.\n \"\"\"\n # Since repeat factors are fractional, we use stochastic rounding so\n # that the target repeat factor is achieved in expectation over the\n # course of training\n rands = torch.rand(len(self._frac_part), generator=generator)\n rep_factors = self._int_part + (rands < self._frac_part).float()\n # Construct a list of indices in which we repeat images as specified\n indices = []\n for dataset_index, rep_factor in enumerate(rep_factors):\n indices.extend([dataset_index] * int(rep_factor.item()))\n return torch.tensor(indices, dtype=torch.int64)\n\n def __iter__(self):\n start = self._rank\n yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)\n\n def _infinite_indices(self):\n g = torch.Generator()\n g.manual_seed(self._seed)\n while True:\n # Sample indices with repeats determined by stochastic rounding; each\n # \"epoch\" may have a slightly different size due to the rounding.\n indices = self._get_epoch_indices(g)\n if self._shuffle:\n randperm = torch.randperm(len(indices), generator=g)\n yield from indices[randperm].tolist()\n else:\n yield from indices.tolist()" }, { "identifier": "TrainingSampler", "path": "nativedancer/third_part/detectron2/data/samplers/distributed_sampler.py", "snippet": "class TrainingSampler(Sampler):\n \"\"\"\n In training, we only care about the \"infinite stream\" of training data.\n So this sampler produces an infinite stream of indices and\n all workers cooperate to correctly shuffle the indices and sample different indices.\n\n The samplers in each worker effectively produces `indices[worker_id::num_workers]`\n where `indices` is an infinite stream of indices consisting of\n `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)\n or `range(size) + range(size) + ...` (if shuffle is False)\n\n Note that this sampler does not shard based on pytorch DataLoader worker id.\n A sampler passed to pytorch DataLoader is used only with map-style dataset\n and will not be executed inside workers.\n But if this sampler is used in a way that it gets execute inside a dataloader\n worker, then extra work needs to be done to 
shard its outputs based on worker id.\n This is required so that workers don't produce identical data.\n :class:`ToIterableDataset` implements this logic.\n This note is true for all samplers in detectron2.\n \"\"\"\n\n def __init__(self, size: int, shuffle: bool = True, seed: Optional[int] = None):\n \"\"\"\n Args:\n size (int): the total number of data of the underlying dataset to sample from\n shuffle (bool): whether to shuffle the indices or not\n seed (int): the initial seed of the shuffle. Must be the same\n across all workers. If None, will use a random seed shared\n among workers (require synchronization among all workers).\n \"\"\"\n if not isinstance(size, int):\n raise TypeError(f\"TrainingSampler(size=) expects an int. Got type {type(size)}.\")\n if size <= 0:\n raise ValueError(f\"TrainingSampler(size=) expects a positive int. Got {size}.\")\n self._size = size\n self._shuffle = shuffle\n if seed is None:\n seed = comm.shared_random_seed()\n self._seed = int(seed)\n\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n\n def __iter__(self):\n start = self._rank\n yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)\n\n def _infinite_indices(self):\n g = torch.Generator()\n g.manual_seed(self._seed)\n while True:\n if self._shuffle:\n yield from torch.randperm(self._size, generator=g).tolist()\n else:\n yield from torch.arange(self._size).tolist()" } ]
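The sampler snippets above spell out the LVIS-style repeat-factor rule used by RepeatFactorTrainingSampler: per category r(c) = max(1, sqrt(t / f(c))), then per image r(I) = max over the categories it contains. A minimal sketch of that arithmetic on toy annotations (the dicts below are invented for illustration and are not a real Detectron2 dataset):

# Illustrative only: mirrors the repeat-factor arithmetic described above on toy data.
import math
from collections import defaultdict

dataset_dicts = [
    {"annotations": [{"category_id": 0}, {"category_id": 1}]},
    {"annotations": [{"category_id": 0}]},
    {"annotations": [{"category_id": 0}]},
    {"annotations": [{"category_id": 2}]},
]
repeat_thresh = 0.5

# f(c): fraction of images that contain category c
category_freq = defaultdict(int)
for d in dataset_dicts:
    for c in {a["category_id"] for a in d["annotations"]}:
        category_freq[c] += 1
num_images = len(dataset_dicts)
category_freq = {c: n / num_images for c, n in category_freq.items()}

# r(c) = max(1, sqrt(t / f(c)))
category_rep = {c: max(1.0, math.sqrt(repeat_thresh / f)) for c, f in category_freq.items()}

# r(I) = max over categories present in image I
rep_factors = [
    max(category_rep[c] for c in {a["category_id"] for a in d["annotations"]})
    for d in dataset_dicts
]
print(rep_factors)  # category 1 or 2 appears in 1/4 images -> r = sqrt(0.5 / 0.25) ~ 1.41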
import itertools
import logging
import numpy as np
import operator
import pickle
import torch
import torch.utils.data as torchdata
from collections import OrderedDict, defaultdict
from typing import Any, Callable, Dict, List, Optional, Union
from tabulate import tabulate
from termcolor import colored
from ..config import configurable
from ..structures import BoxMode
from ..utils.comm import get_world_size
from ..utils.env import seed_all_rng
from ..utils.file_io import PathManager
from ..utils.logger import _log_api_usage, log_first_n
from .catalog import DatasetCatalog, MetadataCatalog
from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset
from .dataset_mapper import DatasetMapper
from .detection_utils import check_metadata_consistency
from .samplers import (
    InferenceSampler,
    RandomSubsetTrainingSampler,
    RepeatFactorTrainingSampler,
    TrainingSampler,
)
12,525
table = tabulate( data, headers=["category", "#instances"] * (N_COLS // 2), tablefmt="pipe", numalign="left", stralign="center", ) log_first_n( logging.INFO, "Distribution of instances among all {} categories:\n".format(num_classes) + colored(table, "cyan"), key="message", ) def get_detection_dataset_dicts( names, filter_empty=True, min_keypoints=0, proposal_files=None, check_consistency=True, ): """ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation. Args: names (str or list[str]): a dataset name or a list of dataset names filter_empty (bool): whether to filter out images without instance annotations min_keypoints (int): filter out images with fewer keypoints than `min_keypoints`. Set to 0 to do nothing. proposal_files (list[str]): if given, a list of object proposal files that match each dataset in `names`. check_consistency (bool): whether to check if datasets have consistent metadata. Returns: list[dict]: a list of dicts following the standard dataset dict format. """ if isinstance(names, str): names = [names] assert len(names), names available_datasets = DatasetCatalog.keys() names_set = set(names) if not names_set.issubset(available_datasets): logger = logging.getLogger(__name__) logger.warning( "The following dataset names are not registered in the DatasetCatalog: " f"{names_set - available_datasets}. " f"Available datasets are {available_datasets}" ) dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names] if isinstance(dataset_dicts[0], torchdata.Dataset): if len(dataset_dicts) > 1: # ConcatDataset does not work for iterable style dataset. # We could support concat for iterable as well, but it's often # not a good idea to concat iterables anyway. return torchdata.ConcatDataset(dataset_dicts) return dataset_dicts[0] for dataset_name, dicts in zip(names, dataset_dicts): assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) if proposal_files is not None: assert len(names) == len(proposal_files) # load precomputed proposals from proposal files dataset_dicts = [ load_proposals_into_dataset(dataset_i_dicts, proposal_file) for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files) ] dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts)) has_instances = "annotations" in dataset_dicts[0] if filter_empty and has_instances: dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts) if min_keypoints > 0 and has_instances: dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints) if check_consistency and has_instances: try: class_names = MetadataCatalog.get(names[0]).thing_classes check_metadata_consistency("thing_classes", names) print_instances_class_histogram(dataset_dicts, class_names) except AttributeError: # class names are not available for this dataset pass assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names)) return dataset_dicts def build_batch_data_loader( dataset, sampler, total_batch_size, *, aspect_ratio_grouping=False, num_workers=0, collate_fn=None, drop_last: bool = True, **kwargs, ): """ Build a batched dataloader. The main differences from `torch.utils.data.DataLoader` are: 1. support aspect ratio grouping options 2. use no "batch collation", because this is common for detection training Args: dataset (torch.utils.data.Dataset): a pytorch map-style or iterable dataset. sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices. Must be provided iff. ``dataset`` is a map-style dataset. 
total_batch_size, aspect_ratio_grouping, num_workers, collate_fn: see :func:`build_detection_train_loader`. drop_last (bool): if ``True``, the dataloader will drop incomplete batches. Returns: iterable[list]. Length of each list is the batch size of the current GPU. Each element in the list comes from the dataset. """
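The cropped code stops at build_batch_data_loader, whose aspect_ratio_grouping option relies on the two-bucket grouping shown in the AspectRatioGroupedDataset snippet earlier in the context list (bucket 0 for w > h, bucket 1 otherwise). A self-contained sketch of that bucketing, assuming fake dicts that carry only "width" and "height":

# Illustrative only: mimics the two-bucket aspect-ratio grouping described above,
# without torch or Detectron2.
def aspect_ratio_batches(dicts, batch_size):
    buckets = [[], []]
    for d in dicts:
        bucket = buckets[0 if d["width"] > d["height"] else 1]
        bucket.append(d)
        if len(bucket) == batch_size:
            yield list(bucket)
            del bucket[:]

fake = [{"width": w, "height": h} for w, h in [(640, 480), (480, 640), (800, 600), (600, 800)]]
for batch in aspect_ratio_batches(fake, batch_size=2):
    print([(d["width"], d["height"]) for d in batch])
# -> one batch of landscape images, one batch of portrait images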
# Copyright (c) Facebook, Inc. and its affiliates. """ This file contains the default logic to build a dataloader for training or testing. """ __all__ = [ "build_batch_data_loader", "build_detection_train_loader", "build_detection_test_loader", "get_detection_dataset_dicts", "load_proposals_into_dataset", "print_instances_class_histogram", ] def filter_images_with_only_crowd_annotations(dataset_dicts): """ Filter out images with none annotations or only crowd annotations (i.e., images without non-crowd annotations). A common training-time preprocessing on COCO dataset. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format, but filtered. """ num_before = len(dataset_dicts) def valid(anns): for ann in anns: if ann.get("iscrowd", 0) == 0: return True return False dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with no usable annotations. {} images left.".format( num_before - num_after, num_after ) ) return dataset_dicts def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image): """ Filter out images with too few number of keypoints. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format as dataset_dicts, but filtered. """ num_before = len(dataset_dicts) def visible_keypoints_in_image(dic): # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility annotations = dic["annotations"] return sum( (np.array(ann["keypoints"][2::3]) > 0).sum() for ann in annotations if "keypoints" in ann ) dataset_dicts = [ x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image ] num_after = len(dataset_dicts) logger = logging.getLogger(__name__) logger.info( "Removed {} images with fewer than {} keypoints.".format( num_before - num_after, min_keypoints_per_image ) ) return dataset_dicts def load_proposals_into_dataset(dataset_dicts, proposal_file): """ Load precomputed object proposals into the dataset. The proposal file should be a pickled dict with the following keys: - "ids": list[int] or list[str], the image ids - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores corresponding to the boxes. - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``. Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. proposal_file (str): file path of pre-computed proposals, in pkl format. Returns: list[dict]: the same format as dataset_dicts, but added proposal field. """ logger = logging.getLogger(__name__) logger.info("Loading proposals from: {}".format(proposal_file)) with PathManager.open(proposal_file, "rb") as f: proposals = pickle.load(f, encoding="latin1") # Rename the key names in D1 proposal files rename_keys = {"indexes": "ids", "scores": "objectness_logits"} for key in rename_keys: if key in proposals: proposals[rename_keys[key]] = proposals.pop(key) # Fetch the indexes of all proposals that are in the dataset # Convert image_id to str since they could be int. 
img_ids = set({str(record["image_id"]) for record in dataset_dicts}) id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids} # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS' bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS for record in dataset_dicts: # Get the index of the proposal i = id_to_index[str(record["image_id"])] boxes = proposals["boxes"][i] objectness_logits = proposals["objectness_logits"][i] # Sort the proposals in descending order of the scores inds = objectness_logits.argsort()[::-1] record["proposal_boxes"] = boxes[inds] record["proposal_objectness_logits"] = objectness_logits[inds] record["proposal_bbox_mode"] = bbox_mode return dataset_dicts def print_instances_class_histogram(dataset_dicts, class_names): """ Args: dataset_dicts (list[dict]): list of dataset dicts. class_names (list[str]): list of class names (zero-indexed). """ num_classes = len(class_names) hist_bins = np.arange(num_classes + 1) histogram = np.zeros((num_classes,), dtype=int) for entry in dataset_dicts: annos = entry["annotations"] classes = np.asarray( [x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=int ) if len(classes): assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}" assert ( classes.max() < num_classes ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes" histogram += np.histogram(classes, bins=hist_bins)[0] N_COLS = min(6, len(class_names) * 2) def short_name(x): # make long class names shorter. useful for lvis if len(x) > 13: return x[:11] + ".." return x data = list( itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)]) ) total_num_instances = sum(data[1::2]) data.extend([None] * (N_COLS - (len(data) % N_COLS))) if num_classes > 1: data.extend(["total", total_num_instances]) data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)]) table = tabulate( data, headers=["category", "#instances"] * (N_COLS // 2), tablefmt="pipe", numalign="left", stralign="center", ) log_first_n( logging.INFO, "Distribution of instances among all {} categories:\n".format(num_classes) + colored(table, "cyan"), key="message", ) def get_detection_dataset_dicts( names, filter_empty=True, min_keypoints=0, proposal_files=None, check_consistency=True, ): """ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation. Args: names (str or list[str]): a dataset name or a list of dataset names filter_empty (bool): whether to filter out images without instance annotations min_keypoints (int): filter out images with fewer keypoints than `min_keypoints`. Set to 0 to do nothing. proposal_files (list[str]): if given, a list of object proposal files that match each dataset in `names`. check_consistency (bool): whether to check if datasets have consistent metadata. Returns: list[dict]: a list of dicts following the standard dataset dict format. """ if isinstance(names, str): names = [names] assert len(names), names available_datasets = DatasetCatalog.keys() names_set = set(names) if not names_set.issubset(available_datasets): logger = logging.getLogger(__name__) logger.warning( "The following dataset names are not registered in the DatasetCatalog: " f"{names_set - available_datasets}. 
" f"Available datasets are {available_datasets}" ) dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names] if isinstance(dataset_dicts[0], torchdata.Dataset): if len(dataset_dicts) > 1: # ConcatDataset does not work for iterable style dataset. # We could support concat for iterable as well, but it's often # not a good idea to concat iterables anyway. return torchdata.ConcatDataset(dataset_dicts) return dataset_dicts[0] for dataset_name, dicts in zip(names, dataset_dicts): assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) if proposal_files is not None: assert len(names) == len(proposal_files) # load precomputed proposals from proposal files dataset_dicts = [ load_proposals_into_dataset(dataset_i_dicts, proposal_file) for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files) ] dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts)) has_instances = "annotations" in dataset_dicts[0] if filter_empty and has_instances: dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts) if min_keypoints > 0 and has_instances: dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints) if check_consistency and has_instances: try: class_names = MetadataCatalog.get(names[0]).thing_classes check_metadata_consistency("thing_classes", names) print_instances_class_histogram(dataset_dicts, class_names) except AttributeError: # class names are not available for this dataset pass assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names)) return dataset_dicts def build_batch_data_loader( dataset, sampler, total_batch_size, *, aspect_ratio_grouping=False, num_workers=0, collate_fn=None, drop_last: bool = True, **kwargs, ): """ Build a batched dataloader. The main differences from `torch.utils.data.DataLoader` are: 1. support aspect ratio grouping options 2. use no "batch collation", because this is common for detection training Args: dataset (torch.utils.data.Dataset): a pytorch map-style or iterable dataset. sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices. Must be provided iff. ``dataset`` is a map-style dataset. total_batch_size, aspect_ratio_grouping, num_workers, collate_fn: see :func:`build_detection_train_loader`. drop_last (bool): if ``True``, the dataloader will drop incomplete batches. Returns: iterable[list]. Length of each list is the batch size of the current GPU. Each element in the list comes from the dataset. """
world_size = get_world_size()
2
2023-12-10 20:14:00+00:00
16k
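The last few values of this row (the completed line "world_size = get_world_size()" following the truncated build_batch_data_loader body) suggest a next-line completion target. A sketch of how such an example might be scored, assuming an exact-match metric and a placeholder generate function that is not part of this document:

# Hedged sketch: one way such an example *might* be scored for next-line prediction.
# `generate` is a placeholder for any code model; exact match is only one possible metric.
def next_line_exact_match(prompt_code: str, gold_next_line: str, generate) -> bool:
    prediction = generate(prompt_code).splitlines()[0].strip()
    return prediction == gold_next_line.strip()

# Trivial demo with a "model" that always emits the gold line:
gold = "world_size = get_world_size()"
print(next_line_exact_match("def build_batch_data_loader(...):\n    ...", gold,
                            lambda prompt: gold + "\n"))  # True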
ethanweber/nerfiller
nerfiller/scripts/inpaint_nerfstudio_dataset.py
[ { "identifier": "RGBInpainter", "path": "nerfiller/inpaint/rgb_inpainter.py", "snippet": "class RGBInpainter:\n \"\"\"\n Module for inpainting with the stable diffusion inpainting pipeline.\n \"\"\"\n\n def __init__(\n self,\n half_precision_weights: bool = True,\n lora_model_path: Optional[str] = None,\n device: str = \"cuda:0\",\n vae_device: str = \"cuda:0\",\n pipeline_name: str = \"stabilityai/stable-diffusion-2-inpainting\",\n ):\n print(f\"Loading RGB Inpainter ...\")\n\n self.half_precision_weights = half_precision_weights\n self.lora_model_path = lora_model_path\n self.device = device\n self.vae_device = vae_device\n self.dtype = torch.float16 if self.half_precision_weights else torch.float32\n self.pipeline_name = pipeline_name\n self.set_pipe()\n self.setup()\n\n def set_pipe(self):\n pipe_kwargs = {\n \"safety_checker\": None,\n \"feature_extractor\": None,\n \"requires_safety_checker\": False,\n \"torch_dtype\": self.dtype,\n }\n self.pipe = StableDiffusionInpaintPipeline.from_pretrained(\n self.pipeline_name,\n **pipe_kwargs,\n )\n\n def setup(self):\n # Load LoRA\n if self.lora_model_path:\n self.pipe.load_lora_weights(self.lora_model_path)\n print(f\"Loaded LoRA model from {self.lora_model_path}\")\n\n self.tokenizer = self.pipe.tokenizer\n self.text_encoder = self.pipe.text_encoder.to(self.device).eval()\n\n self.unet = self.pipe.unet.to(self.device).eval()\n self.vae = self.pipe.vae.to(self.vae_device).eval()\n\n self.vae_scale_factor = 2 ** (len(self.pipe.vae.config.block_out_channels) - 1)\n self.vae_latent_channels = self.pipe.vae.config.latent_channels\n\n # self.scheduler = DDPMScheduler.from_config(self.pipe.scheduler.config)\n self.scheduler = DDIMScheduler.from_config(self.pipe.scheduler.config)\n self.num_train_timesteps = self.scheduler.num_train_timesteps\n self.alphas = self.scheduler.alphas_cumprod.to(self.device)\n\n del self.pipe\n cleanup()\n\n print(f\"Loaded RGB inpainter!\")\n\n def compute_text_embeddings(self, prompt: str, negative_prompt: str):\n \"\"\"Get the text embeddings for a string.\"\"\"\n assert self.tokenizer is not None\n assert self.text_encoder is not None\n with torch.no_grad():\n text_inputs = tokenize_prompt(self.tokenizer, prompt, tokenizer_max_length=None)\n prompt_embeds = encode_prompt(\n self.text_encoder,\n text_inputs.input_ids,\n text_inputs.attention_mask,\n text_encoder_use_attention_mask=False,\n )\n negative_text_inputs = tokenize_prompt(self.tokenizer, negative_prompt, tokenizer_max_length=None)\n negative_prompt_embeds = encode_prompt(\n self.text_encoder,\n negative_text_inputs.input_ids,\n negative_text_inputs.attention_mask,\n text_encoder_use_attention_mask=False,\n )\n\n return [prompt_embeds, negative_prompt_embeds]\n\n def destroy_text_encoder(self) -> None:\n \"\"\"Delete the text modules to save on memory.\"\"\"\n del self.tokenizer\n del self.text_encoder\n cleanup()\n\n def forward_unet(\n self,\n sample,\n t,\n text_embeddings,\n denoise_in_grid: bool = False,\n ):\n # process embeddings\n prompt_embeds, negative_prompt_embeds = text_embeddings\n\n batch_size = sample.shape[0] // 3\n\n prompt_embeds = torch.cat(\n [\n prompt_embeds.repeat(batch_size, 1, 1),\n negative_prompt_embeds.repeat(batch_size, 1, 1),\n negative_prompt_embeds.repeat(batch_size, 1, 1),\n ]\n )\n\n if denoise_in_grid:\n grid_sample = make_grid(sample)\n grid_prompt_embeds = prompt_embeds[:3].repeat(grid_sample.shape[0] // 3, 1, 1)\n noise_pred = self.unet(\n sample=grid_sample,\n timestep=t,\n encoder_hidden_states=grid_prompt_embeds,\n 
return_dict=False,\n )[0]\n noise_pred = undo_grid(noise_pred)\n else:\n noise_pred = self.unet(\n sample=sample,\n timestep=t,\n encoder_hidden_states=prompt_embeds,\n return_dict=False,\n )[0]\n return noise_pred\n\n def get_noise_pred(\n self,\n t,\n model_input: ModelInput,\n text_embeddings,\n text_guidance_scale: float = 0.0,\n image_guidance_scale: float = 0.0,\n denoise_in_grid: bool = False,\n multidiffusion_steps: int = 1,\n multidiffusion_type: str = \"epsilon\",\n randomize_latents: bool = False,\n randomize_within_grid: bool = False,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n only_noise_pred: bool = False,\n ):\n assert self.scheduler.config.prediction_type == \"epsilon\", \"We assume the model predicts epsilon.\"\n\n batch_size = model_input.latents.shape[0]\n value = torch.zeros_like(model_input.latents)\n count = torch.zeros_like(model_input.latents)\n\n for i in range(multidiffusion_steps):\n if randomize_latents:\n indices = torch.randperm(batch_size)\n else:\n indices = torch.arange(batch_size)\n\n if denoise_in_grid and randomize_within_grid:\n for j in range(0, len(indices), 4):\n indices[j : j + 4] = indices[j : j + 4][torch.randperm(4)]\n\n latents = model_input.latents[indices]\n latents_mask = model_input.latents_mask[indices]\n latents_mask_uncond = model_input.latents_mask_uncond[indices]\n masked_image_latents = model_input.masked_image_latents[indices]\n masked_image_latents_uncond = model_input.masked_image_latents_uncond[indices]\n\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents, latents, latents])\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n latents_mask_input = torch.cat([latents_mask, latents_mask, latents_mask_uncond])\n masked_image_latents_input = torch.cat(\n [\n masked_image_latents,\n masked_image_latents,\n masked_image_latents_uncond,\n ]\n )\n\n # concat latents, mask, masked_image_latents in the channel dimension\n latent_model_input_cat = torch.cat(\n [latent_model_input, latents_mask_input, masked_image_latents_input],\n dim=1,\n )\n\n # TODO: save compute by skipping some text encodings if not using them in CFG\n\n noise_pred_all = self.forward_unet(\n sample=latent_model_input_cat,\n t=t,\n text_embeddings=text_embeddings,\n denoise_in_grid=denoise_in_grid,\n )\n\n noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred_all.chunk(3)\n\n noise_pred = (\n noise_pred_image\n + text_guidance_scale * (noise_pred_text - noise_pred_image)\n + image_guidance_scale * (noise_pred_image - noise_pred_uncond)\n )\n\n if multidiffusion_type == \"v_prediction\":\n v_prediction = get_v_prediction_from_epsilon(noise_pred, t, latents, self.scheduler.alphas_cumprod)\n value[indices] += v_prediction\n count[indices] += 1\n elif multidiffusion_type == \"epsilon\":\n value[indices] += noise_pred\n count[indices] += 1\n else:\n raise ValueError(\"Not implemented.\")\n\n # take the MultiDiffusion step\n final_noise_pred = torch.where(count > 0, value / count, value)\n\n if multidiffusion_type == \"v_prediction\":\n final_noise_pred = get_epsilon_from_v_prediction(\n final_noise_pred,\n t.item(),\n model_input.latents,\n self.scheduler.alphas_cumprod,\n )\n elif multidiffusion_type == \"epsilon\":\n pass\n else:\n raise ValueError(\"Not implemented.\")\n\n if only_noise_pred:\n return None, None, final_noise_pred\n\n scheduler_output = self.scheduler.step(final_noise_pred, t, model_input.latents, generator=generator)\n 
pred_prev_sample = scheduler_output.prev_sample\n pred_original_sample = scheduler_output.pred_original_sample\n\n assert not pred_prev_sample.isnan().any()\n assert not pred_original_sample.isnan().any()\n return pred_prev_sample, pred_original_sample, final_noise_pred\n\n def get_model_input(\n self,\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n starting_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n starting_timestep: Optional[int] = None,\n keep_grad: bool = False,\n ) -> ModelInput:\n \"\"\"Returns the inputs for the unet.\"\"\"\n\n # TODO: incorporate seeds\n\n batch_size, _, height, width = image.shape\n\n noise = randn_tensor(\n shape=(\n batch_size,\n self.vae_latent_channels,\n height // self.vae_scale_factor,\n width // self.vae_scale_factor,\n ),\n generator=generator,\n device=torch.device(self.device),\n dtype=self.dtype,\n )\n if starting_image is not None:\n assert starting_timestep is not None\n if keep_grad:\n latents = self.encode_images(starting_image)\n else:\n with torch.no_grad():\n latents = self.encode_images(starting_image)\n latents = self.scheduler.add_noise(latents, noise, starting_timestep)\n else:\n latents = noise\n\n latents_mask = torch.nn.functional.interpolate(\n mask,\n size=(height // self.vae_scale_factor, width // self.vae_scale_factor),\n mode=\"nearest\",\n )\n assert len(torch.unique(latents_mask)) <= 2\n latents_mask = latents_mask.to(device=self.device, dtype=self.dtype)\n assert len(torch.unique(mask)) <= 2\n masked_image = torch.where(mask == 0, image, 0.5)\n with torch.no_grad():\n masked_image_latents = self.encode_images(masked_image)\n\n latents_mask_uncond = torch.ones_like(latents_mask)\n masked_image_uncond = torch.ones_like(masked_image) * 0.5\n with torch.no_grad():\n masked_image_latents_uncond = self.encode_images(masked_image_uncond)\n\n model_input = ModelInput(\n latents.to(device=self.device, dtype=self.dtype),\n latents_mask.to(device=self.device, dtype=self.dtype),\n masked_image_latents.to(device=self.device, dtype=self.dtype),\n latents_mask_uncond.to(device=self.device, dtype=self.dtype),\n masked_image_latents_uncond.to(device=self.device, dtype=self.dtype),\n noise.to(device=self.device, dtype=self.dtype),\n )\n\n return model_input\n\n def get_loss(\n self,\n x0: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n depth: Optional[Float[Tensor, \"B 1 H W\"]] = None,\n multiview_guidance_scale: float = 0.0,\n reconstruction_guidance_scale: float = 0.0,\n feature_extractor: Optional[FeatureExtractor] = None,\n multiview_metric: Optional[MultiviewMetric] = None,\n K: Optional[Float[Tensor, \"B 3 3\"]] = None,\n c2w: Optional[Float[Tensor, \"B 3 4\"]] = None,\n output_folder: Optional[Path] = None,\n step: int = 0,\n guidance_step: int = 0,\n starting_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n ):\n \"\"\"Losses on the VAE decoded images x0.\n The multi-view loss is applied where mask == 0.0 (regions that have known depth).\n \"\"\"\n\n loss = 0.0\n\n if multiview_guidance_scale != 0.0:\n features = feature_extractor(x0.to(feature_extractor.device)).to(self.device)\n\n # multiview guidance\n scale_factor = features.shape[-1] / x0.shape[-1]\n K_scaled = rescale_intrinsics(K, scale_factor, scale_factor)\n mask_scaled = 1.0 - torch.nn.functional.interpolate(mask, scale_factor=scale_factor, mode=\"nearest\")\n depth_scaled = torch.nn.functional.interpolate(depth, scale_factor=scale_factor, 
mode=\"bilinear\")\n for cam1 in range(len(c2w)):\n for cam2 in range(cam1 + 1, len(c2w)):\n loss_mv, loss_dict = multiview_metric(\n features1=features[cam1 : cam1 + 1],\n features2=features[cam2 : cam2 + 1],\n K1=K_scaled[cam1 : cam1 + 1],\n K2=K_scaled[cam2 : cam2 + 1],\n c2w1=c2w[cam1 : cam1 + 1],\n c2w2=c2w[cam2 : cam2 + 1],\n image1=x0[cam1 : cam1 + 1],\n image2=x0[cam2 : cam2 + 1],\n mask1=mask_scaled[cam1 : cam1 + 1],\n mask2=mask_scaled[cam2 : cam2 + 1],\n depth1=depth_scaled[cam1 : cam1 + 1],\n depth2=depth_scaled[cam2 : cam2 + 1],\n output_folder=output_folder if (cam1 == 0 and guidance_step == 0) else None,\n suffix=f\"-{step:06d}-{cam1:06d}-{cam2:06d}-{guidance_step:06d}\",\n )\n loss += multiview_guidance_scale * loss_mv.sum()\n\n if reconstruction_guidance_scale != 0.0:\n loss += (\n reconstruction_guidance_scale * (((starting_image.to(x0.device) - x0) * mask.to(x0.device)) ** 2).mean()\n )\n\n return loss\n\n @torch.cuda.amp.autocast(enabled=True)\n def get_image(\n self,\n text_embeddings,\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n num_inference_steps: int = 20,\n denoise_in_grid: bool = False,\n depth: Optional[Float[Tensor, \"B 1 H W\"]] = None,\n text_guidance_scale: Optional[float] = None,\n image_guidance_scale: Optional[float] = None,\n multidiffusion_steps: int = 1,\n multidiffusion_type: str = \"epsilon\",\n randomize_latents: bool = False,\n randomize_within_grid: bool = False,\n use_decoder_approximation: bool = False,\n multiview_guidance_scale: float = 0.0,\n reconstruction_guidance_scale: float = 0.0,\n feature_extractor: Optional[FeatureExtractor] = None,\n multiview_metric: Optional[MultiviewMetric] = None,\n K: Optional[Float[Tensor, \"B 3 3\"]] = None,\n c2w: Optional[Float[Tensor, \"B 3 4\"]] = None,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n show_multiview: bool = False,\n guidance_steps: List[int] = [5],\n num_guidance_steps: int = 10,\n classifier_guidance_scale: float = 0.0,\n output_folder: Optional[Path] = None,\n starting_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n starting_lower_bound: Optional[float] = None,\n starting_upper_bound: Optional[float] = None,\n classifier_guidance_loss_rescale=1000.0,\n classifier_guidance_start_step: int = 0,\n replace_original_pixels: bool = False,\n ) -> Float[Tensor, \"B 3 H W\"]:\n \"\"\"Run the denoising sampling process, also known as the reverse process.\n Inpaint where mask == 1.\n If output folder is not None, then save images to this folder.\n\n Args:\n text_embeddings: Either 2 per image (BB) or 2 total, which will use the same cond. and uncond. 
prompts for all.\n loss_rescale: To prevent fp16 underflow\n \"\"\"\n\n if output_folder:\n output_folder.mkdir(parents=True, exist_ok=True)\n\n batch_size, _, height, width = image.shape\n\n if starting_lower_bound is not None:\n min_step = int(self.num_train_timesteps * starting_lower_bound)\n max_step = int(self.num_train_timesteps * starting_upper_bound)\n # select t, set multi-step diffusion\n T = torch.randint(min_step, max_step + 1, [1], dtype=torch.long, device=self.device)\n self.scheduler.config.num_train_timesteps = T.item()\n else:\n self.scheduler.config.num_train_timesteps = self.num_train_timesteps\n\n self.scheduler.set_timesteps(num_inference_steps, device=self.device)\n\n model_input = self.get_model_input(\n image=image,\n mask=mask,\n generator=generator,\n # self.scheduler.config.num_train_timesteps == 1000 is equivalent to starting_lower_bound and starting_upper_bound both being 1\n # so start with full noise by setting this to None\n starting_image=starting_image if self.scheduler.config.num_train_timesteps != 1000 else None,\n starting_timestep=self.scheduler.timesteps[0],\n )\n\n if depth is None:\n depth = torch.zeros_like(mask)\n\n progress = Progress(\n TextColumn(\"[progress.description]{task.description}\"),\n BarColumn(),\n TaskProgressColumn(),\n TimeElapsedColumn(),\n )\n task1 = progress.add_task(\n f\"[green]Inpainting batch of images...\",\n total=len(self.scheduler.timesteps),\n )\n\n with progress:\n for i, t in enumerate(self.scheduler.timesteps):\n start_time = time.time()\n\n # DragDiffusion style guidance (\"drag\")\n use_drag_guidance = (\n multiview_guidance_scale != 0.0 or reconstruction_guidance_scale != 0.0\n ) and i in guidance_steps\n if use_drag_guidance:\n model_input.latents = model_input.latents.to(torch.float32).detach().requires_grad_(True)\n scaler = torch.cuda.amp.GradScaler()\n optimizer = torch.optim.Adam([model_input.latents], lr=1e-2)\n for guidance_step in range(num_guidance_steps):\n _, pred_original_sample, _ = self.get_noise_pred(\n t,\n model_input,\n text_embeddings,\n text_guidance_scale=text_guidance_scale,\n image_guidance_scale=image_guidance_scale,\n denoise_in_grid=denoise_in_grid,\n multidiffusion_steps=1,\n multidiffusion_type=multidiffusion_type,\n randomize_latents=randomize_latents,\n randomize_within_grid=randomize_within_grid,\n )\n x0 = self.decode_latents(\n pred_original_sample,\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n\n if output_folder:\n image_x0 = torch.cat(list(x0.permute(0, 2, 3, 1)), dim=1).detach().cpu()\n mediapy.write_image(\n output_folder / f\"x0-{i:06d}-{guidance_step:06d}.png\",\n image_x0,\n )\n\n loss = self.get_loss(\n x0=x0,\n mask=mask,\n depth=depth,\n multiview_guidance_scale=multiview_guidance_scale,\n reconstruction_guidance_scale=reconstruction_guidance_scale,\n feature_extractor=feature_extractor,\n multiview_metric=multiview_metric,\n K=K,\n c2w=c2w,\n output_folder=output_folder / \"drag_guidance\",\n step=i,\n guidance_step=guidance_step,\n starting_image=starting_image,\n )\n if wandb.run is not None:\n wandb.log({f\"{output_folder.name}/drag_guidance_loss-{i}\": loss})\n\n optimizer.zero_grad()\n assert not loss.isnan().any()\n scaler.scale(loss).backward()\n\n assert not model_input.latents.grad.isnan().any()\n # print(\n # model_input.latents.grad.abs().mean(),\n # (model_input.latents.grad == 0.0).sum() / model_input.latents.grad.numel(),\n # )\n\n 
scaler.step(optimizer)\n assert not model_input.latents.isnan().any()\n assert not depth.isnan().any()\n scaler.update()\n\n # take a step\n use_classifier_guidance = classifier_guidance_scale != 0.0 and i >= classifier_guidance_start_step\n model_input.latents = (\n model_input.latents.to(self.dtype).detach().requires_grad_(use_classifier_guidance)\n )\n with torch.enable_grad() if use_classifier_guidance else torch.no_grad():\n _, pred_original_sample, noise_pred = self.get_noise_pred(\n t,\n model_input,\n text_embeddings,\n text_guidance_scale=text_guidance_scale,\n image_guidance_scale=image_guidance_scale,\n denoise_in_grid=denoise_in_grid,\n multidiffusion_steps=multidiffusion_steps,\n multidiffusion_type=multidiffusion_type,\n randomize_latents=randomize_latents,\n randomize_within_grid=randomize_within_grid,\n )\n\n # classifier guidance (\"classifier\")\n if use_classifier_guidance:\n x0 = self.decode_latents(\n pred_original_sample,\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n\n loss = self.get_loss(\n x0=x0,\n mask=mask,\n depth=depth,\n multiview_guidance_scale=multiview_guidance_scale,\n reconstruction_guidance_scale=reconstruction_guidance_scale,\n feature_extractor=feature_extractor,\n multiview_metric=multiview_metric,\n K=K,\n c2w=c2w,\n output_folder=output_folder / \"classifier_guidance\",\n step=i,\n guidance_step=0,\n starting_image=starting_image,\n )\n if wandb.run is not None:\n wandb.log({f\"{output_folder.name}/classifier_guidance_loss\": loss})\n\n grad = (\n torch.autograd.grad(\n classifier_guidance_loss_rescale * loss,\n model_input.latents,\n )[0]\n / classifier_guidance_loss_rescale\n )\n # print(\n # grad.abs().mean(),\n # (grad == 0.0).sum() / grad.numel(),\n # )\n noise_pred = noise_pred + classifier_guidance_scale * grad\n\n model_input.latents = model_input.latents.detach().requires_grad_(False)\n scheduler_output = self.scheduler.step(noise_pred, t, model_input.latents, generator=generator)\n model_input.latents = scheduler_output.prev_sample\n\n if output_folder:\n # save the denoised x0\n with torch.no_grad():\n x0 = self.decode_latents(\n pred_original_sample,\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n\n if use_drag_guidance or use_classifier_guidance:\n loss = self.get_loss(\n x0=x0,\n mask=mask,\n depth=depth,\n multiview_guidance_scale=multiview_guidance_scale,\n reconstruction_guidance_scale=reconstruction_guidance_scale,\n feature_extractor=feature_extractor,\n multiview_metric=multiview_metric,\n K=K,\n c2w=c2w,\n output_folder=None,\n step=i,\n guidance_step=0,\n starting_image=starting_image,\n )\n if wandb.run is not None:\n wandb.log({f\"{output_folder.name}/loss\": loss})\n\n image_x0 = torch.cat(list(x0.permute(0, 2, 3, 1)), dim=1).detach().cpu()\n mediapy.write_image(output_folder / \"x0.png\", image_x0)\n mediapy.write_image(output_folder / f\"x0-{i:06d}.png\", image_x0)\n\n progress.update(task1, advance=1)\n end_time = time.time()\n # print(f\"[green]Time for iter {i}:\", end_time - start_time)\n\n if output_folder:\n output_filename = str(output_folder) + \".mp4\"\n CONSOLE.print(f\"[green]Saving video to {output_filename}\")\n save_video_from_path(\n path=output_folder,\n glob_str=\"x0*png\",\n sec=10,\n output_filename=output_filename,\n )\n\n with torch.no_grad():\n x0 = self.decode_latents(\n 
model_input.latents.detach(),\n use_decoder_approximation=use_decoder_approximation,\n ).to(torch.float32)\n x0 = torch.where(mask == 0, image, x0) if replace_original_pixels else x0\n return x0\n\n def encode_images(self, imgs: Float[Tensor, \"B 3 512 512\"]) -> Float[Tensor, \"B 4 64 64\"]:\n imgs = imgs * 2.0 - 1.0\n sampled_posterior = self.vae.encode(imgs.to(self.vae_device), return_dict=False)[0].sample().to(self.device)\n latents = sampled_posterior * 0.18215\n return latents\n\n def decode_latents(\n self,\n latents: Float[Tensor, \"B 4 H W\"],\n use_decoder_approximation: bool = False,\n ) -> Float[Tensor, \"B 3 Hout Wout\"]:\n if use_decoder_approximation:\n da = get_decoder_approximation().to(latents)\n x = torch.nn.functional.interpolate(latents, scale_factor=self.vae_scale_factor, mode=\"bilinear\")\n x = torch.matmul(x.permute(0, 2, 3, 1), da).permute(0, 3, 1, 2)\n return x\n else:\n scaled_latents = 1 / 0.18215 * latents\n image = self.vae.decode(scaled_latents.to(self.vae_device), return_dict=False)[0].to(self.device)\n image = (image * 0.5 + 0.5).clamp(0, 1)\n return image\n\n def sds_loss(\n self,\n text_embeddings: Union[Float[Tensor, \"BB 77 768\"], Float[Tensor, \"2 77 768\"]],\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n starting_image: Float[Tensor, \"B 3 H W\"],\n text_guidance_scale: Optional[float] = None,\n image_guidance_scale: Optional[float] = None,\n starting_lower_bound: float = 0.02,\n starting_upper_bound: float = 0.98,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n ) -> torch.Tensor:\n \"\"\"Score Distilation Sampling loss proposed in DreamFusion paper (https://dreamfusion3d.github.io/)\n Args:\n text_embeddings: Text embeddings\n image: Rendered image\n mask: Mask, inpaint where 1\n text_guidance_scale: How much to weigh the guidance\n image_guidance_scale: How much to weigh the guidance\n Returns:\n The loss\n \"\"\"\n\n # NOTE: doesn't work for gridding right now\n\n batch_size, _, height, width = image.shape\n\n min_step = int(self.num_train_timesteps * starting_lower_bound)\n max_step = int(self.num_train_timesteps * starting_upper_bound)\n\n t = torch.randint(min_step, max_step + 1, [1], dtype=torch.long, device=self.device)\n\n model_input = self.get_model_input(\n image=image,\n mask=mask,\n generator=generator,\n starting_image=starting_image,\n starting_timestep=t,\n keep_grad=True,\n )\n\n # predict the noise residual with unet, NO grad!\n with torch.no_grad():\n _, _, noise_pred = self.get_noise_pred(\n t,\n model_input,\n text_embeddings,\n text_guidance_scale=text_guidance_scale,\n image_guidance_scale=image_guidance_scale,\n only_noise_pred=True,\n )\n\n # w(t), sigma_t^2\n w = 1 - self.alphas[t]\n\n grad = w * (noise_pred - model_input.noise)\n grad = torch.nan_to_num(grad)\n\n target = (model_input.latents - grad).detach()\n loss = (\n 0.5\n * torch.nn.functional.mse_loss(model_input.latents, target, reduction=\"sum\")\n / model_input.latents.shape[0]\n )\n\n return loss" }, { "identifier": "LaMaInpainter", "path": "nerfiller/inpaint/lama_inpainter.py", "snippet": "class LaMaInpainter:\n \"\"\"LaMa inpainter model.\"\"\"\n\n def __init__(self, device: str = \"cuda:0\", model_path: Path = Path(\"data/models/big-lama\")):\n print(f\"Loading LaMa inpainter ...\")\n\n self.device = device\n\n train_config_path = os.path.join(model_path, \"config.yaml\")\n with open(train_config_path, \"r\") as f:\n train_config = OmegaConf.create(yaml.safe_load(f))\n\n 
train_config.training_model.predict_only = True\n train_config.visualizer.kind = \"noop\"\n\n checkpoint_path = os.path.join(model_path, \"models\", \"best.ckpt\")\n\n self.model = load_checkpoint(train_config, checkpoint_path, strict=False, map_location=\"cpu\")\n self.model.freeze()\n self.model.to(self.device)\n\n def get_image(self, image: Float[Tensor, \"B 3 H W\"], mask: Float[Tensor, \"B 1 H W\"]):\n with torch.no_grad():\n batch = {}\n batch[\"image\"] = image\n batch[\"mask\"] = mask\n batch = self.model(batch)\n inpainted_image = batch[\"inpainted\"]\n return inpainted_image" }, { "identifier": "parse_nerfstudio_frame", "path": "nerfiller/nerf/dataset_utils.py", "snippet": "def parse_nerfstudio_frame(\n transforms: Dict,\n data_path: Path,\n idx: int,\n depth_max: int = None,\n device: str = \"cpu\",\n size: Optional[Tuple[int, int]] = None,\n dtype=torch.float32,\n):\n \"\"\"Parses a Nerfstudio frame, where idx == 0 is the first image sorted by filename.\n The frames are not normally sorted, but we sort them before doing any operations.\n We return processed information where we load images, depth maps, and masks, useful for inpainting this dataset.\n Size will resize the image to (height, width).\n \"\"\"\n sorted_frames = sorted(transforms[\"frames\"], key=lambda x: x[\"file_path\"])\n imf = data_path / Path(sorted_frames[idx][\"file_path\"])\n image = torch.from_numpy(mediapy.read_image(imf) / 255.0).permute(2, 0, 1)[None].to(dtype).to(device)\n if \"mask_path\" in sorted_frames[idx]:\n maf = data_path / Path(sorted_frames[idx][\"mask_path\"])\n mask = 1 - torch.from_numpy(mediapy.read_image(maf) / 255.0)[None, None].to(dtype).to(device)\n else:\n mask = torch.zeros_like(image[:, :1])\n if \"depth_file_path\" in sorted_frames[idx]:\n daf = data_path / Path(sorted_frames[idx][\"depth_file_path\"])\n depth = torch.from_numpy(np.load(daf))[None, None].to(dtype).to(device)\n else:\n depth = torch.zeros_like(image[:, :1])\n # image *= 1 - mask\n # depth *= 1 - mask\n if depth_max:\n depth[depth > depth_max] = 0.0\n # check if the values are stored per frame\n if \"fl_x\" in sorted_frames[idx]:\n fx = sorted_frames[idx][\"fl_x\"]\n fy = sorted_frames[idx][\"fl_y\"]\n cx = sorted_frames[idx][\"cx\"]\n cy = sorted_frames[idx][\"cy\"]\n else:\n fx = transforms[\"fl_x\"]\n fy = transforms[\"fl_y\"]\n cx = transforms[\"cx\"]\n cy = transforms[\"cy\"]\n K = torch.tensor([[fx, 0, cx], [0, fy, cy], [0, 0, 1]], dtype=torch.float32, device=device)\n c2wh = torch.tensor(sorted_frames[idx][\"transform_matrix\"]).to(torch.float32).to(device)\n c2w = c2wh[:3]\n w2ch = torch.inverse(c2wh)\n w2c = w2ch[:3]\n K = K[None]\n c2w = c2w[None]\n\n if size:\n scale_factor_x = size[1] / image.shape[-1]\n scale_factor_y = size[0] / image.shape[-2]\n image = torch.nn.functional.interpolate(image, size=size, mode=\"bilinear\")\n depth = torch.nn.functional.interpolate(depth, size=size, mode=\"bilinear\")\n mask = torch.nn.functional.interpolate(mask, size=size, mode=\"nearest\")\n K = rescale_intrinsics(K, scale_factor_x, scale_factor_y)\n\n return image, depth, mask, c2w, K" }, { "identifier": "get_inpainted_image_row", "path": "nerfiller/utils/image_utils.py", "snippet": "def get_inpainted_image_row(\n image: Float[Tensor, \"B 3 H W\"],\n mask: Float[Tensor, \"B 1 H W\"],\n inpainted_image: Optional[Float[Tensor, \"B 3 H W\"]] = None,\n color: Tuple[float, float, float] = Colors.NEON_PINK.value,\n show_original: bool = False,\n):\n \"\"\"Returns an image concatenated along the x-axis. 
It has the following form:\n image with inpaint regions highlighted | image with inpainted regions\n Inpaint where mask == 1.\n The default color is neon pink.\n If the inpainted image is None, then just show the `image with inpaint regions highlighted`.\n \"\"\"\n device = image.device\n c = torch.tensor(color, device=device).view(1, 3, 1, 1)\n color_image = torch.ones_like(image) * c\n image_with_highlights = torch.where(mask == 1, color_image, image)\n image_list = [image_with_highlights]\n if inpainted_image is not None:\n image_list = image_list + [inpainted_image]\n if show_original:\n image_list = [image] + image_list\n im = torch.cat(image_list, dim=-2)\n return im" }, { "identifier": "rescale_intrinsics", "path": "nerfiller/utils/camera_utils.py", "snippet": "def rescale_intrinsics(Ks: Float[Tensor, \"B 3 3 3\"], scale_factor_x: float, scale_factor_y: float):\n Ks_new = Ks.clone()\n Ks_new[:, 0:1] *= scale_factor_x\n Ks_new[:, 1:2] *= scale_factor_y\n return Ks_new" }, { "identifier": "InpaintConfig", "path": "nerfiller/configs/inpaint.py", "snippet": "class InpaintConfig:" }, { "identifier": "register_extended_attention", "path": "nerfiller/utils/diff_utils.py", "snippet": "def register_extended_attention(unet):\n \"\"\"Method from Tune-A-Video, but code modified from TokenFlow codebase.\"\"\"\n\n def sa_forward(self):\n to_out = self.to_out\n if type(to_out) is torch.nn.modules.container.ModuleList:\n to_out = self.to_out[0]\n else:\n to_out = self.to_out\n\n def forward(x, encoder_hidden_states=None, attention_mask=None):\n batch_size, sequence_length, dim = x.shape\n h = self.heads\n # Here we are making an assumption about passing in 3 varients of conditioning into the model\n n_frames = batch_size // 3\n is_cross = encoder_hidden_states is not None\n encoder_hidden_states = encoder_hidden_states if is_cross else x\n q = self.to_q(x)\n k = self.to_k(encoder_hidden_states)\n v = self.to_v(encoder_hidden_states)\n\n k_0 = k[:n_frames].reshape(1, n_frames * sequence_length, -1).repeat(n_frames, 1, 1)\n k_1 = k[n_frames : 2 * n_frames].reshape(1, n_frames * sequence_length, -1).repeat(n_frames, 1, 1)\n k_2 = k[2 * n_frames :].reshape(1, n_frames * sequence_length, -1).repeat(n_frames, 1, 1)\n v_0 = v[:n_frames].reshape(1, n_frames * sequence_length, -1).repeat(n_frames, 1, 1)\n v_1 = v[n_frames : 2 * n_frames].reshape(1, n_frames * sequence_length, -1).repeat(n_frames, 1, 1)\n v_2 = v[2 * n_frames :].reshape(1, n_frames * sequence_length, -1).repeat(n_frames, 1, 1)\n\n q_0 = self.head_to_batch_dim(q[:n_frames])\n q_1 = self.head_to_batch_dim(q[n_frames : 2 * n_frames])\n q_2 = self.head_to_batch_dim(q[2 * n_frames :])\n k_0 = self.head_to_batch_dim(k_0)\n k_1 = self.head_to_batch_dim(k_1)\n k_2 = self.head_to_batch_dim(k_2)\n v_0 = self.head_to_batch_dim(v_0)\n v_1 = self.head_to_batch_dim(v_1)\n v_2 = self.head_to_batch_dim(v_2)\n\n out_0 = []\n out_1 = []\n out_2 = []\n\n q_0 = q_0.view(n_frames, h, sequence_length, dim // h)\n k_0 = k_0.view(n_frames, h, sequence_length * n_frames, dim // h)\n v_0 = v_0.view(n_frames, h, sequence_length * n_frames, dim // h)\n q_1 = q_1.view(n_frames, h, sequence_length, dim // h)\n k_1 = k_1.view(n_frames, h, sequence_length * n_frames, dim // h)\n v_1 = v_1.view(n_frames, h, sequence_length * n_frames, dim // h)\n q_2 = q_2.view(n_frames, h, sequence_length, dim // h)\n k_2 = k_2.view(n_frames, h, sequence_length * n_frames, dim // h)\n v_2 = v_2.view(n_frames, h, sequence_length * n_frames, dim // h)\n\n for j in range(h):\n sim_0 = 
torch.bmm(q_0[:, j], k_0[:, j].transpose(-1, -2)) * self.scale\n sim_1 = torch.bmm(q_1[:, j], k_1[:, j].transpose(-1, -2)) * self.scale\n sim_2 = torch.bmm(q_2[:, j], k_2[:, j].transpose(-1, -2)) * self.scale\n\n out_0.append(torch.bmm(sim_0.softmax(dim=-1), v_0[:, j]))\n out_1.append(torch.bmm(sim_1.softmax(dim=-1), v_1[:, j]))\n out_2.append(torch.bmm(sim_2.softmax(dim=-1), v_2[:, j]))\n\n out_0 = (\n torch.cat(out_0, dim=0)\n .view(h, n_frames, sequence_length, dim // h)\n .permute(1, 0, 2, 3)\n .reshape(h * n_frames, sequence_length, -1)\n )\n out_1 = (\n torch.cat(out_1, dim=0)\n .view(h, n_frames, sequence_length, dim // h)\n .permute(1, 0, 2, 3)\n .reshape(h * n_frames, sequence_length, -1)\n )\n out_2 = (\n torch.cat(out_2, dim=0)\n .view(h, n_frames, sequence_length, dim // h)\n .permute(1, 0, 2, 3)\n .reshape(h * n_frames, sequence_length, -1)\n )\n\n out = torch.cat([out_0, out_1, out_2], dim=0)\n out = self.batch_to_head_dim(out)\n\n return to_out(out)\n\n return forward\n\n for _, unet_module in unet.named_modules():\n if isinstance_str(unet_module, \"BasicTransformerBlock\"):\n module = unet_module.attn1\n module.forward = sa_forward(module)\n\n res_dict = {1: [1, 2], 2: [0, 1, 2], 3: [0, 1, 2]}\n # we are injecting attention in blocks 4 - 11 of the decoder, so not in the first block of the lowest resolution\n for res in res_dict:\n for block in res_dict[res]:\n module = unet.up_blocks[res].attentions[block].transformer_blocks[0].attn1\n module.forward = sa_forward(module)" }, { "identifier": "downscale_mask", "path": "nerfiller/utils/mask_utils.py", "snippet": "def downscale_mask(mask, size=None, scale_factor=None, dilate_iters=0, dilate_kernel_size=3):\n \"\"\"\n Downscale the mask in a conservative way. 1s are where to inpaint, 0 where to not inpaint.\n Inpaints extra pixels to prevent leakage under the mask.\n \"\"\"\n assert size or scale_factor\n if size:\n assert scale_factor is None\n if scale_factor:\n assert size is None\n for _ in range(dilate_iters):\n mask = dilate(mask, kernel_size=dilate_kernel_size)\n mask = torch.nn.functional.interpolate(mask, size=size, scale_factor=scale_factor, mode=\"bilinear\")\n mask = (mask != 0.0).float() # expands the mask slightly for no leakage of pixels\n return mask" } ]
import json
import shutil
import mediapy
import torch
import tyro
import math
from pathlib import Path
from nerfiller.inpaint.rgb_inpainter import RGBInpainter
from nerfiller.inpaint.lama_inpainter import LaMaInpainter
from nerfiller.nerf.dataset_utils import parse_nerfstudio_frame
from nerfiller.utils.image_utils import get_inpainted_image_row
from nerfiller.utils.camera_utils import rescale_intrinsics
from nerfiller.configs.inpaint import InpaintConfig, AnnotatedBaseConfigUnion
from datetime import datetime
from nerfiller.utils.diff_utils import register_extended_attention
from nerfiller.utils.mask_utils import downscale_mask
11,842
# Setup the modules for guidance. # multiview_metric = ReprojectMetric(lossfeatmult=1.0) # feature_extractor = SuperPointExtractor(device=config.device) # TODO: make sure feature_extractor is half precision with half_precision_weights # Copy the original dataset besides the images, which we will inpaint. output_folder = ( Path(str(config.nerfstudio_dataset) + "-" + "inpaint") / str(config.method_name) / datetime.now().strftime("%Y-%m-%d_%H%M%S") ) output_folder.mkdir(parents=True) shutil.copytree(config.nerfstudio_dataset / "images", output_folder / "original_images") shutil.copytree(config.nerfstudio_dataset / "masks", output_folder / "original_masks") shutil.copytree(config.nerfstudio_dataset / "depth", output_folder / "depth") shutil.copytree(config.nerfstudio_dataset / "masks", output_folder / "masks") shutil.copy(config.nerfstudio_dataset / "transforms.json", output_folder / "transforms.json") (output_folder / "images").mkdir(parents=True) (output_folder / "inpaint").mkdir(parents=True) f = open(config.nerfstudio_dataset / "transforms.json") transforms = json.load(f) f.close() num_images = len(transforms["frames"]) if config.randomize_image_order: indices = torch.randperm(num_images) else: indices = torch.arange(num_images) padded_num_images = config.chunk_size * math.ceil(num_images / config.chunk_size) if num_images != padded_num_images: indices = torch.cat([indices, indices[: padded_num_images - num_images]]) for i in range(0, padded_num_images - config.chunk_size + 1, config.new_size): images = [] masks = [] depths = [] Ks = [] c2ws = [] for j in range(i, i + config.chunk_size): if i == 0 or j >= (i + config.chunk_size - config.new_size): # new stuff to inpaint image, depth, mask, c2w, K = parse_nerfstudio_frame( transforms, config.nerfstudio_dataset, indices[j], depth_max=config.depth_max, device=config.device, ) else: # old stuff already inpainted image, depth, mask, c2w, K = parse_nerfstudio_frame( transforms, output_folder, indices[j], depth_max=config.depth_max, device=config.device, ) images.append(image) masks.append(mask) depths.append(depth) Ks.append(K) c2ws.append(c2w) images = torch.cat(images) masks = torch.cat(masks) depths = torch.cat(depths) Ks = torch.cat(Ks) c2ws = torch.cat(c2ws) # generator = [ # torch.Generator(device=config.device).manual_seed(int(indices[j])) for j in range(i, i + config.chunk_size) # ] generator = None image = torch.nn.functional.interpolate(images, scale_factor=config.scale_factor, mode="bilinear") depth = torch.nn.functional.interpolate(depths, scale_factor=config.scale_factor, mode="bilinear") mask = downscale_mask( masks, scale_factor=config.scale_factor, dilate_iters=config.dilate_iters, dilate_kernel_size=config.dilate_kernel_size, ) if config.method_name == "individual-lama": imagein = rgb_inpainter.get_image(image=image, mask=mask) else: enable_gradient = config.num_guidance_steps > 0 and len(config.guidance_steps) > 0 with torch.enable_grad() if enable_gradient else torch.no_grad(): imagein = rgb_inpainter.get_image( text_embeddings=text_embeddings, image=image, mask=mask, depth=depth, denoise_in_grid=config.denoise_in_grid, multidiffusion_steps=config.multidiffusion_steps, randomize_latents=config.randomize_latents, text_guidance_scale=config.text_guidance_scale, image_guidance_scale=config.image_guidance_scale, num_inference_steps=config.num_inference_steps, num_guidance_steps=config.num_guidance_steps, classifier_guidance_scale=config.classifier_guidance_scale, guidance_steps=config.guidance_steps, 
multiview_guidance_scale=config.multiview_guidance_scale, K=rescale_intrinsics(Ks, config.scale_factor, config.scale_factor), c2w=c2ws, output_folder=output_folder / "inpaint" / f"{i:06d}-{i+config.chunk_size:06d}" if config.save_intermediates else None, show_multiview=False, generator=generator, use_decoder_approximation=config.use_decoder_approximation, ) # TODO: use an upscaler here imagein = torch.nn.functional.interpolate(imagein, scale_factor=1 / config.scale_factor, mode="bilinear") imagein = torch.where(masks == 1, imagein, images)
def main( config: InpaintConfig, ): """ Inpaint a Nerfstudio dataset where the masks == 0. """ if config.method_name == "individual-lama": rgb_inpainter = LaMaInpainter(device=config.device, model_path=Path("data/models/big-lama")) else: # Load the inpainting module. rgb_inpainter = RGBInpainter( half_precision_weights=config.half_precision_weights, lora_model_path=config.lora_model_path, device=config.device, vae_device=config.vae_device, ) if config.text_guidance_scale != 0.0: assert config.prompt != "", "You need to set an actual prompt to use this method." # Process the text prompts. text_embeddings = rgb_inpainter.compute_text_embeddings(config.prompt, config.negative_prompt) if config.use_expanded_attention: register_extended_attention(rgb_inpainter.unet) # Setup the modules for guidance. # multiview_metric = ReprojectMetric(lossfeatmult=1.0) # feature_extractor = SuperPointExtractor(device=config.device) # TODO: make sure feature_extractor is half precision with half_precision_weights # Copy the original dataset besides the images, which we will inpaint. output_folder = ( Path(str(config.nerfstudio_dataset) + "-" + "inpaint") / str(config.method_name) / datetime.now().strftime("%Y-%m-%d_%H%M%S") ) output_folder.mkdir(parents=True) shutil.copytree(config.nerfstudio_dataset / "images", output_folder / "original_images") shutil.copytree(config.nerfstudio_dataset / "masks", output_folder / "original_masks") shutil.copytree(config.nerfstudio_dataset / "depth", output_folder / "depth") shutil.copytree(config.nerfstudio_dataset / "masks", output_folder / "masks") shutil.copy(config.nerfstudio_dataset / "transforms.json", output_folder / "transforms.json") (output_folder / "images").mkdir(parents=True) (output_folder / "inpaint").mkdir(parents=True) f = open(config.nerfstudio_dataset / "transforms.json") transforms = json.load(f) f.close() num_images = len(transforms["frames"]) if config.randomize_image_order: indices = torch.randperm(num_images) else: indices = torch.arange(num_images) padded_num_images = config.chunk_size * math.ceil(num_images / config.chunk_size) if num_images != padded_num_images: indices = torch.cat([indices, indices[: padded_num_images - num_images]]) for i in range(0, padded_num_images - config.chunk_size + 1, config.new_size): images = [] masks = [] depths = [] Ks = [] c2ws = [] for j in range(i, i + config.chunk_size): if i == 0 or j >= (i + config.chunk_size - config.new_size): # new stuff to inpaint image, depth, mask, c2w, K = parse_nerfstudio_frame( transforms, config.nerfstudio_dataset, indices[j], depth_max=config.depth_max, device=config.device, ) else: # old stuff already inpainted image, depth, mask, c2w, K = parse_nerfstudio_frame( transforms, output_folder, indices[j], depth_max=config.depth_max, device=config.device, ) images.append(image) masks.append(mask) depths.append(depth) Ks.append(K) c2ws.append(c2w) images = torch.cat(images) masks = torch.cat(masks) depths = torch.cat(depths) Ks = torch.cat(Ks) c2ws = torch.cat(c2ws) # generator = [ # torch.Generator(device=config.device).manual_seed(int(indices[j])) for j in range(i, i + config.chunk_size) # ] generator = None image = torch.nn.functional.interpolate(images, scale_factor=config.scale_factor, mode="bilinear") depth = torch.nn.functional.interpolate(depths, scale_factor=config.scale_factor, mode="bilinear") mask = downscale_mask( masks, scale_factor=config.scale_factor, dilate_iters=config.dilate_iters, dilate_kernel_size=config.dilate_kernel_size, ) if config.method_name == "individual-lama": 
imagein = rgb_inpainter.get_image(image=image, mask=mask) else: enable_gradient = config.num_guidance_steps > 0 and len(config.guidance_steps) > 0 with torch.enable_grad() if enable_gradient else torch.no_grad(): imagein = rgb_inpainter.get_image( text_embeddings=text_embeddings, image=image, mask=mask, depth=depth, denoise_in_grid=config.denoise_in_grid, multidiffusion_steps=config.multidiffusion_steps, randomize_latents=config.randomize_latents, text_guidance_scale=config.text_guidance_scale, image_guidance_scale=config.image_guidance_scale, num_inference_steps=config.num_inference_steps, num_guidance_steps=config.num_guidance_steps, classifier_guidance_scale=config.classifier_guidance_scale, guidance_steps=config.guidance_steps, multiview_guidance_scale=config.multiview_guidance_scale, K=rescale_intrinsics(Ks, config.scale_factor, config.scale_factor), c2w=c2ws, output_folder=output_folder / "inpaint" / f"{i:06d}-{i+config.chunk_size:06d}" if config.save_intermediates else None, show_multiview=False, generator=generator, use_decoder_approximation=config.use_decoder_approximation, ) # TODO: use an upscaler here imagein = torch.nn.functional.interpolate(imagein, scale_factor=1 / config.scale_factor, mode="bilinear") imagein = torch.where(masks == 1, imagein, images)
imageinrow = get_inpainted_image_row(image=images, mask=masks, inpainted_image=imagein, show_original=True)
3
2023-12-07 19:12:08+00:00
16k
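For reference, this record's next_line calls get_inpainted_image_row on the freshly inpainted batch, and its cropped_code downsizes the mask with downscale_mask before inpainting. The following is a minimal illustrative sketch only, not part of the dataset row: the dummy tensors and values are placeholders, while the import paths and call signatures follow the context snippets above ("B 3 H W" images, "B 1 H W" masks, 1 = inpaint).

# Illustrative sketch (assumed placeholder data): exercising two helpers from
# the context above, downscale_mask and get_inpainted_image_row.
import torch
from nerfiller.utils.image_utils import get_inpainted_image_row
from nerfiller.utils.mask_utils import downscale_mask

images = torch.rand(2, 3, 512, 512)                 # dummy batch of RGB images
masks = (torch.rand(2, 1, 512, 512) > 0.8).float()  # 1 marks the region to inpaint
imagein = torch.rand(2, 3, 512, 512)                # placeholder "inpainted" result

# Conservative downscale per the docstring: optional dilation, bilinear resize,
# then re-binarize so no pixels leak under the mask at the lower resolution.
small_mask = downscale_mask(masks, scale_factor=0.5, dilate_iters=2)

# Stack the original, the mask-highlighted input, and the inpainted result for
# inspection, mirroring what the record's next_line does.
row = get_inpainted_image_row(image=images, mask=masks,
                              inpainted_image=imagein, show_original=True)
print(small_mask.shape, row.shape)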
nnanhuang/Customize-it-3D
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key 
in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, 
ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = 
self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n 
self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n t_start=-1):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback: \n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, 
use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n return x_dec" }, { "identifier": "CrossAttention", "path": "ldm/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head ** -0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim),\n nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))\n\n sim = einsum('b i d, b j d -> b i j', q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, 'b ... -> b (...)')\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, 'b j -> (b h) () j', h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum('b i j, b j d -> b i d', attn, v)\n out = rearrange(out, '(b h) n d -> b n (h d)', h=h)\n return self.to_out(out)" } ]
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.modules.attention import CrossAttention
12,358
force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2]//25) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2]//25) log['conditioning'] = xc elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, ddim_steps=ddim_steps,eta=ddim_eta) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( self.first_stage_model, IdentityFirstStage): # also display when quantizing x0 while sampling with ema_scope("Plotting Quantized Denoised"): samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, ddim_steps=ddim_steps,eta=ddim_eta, quantize_denoised=True) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, # quantize_denoised=True) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_x0_quantized"] = x_samples if unconditional_guidance_scale > 1.0: uc = self.get_unconditional_conditioning(N, unconditional_guidance_label, image_size=x.shape[-1]) # uc = torch.zeros_like(c) with ema_scope("Sampling with classifier-free guidance"): samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=uc, ) x_samples_cfg = self.decode_first_stage(samples_cfg) log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg if inpaint: # make a simple center square b, h, w = z.shape[0], z.shape[2], z.shape[3] mask = torch.ones(N, h, w).to(self.device) # zeros will be filled in mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. mask = mask[:, None, ...] 
with ema_scope("Plotting Inpaint"): samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta, ddim_steps=ddim_steps, x0=z[:N], mask=mask) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_inpainting"] = x_samples log["mask"] = mask # outpaint mask = 1. - mask with ema_scope("Plotting Outpaint"): samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta, ddim_steps=ddim_steps, x0=z[:N], mask=mask) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_outpainting"] = x_samples if plot_progressive_rows: with ema_scope("Plotting Progressives"): img, progressives = self.progressive_denoising(c, shape=(self.channels, self.image_size, self.image_size), batch_size=N) prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") log["progressive_row"] = prog_row if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = [] if self.unet_trainable == "attn": print("Training only unet attention layers") for n, m in self.model.named_modules():
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape)==len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif 
self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1-p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = 
self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, 
device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05): x = super().get_input(batch, k) T = batch['T'].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. 
random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange((random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1") null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [self.cc_projection(torch.cat([torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :]], dim=-1))] cond["c_concat"] = [input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach()] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # @torch.no_grad() # wasted two hours to find this bug... why no grad here! def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) df = self.split_input_params["vqf"] self.split_input_params['original_image_size'] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None # if self.cond_stage_trainable: # c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') adapted_cond = self.get_learned_conditioning(adapted_cond) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not 
return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None,**kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None, image_size=512): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to(self.device)] return cond @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2]//25) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2]//25) log['conditioning'] = xc elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, ddim_steps=ddim_steps,eta=ddim_eta) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( self.first_stage_model, IdentityFirstStage): # also display when quantizing x0 while sampling with ema_scope("Plotting Quantized Denoised"): samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, ddim_steps=ddim_steps,eta=ddim_eta, quantize_denoised=True) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, # quantize_denoised=True) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_x0_quantized"] = x_samples if unconditional_guidance_scale > 1.0: uc = self.get_unconditional_conditioning(N, unconditional_guidance_label, image_size=x.shape[-1]) # uc = torch.zeros_like(c) with ema_scope("Sampling with classifier-free guidance"): samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, 
eta=ddim_eta, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=uc, ) x_samples_cfg = self.decode_first_stage(samples_cfg) log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg if inpaint: # make a simple center square b, h, w = z.shape[0], z.shape[2], z.shape[3] mask = torch.ones(N, h, w).to(self.device) # zeros will be filled in mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. mask = mask[:, None, ...] with ema_scope("Plotting Inpaint"): samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta, ddim_steps=ddim_steps, x0=z[:N], mask=mask) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_inpainting"] = x_samples log["mask"] = mask # outpaint mask = 1. - mask with ema_scope("Plotting Outpaint"): samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta, ddim_steps=ddim_steps, x0=z[:N], mask=mask) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_outpainting"] = x_samples if plot_progressive_rows: with ema_scope("Plotting Progressives"): img, progressives = self.progressive_denoising(c, shape=(self.channels, self.image_size, self.image_size), batch_size=N) prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") log["progressive_row"] = prog_row if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = [] if self.unet_trainable == "attn": print("Training only unet attention layers") for n, m in self.model.named_modules():
if isinstance(m, CrossAttention) and n.endswith('attn2'):
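Reading aid for the flattened DDPM/LatentDiffusion code in the row above: a minimal standalone sketch of the closed-form forward diffusion q(x_t | x_0) that the registered sqrt_alphas_cumprod / sqrt_one_minus_alphas_cumprod buffers and the q_sample method implement. All names below are illustrative (not taken from the row), and the schedule is plain linear for brevity rather than the sqrt-space linear schedule the original code builds.

import torch

def make_linear_schedule(timesteps=1000, linear_start=1e-4, linear_end=2e-2):
    # plain linear betas for brevity; the LDM code spaces them linearly in sqrt space
    betas = torch.linspace(linear_start, linear_end, timesteps)
    alphas = 1.0 - betas
    return betas, torch.cumprod(alphas, dim=0)

def q_sample(x0, t, alphas_cumprod, noise=None):
    # x_t = sqrt(acp_t) * x_0 + sqrt(1 - acp_t) * eps, with the per-sample
    # scalar acp_t broadcast over the channel/spatial dimensions
    noise = torch.randn_like(x0) if noise is None else noise
    acp_t = alphas_cumprod[t].view(-1, *([1] * (x0.dim() - 1)))
    return acp_t.sqrt() * x0 + (1.0 - acp_t).sqrt() * noise

if __name__ == "__main__":
    _, alphas_cumprod = make_linear_schedule()
    x0 = torch.randn(4, 3, 64, 64)           # dummy batch of clean latents
    t = torch.randint(0, 1000, (4,))         # one timestep per sample
    print(q_sample(x0, t, alphas_cumprod).shape)   # torch.Size([4, 3, 64, 64])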
18
2023-12-14 11:03:35+00:00
16k
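Before the next row: the LatentDiffusion.get_input code in the row above drops conditioning for classifier-free guidance (null text only, zero image only, or both, each with probability uncond). The sketch below reproduces only that masking logic under the assumption uncond=0.05, with illustrative names; a single uniform draw per sample selects one of four regimes (drop text only, drop both, drop image only, keep both).

import torch

def cfg_dropout_masks(batch_size, uncond=0.05, generator=None):
    r = torch.rand(batch_size, generator=generator)
    # text/CLIP conditioning is nulled for r in [0, 2*uncond)
    drop_text = r < 2 * uncond
    # the concatenated image latent is zeroed for r in [uncond, 3*uncond)
    keep_image = ~((r >= uncond) & (r < 3 * uncond))
    return drop_text, keep_image

if __name__ == "__main__":
    drop_text, keep_image = cfg_dropout_masks(8)
    # drop_text gates the cross-attention embedding, keep_image gates the concat latent
    print(drop_text.tolist(), keep_image.tolist())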
mkang315/ASF-YOLO
segment/predict.py
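The context for this segment/predict.py row (below) bundles DetectMultiBackend with the LoadImages / LoadScreenshots / LoadStreams loaders, all of which letterbox frames before inference. As a reading aid, here is a simplified sketch of that preprocessing step (aspect-preserving resize, pad to a square canvas, BGR HWC to RGB CHW float). Function and parameter names are mine; this is not the repo's utils.augmentations.letterbox, whose padding and rounding details differ.

import cv2
import numpy as np

def letterbox_simple(im_bgr, new_shape=640, pad_value=114):
    h, w = im_bgr.shape[:2]
    r = min(new_shape / h, new_shape / w)                  # scale so the long side fits
    nh, nw = round(h * r), round(w * r)
    resized = cv2.resize(im_bgr, (nw, nh), interpolation=cv2.INTER_LINEAR)
    canvas = np.full((new_shape, new_shape, 3), pad_value, dtype=np.uint8)
    top, left = (new_shape - nh) // 2, (new_shape - nw) // 2
    canvas[top:top + nh, left:left + nw] = resized         # centre the resized image
    chw_rgb = canvas[:, :, ::-1].transpose(2, 0, 1)        # BGR->RGB, HWC->CHW
    return np.ascontiguousarray(chw_rgb, dtype=np.float32) / 255.0

if __name__ == "__main__":
    img = np.zeros((480, 640, 3), dtype=np.uint8)          # dummy frame
    print(letterbox_simple(img).shape)                     # (3, 640, 640)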
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):\n # Usage:\n # PyTorch: weights = *.pt\n # TorchScript: *.torchscript\n # ONNX Runtime: *.onnx\n # ONNX OpenCV DNN: *.onnx --dnn\n # OpenVINO: *_openvino_model\n # CoreML: *.mlmodel\n # TensorRT: *.engine\n # TensorFlow SavedModel: *_saved_model\n # TensorFlow GraphDef: *.pb\n # TensorFlow Lite: *.tflite\n # TensorFlow Edge TPU: *_edgetpu.tflite\n # PaddlePaddle: *_paddle_model\n from models.experimental import attempt_download, attempt_load # scoped to avoid circular import\n\n super().__init__()\n w = str(weights[0] if isinstance(weights, list) else weights)\n pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)\n fp16 &= pt or jit or onnx or engine # FP16\n nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH)\n stride = 32 # default stride\n cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA\n if not (pt or triton):\n w = attempt_download(w) # download if not local\n\n if pt: # PyTorch\n model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)\n stride = max(int(model.stride.max()), 32) # model stride\n names = model.module.names if hasattr(model, 'module') else model.names # get class names\n model.half() if fp16 else model.float()\n self.model = model # explicitly assign for to(), cpu(), cuda(), half()\n elif jit: # TorchScript\n LOGGER.info(f'Loading {w} for TorchScript inference...')\n extra_files = {'config.txt': ''} # model metadata\n model = torch.jit.load(w, _extra_files=extra_files, map_location=device)\n model.half() if fp16 else model.float()\n if extra_files['config.txt']: # load metadata dict\n d = json.loads(extra_files['config.txt'],\n object_hook=lambda d: {int(k) if k.isdigit() else k: v\n for k, v in d.items()})\n stride, names = int(d['stride']), d['names']\n elif dnn: # ONNX OpenCV DNN\n LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n check_requirements('opencv-python>=4.5.4')\n net = cv2.dnn.readNetFromONNX(w)\n elif onnx: # ONNX Runtime\n LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))\n import onnxruntime\n providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']\n session = onnxruntime.InferenceSession(w, providers=providers)\n output_names = [x.name for x in session.get_outputs()]\n meta = session.get_modelmeta().custom_metadata_map # metadata\n if 'stride' in meta:\n stride, names = int(meta['stride']), eval(meta['names'])\n elif xml: # OpenVINO\n LOGGER.info(f'Loading {w} for OpenVINO inference...')\n check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/\n from openvino.runtime import Core, Layout, get_batch\n ie = Core()\n if not Path(w).is_file(): # if not *.xml\n w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir\n network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))\n if network.get_parameters()[0].get_layout().empty:\n network.get_parameters()[0].set_layout(Layout(\"NCHW\"))\n batch_dim = get_batch(network)\n if batch_dim.is_static:\n batch_size = batch_dim.get_length()\n 
executable_network = ie.compile_model(network, device_name=\"CPU\") # device_name=\"MYRIAD\" for Intel NCS2\n stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata\n elif engine: # TensorRT\n LOGGER.info(f'Loading {w} for TensorRT inference...')\n import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download\n check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0\n if device.type == 'cpu':\n device = torch.device('cuda:0')\n Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))\n logger = trt.Logger(trt.Logger.INFO)\n with open(w, 'rb') as f, trt.Runtime(logger) as runtime:\n model = runtime.deserialize_cuda_engine(f.read())\n context = model.create_execution_context()\n bindings = OrderedDict()\n output_names = []\n fp16 = False # default updated below\n dynamic = False\n for i in range(model.num_bindings):\n name = model.get_binding_name(i)\n dtype = trt.nptype(model.get_binding_dtype(i))\n if model.binding_is_input(i):\n if -1 in tuple(model.get_binding_shape(i)): # dynamic\n dynamic = True\n context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))\n if dtype == np.float16:\n fp16 = True\n else: # output\n output_names.append(name)\n shape = tuple(context.get_binding_shape(i))\n im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)\n bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))\n binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size\n elif coreml: # CoreML\n LOGGER.info(f'Loading {w} for CoreML inference...')\n import coremltools as ct\n model = ct.models.MLModel(w)\n elif saved_model: # TF SavedModel\n LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')\n import tensorflow as tf\n keras = False # assume TF1 saved_model\n model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)\n elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')\n import tensorflow as tf\n\n def wrap_frozen_graph(gd, inputs, outputs):\n x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=\"\"), []) # wrapped\n ge = x.graph.as_graph_element\n return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))\n\n def gd_outputs(gd):\n name_list, input_list = [], []\n for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef\n name_list.append(node.name)\n input_list.extend(node.input)\n return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))\n\n gd = tf.Graph().as_graph_def() # TF GraphDef\n with open(w, 'rb') as f:\n gd.ParseFromString(f.read())\n frozen_func = wrap_frozen_graph(gd, inputs=\"x:0\", outputs=gd_outputs(gd))\n elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu\n from tflite_runtime.interpreter import Interpreter, load_delegate\n except ImportError:\n import tensorflow as tf\n Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,\n if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime\n LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')\n delegate = {\n 'Linux': 'libedgetpu.so.1',\n 'Darwin': 'libedgetpu.1.dylib',\n 'Windows': 
'edgetpu.dll'}[platform.system()]\n interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])\n else: # TFLite\n LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n interpreter = Interpreter(model_path=w) # load TFLite model\n interpreter.allocate_tensors() # allocate\n input_details = interpreter.get_input_details() # inputs\n output_details = interpreter.get_output_details() # outputs\n # load metadata\n with contextlib.suppress(zipfile.BadZipFile):\n with zipfile.ZipFile(w, \"r\") as model:\n meta_file = model.namelist()[0]\n meta = ast.literal_eval(model.read(meta_file).decode(\"utf-8\"))\n stride, names = int(meta['stride']), meta['names']\n elif tfjs: # TF.js\n raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')\n elif paddle: # PaddlePaddle\n LOGGER.info(f'Loading {w} for PaddlePaddle inference...')\n check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')\n import paddle.inference as pdi\n if not Path(w).is_file(): # if not *.pdmodel\n w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir\n weights = Path(w).with_suffix('.pdiparams')\n config = pdi.Config(str(w), str(weights))\n if cuda:\n config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)\n predictor = pdi.create_predictor(config)\n input_handle = predictor.get_input_handle(predictor.get_input_names()[0])\n output_names = predictor.get_output_names()\n elif triton: # NVIDIA Triton Inference Server\n LOGGER.info(f'Using {w} as Triton Inference Server...')\n check_requirements('tritonclient[all]')\n from utils.triton import TritonRemoteModel\n model = TritonRemoteModel(url=w)\n nhwc = model.runtime.startswith(\"tensorflow\")\n else:\n raise NotImplementedError(f'ERROR: {w} is not a supported format')\n\n # class names\n if 'names' not in locals():\n names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}\n if names[0] == 'n01440764' and len(names) == 1000: # ImageNet\n names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names\n\n self.__dict__.update(locals()) # assign all variables to self\n\n def forward(self, im, augment=False, visualize=False):\n # YOLOv5 MultiBackend inference\n b, ch, h, w = im.shape # batch, channel, height, width\n if self.fp16 and im.dtype != torch.float16:\n im = im.half() # to FP16\n if self.nhwc:\n im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)\n\n if self.pt: # PyTorch\n y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)\n elif self.jit: # TorchScript\n y = self.model(im)\n elif self.dnn: # ONNX OpenCV DNN\n im = im.cpu().numpy() # torch to numpy\n self.net.setInput(im)\n y = self.net.forward()\n elif self.onnx: # ONNX Runtime\n im = im.cpu().numpy() # torch to numpy\n y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})\n elif self.xml: # OpenVINO\n im = im.cpu().numpy() # FP32\n y = list(self.executable_network([im]).values())\n elif self.engine: # TensorRT\n if self.dynamic and im.shape != self.bindings['images'].shape:\n i = self.model.get_binding_index('images')\n self.context.set_binding_shape(i, im.shape) # reshape if dynamic\n self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)\n for name in self.output_names:\n i = self.model.get_binding_index(name)\n self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))\n s = self.bindings['images'].shape\n assert im.shape == s, f\"input size 
{im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}\"\n self.binding_addrs['images'] = int(im.data_ptr())\n self.context.execute_v2(list(self.binding_addrs.values()))\n y = [self.bindings[x].data for x in sorted(self.output_names)]\n elif self.coreml: # CoreML\n im = im.cpu().numpy()\n im = Image.fromarray((im[0] * 255).astype('uint8'))\n # im = im.resize((192, 320), Image.ANTIALIAS)\n y = self.model.predict({'image': im}) # coordinates are xywh normalized\n if 'confidence' in y:\n box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels\n conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)\n y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n else:\n y = list(reversed(y.values())) # reversed for segmentation models (pred, proto)\n elif self.paddle: # PaddlePaddle\n im = im.cpu().numpy().astype(np.float32)\n self.input_handle.copy_from_cpu(im)\n self.predictor.run()\n y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]\n elif self.triton: # NVIDIA Triton Inference Server\n y = self.model(im)\n else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)\n im = im.cpu().numpy()\n if self.saved_model: # SavedModel\n y = self.model(im, training=False) if self.keras else self.model(im)\n elif self.pb: # GraphDef\n y = self.frozen_func(x=self.tf.constant(im))\n else: # Lite or Edge TPU\n input = self.input_details[0]\n int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model\n if int8:\n scale, zero_point = input['quantization']\n im = (im / scale + zero_point).astype(np.uint8) # de-scale\n self.interpreter.set_tensor(input['index'], im)\n self.interpreter.invoke()\n y = []\n for output in self.output_details:\n x = self.interpreter.get_tensor(output['index'])\n if int8:\n scale, zero_point = output['quantization']\n x = (x.astype(np.float32) - zero_point) * scale # re-scale\n y.append(x)\n y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]\n y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels\n\n if isinstance(y, (list, tuple)):\n return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]\n else:\n return self.from_numpy(y)\n\n def from_numpy(self, x):\n return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x\n\n def warmup(self, imgsz=(1, 3, 640, 640)):\n # Warmup model by running inference once\n warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton\n if any(warmup_types) and (self.device.type != 'cpu' or self.triton):\n im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input\n for _ in range(2 if self.jit else 1): #\n self.forward(im) # warmup\n\n @staticmethod\n def _model_type(p='path/to/model.pt'):\n # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx\n # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]\n from export import export_formats\n from utils.downloads import is_url\n sf = list(export_formats().Suffix) # export suffixes\n if not is_url(p, check=False):\n check_suffix(p, sf) # checks\n url = urlparse(p) # if url may be Triton inference server\n types = [s in Path(p).name for s in sf]\n types[8] &= not types[9] # tflite &= not edgetpu\n triton = not any(types) and all([any(s in url.scheme for s in [\"http\", \"grpc\"]), url.netloc])\n return types + [triton]\n\n @staticmethod\n def _load_metadata(f=Path('path/to/meta.yaml')):\n # Load metadata from meta.yaml if it exists\n if f.exists():\n d = yaml_load(f)\n return d['stride'], d['names'] # assign stride, names\n return None, None" }, { "identifier": "IMG_FORMATS", "path": "utils/dataloaders.py", "snippet": "IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes" }, { "identifier": "VID_FORMATS", "path": "utils/dataloaders.py", "snippet": "VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes" }, { "identifier": "LoadImages", "path": "utils/dataloaders.py", "snippet": "class LoadImages:\n # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`\n def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):\n files = []\n for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:\n p = str(Path(p).resolve())\n if '*' in p:\n files.extend(sorted(glob.glob(p, recursive=True))) # glob\n elif os.path.isdir(p):\n files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir\n elif os.path.isfile(p):\n files.append(p) # files\n else:\n raise FileNotFoundError(f'{p} does not exist')\n\n images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]\n videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]\n ni, nv = len(images), len(videos)\n\n self.img_size = img_size\n self.stride = stride\n self.files = images + videos\n self.nf = ni + nv # number of files\n self.video_flag = [False] * ni + [True] * nv\n self.mode = 'image'\n self.auto = auto\n self.transforms = transforms # optional\n self.vid_stride = vid_stride # video frame-rate stride\n if any(videos):\n self._new_video(videos[0]) # new video\n else:\n self.cap = None\n assert self.nf > 0, f'No images or videos found in {p}. 
' \\\n f'Supported formats are:\\nimages: {IMG_FORMATS}\\nvideos: {VID_FORMATS}'\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count == self.nf:\n raise StopIteration\n path = self.files[self.count]\n\n if self.video_flag[self.count]:\n # Read video\n self.mode = 'video'\n for _ in range(self.vid_stride):\n self.cap.grab()\n ret_val, im0 = self.cap.retrieve()\n while not ret_val:\n self.count += 1\n self.cap.release()\n if self.count == self.nf: # last video\n raise StopIteration\n path = self.files[self.count]\n self._new_video(path)\n ret_val, im0 = self.cap.read()\n\n self.frame += 1\n # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False\n s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '\n\n else:\n # Read image\n self.count += 1\n im0 = cv2.imread(path) # BGR\n assert im0 is not None, f'Image Not Found {path}'\n s = f'image {self.count}/{self.nf} {path}: '\n\n if self.transforms:\n im = self.transforms(im0) # transforms\n else:\n im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize\n im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n im = np.ascontiguousarray(im) # contiguous\n\n return path, im, im0, self.cap, s\n\n def _new_video(self, path):\n # Create a new video capture object\n self.frame = 0\n self.cap = cv2.VideoCapture(path)\n self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)\n self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees\n # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493\n\n def _cv2_rotate(self, im):\n # Rotate a cv2 video manually\n if self.orientation == 0:\n return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)\n elif self.orientation == 180:\n return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)\n elif self.orientation == 90:\n return cv2.rotate(im, cv2.ROTATE_180)\n return im\n\n def __len__(self):\n return self.nf # number of files" }, { "identifier": "LoadScreenshots", "path": "utils/dataloaders.py", "snippet": "class LoadScreenshots:\n # YOLOv5 screenshot dataloader, i.e. 
`python detect.py --source \"screen 0 100 100 512 256\"`\n def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None):\n # source = [screen_number left top width height] (pixels)\n check_requirements('mss')\n import mss\n\n source, *params = source.split()\n self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0\n if len(params) == 1:\n self.screen = int(params[0])\n elif len(params) == 4:\n left, top, width, height = (int(x) for x in params)\n elif len(params) == 5:\n self.screen, left, top, width, height = (int(x) for x in params)\n self.img_size = img_size\n self.stride = stride\n self.transforms = transforms\n self.auto = auto\n self.mode = 'stream'\n self.frame = 0\n self.sct = mss.mss()\n\n # Parse monitor shape\n monitor = self.sct.monitors[self.screen]\n self.top = monitor[\"top\"] if top is None else (monitor[\"top\"] + top)\n self.left = monitor[\"left\"] if left is None else (monitor[\"left\"] + left)\n self.width = width or monitor[\"width\"]\n self.height = height or monitor[\"height\"]\n self.monitor = {\"left\": self.left, \"top\": self.top, \"width\": self.width, \"height\": self.height}\n\n def __iter__(self):\n return self\n\n def __next__(self):\n # mss screen capture: get raw pixels from the screen as np array\n im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR\n s = f\"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: \"\n\n if self.transforms:\n im = self.transforms(im0) # transforms\n else:\n im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize\n im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n im = np.ascontiguousarray(im) # contiguous\n self.frame += 1\n return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s" }, { "identifier": "LoadStreams", "path": "utils/dataloaders.py", "snippet": "class LoadStreams:\n # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`\n def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):\n torch.backends.cudnn.benchmark = True # faster for fixed-size inference\n self.mode = 'stream'\n self.img_size = img_size\n self.stride = stride\n self.vid_stride = vid_stride # video frame-rate stride\n sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]\n n = len(sources)\n self.sources = [clean_str(x) for x in sources] # clean source names for later\n self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n\n for i, s in enumerate(sources): # index, source\n # Start thread to read frames from video stream\n st = f'{i + 1}/{n}: {s}... '\n if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video\n # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc'\n check_requirements(('pafy', 'youtube_dl==2020.12.2'))\n import pafy\n s = pafy.new(s).getbest(preftype=\"mp4\").url # YouTube URL\n s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam\n if s == 0:\n assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.'\n assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. 
Rerun command in a local environment.'\n cap = cv2.VideoCapture(s)\n assert cap.isOpened(), f'{st}Failed to open {s}'\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan\n self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback\n self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback\n\n _, self.imgs[i] = cap.read() # guarantee first frame\n self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)\n LOGGER.info(f\"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)\")\n self.threads[i].start()\n LOGGER.info('') # newline\n\n # check for common shapes\n s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs])\n self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal\n self.auto = auto and self.rect\n self.transforms = transforms # optional\n if not self.rect:\n LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.')\n\n def update(self, i, cap, stream):\n # Read stream `i` frames in daemon thread\n n, f = 0, self.frames[i] # frame number, frame array\n while cap.isOpened() and n < f:\n n += 1\n cap.grab() # .read() = .grab() followed by .retrieve()\n if n % self.vid_stride == 0:\n success, im = cap.retrieve()\n if success:\n self.imgs[i] = im\n else:\n LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.')\n self.imgs[i] = np.zeros_like(self.imgs[i])\n cap.open(stream) # re-open stream if signal was lost\n time.sleep(0.0) # wait time\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit\n cv2.destroyAllWindows()\n raise StopIteration\n\n im0 = self.imgs.copy()\n if self.transforms:\n im = np.stack([self.transforms(x) for x in im0]) # transforms\n else:\n im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize\n im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW\n im = np.ascontiguousarray(im) # contiguous\n\n return self.sources, im, im0, None, ''\n\n def __len__(self):\n return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "FILE = Path(__file__).resolve()\nROOT = FILE.parents[1] # YOLOv5 root directory\nRANK = int(os.getenv('RANK', -1))\nNUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads\nDATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory\nAUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode\nVERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode\nTQDM_BAR_FORMAT = '{l_bar}{bar:10}| {n_fmt}/{total_fmt} {elapsed}' # tqdm bar format\nFONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf\nLOGGING_NAME = \"yolov5\"\nLOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)\nCONFIG_DIR = user_config_dir() # Ultralytics settings dir\ndef is_ascii(s=''):\ndef is_chinese(s='人工智能'):\ndef is_colab():\ndef is_notebook():\ndef is_kaggle():\ndef is_docker() -> bool:\ndef is_writeable(dir, 
test=False):\ndef set_logging(name=LOGGING_NAME, verbose=True):\ndef user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):\n def __init__(self, t=0.0):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\n def time(self):\n def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):\n def _timeout_handler(self, signum, frame):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def __init__(self, new_dir):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\ndef methods(instance):\ndef print_args(args: Optional[dict] = None, show_file=True, show_func=False):\ndef init_seeds(seed=0, deterministic=False):\ndef intersect_dicts(da, db, exclude=()):\ndef get_default_args(func):\ndef get_latest_run(search_dir='.'):\ndef file_age(path=__file__):\ndef file_date(path=__file__):\ndef file_size(path):\ndef check_online():\n def run_once():\ndef git_describe(path=ROOT): # path must be a directory\ndef check_git_status(repo='ultralytics/yolov5', branch='master'):\ndef check_git_info(path='.'):\ndef check_python(minimum='3.7.0'):\ndef check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''):\ndef check_img_size(imgsz, s=32, floor=0):\ndef check_imshow(warn=False):\ndef check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):\ndef check_yaml(file, suffix=('.yaml', '.yml')):\ndef check_file(file, suffix=''):\ndef check_font(font=FONT, progress=False):\ndef check_dataset(data, autodownload=True):\ndef check_amp(model):\n def amp_allclose(model, im):\ndef yaml_load(file='data.yaml'):\ndef yaml_save(file='data.yaml', data={}):\ndef unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')):\ndef url2file(url):\ndef download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3):\n def download_one(url, dir):\ndef make_divisible(x, divisor):\ndef clean_str(s):\ndef one_cycle(y1=0.0, y2=1.0, steps=100):\ndef colorstr(*input):\ndef labels_to_class_weights(labels, nc=80):\ndef labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):\ndef coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\ndef xyxy2xywh(x):\ndef xywh2xyxy(x):\ndef xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\ndef xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):\ndef xyn2xy(x, w=640, h=640, padw=0, padh=0):\ndef segment2box(segment, width=640, height=640):\ndef segments2boxes(segments):\ndef resample_segments(segments, n=1000):\ndef scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):\ndef scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False):\ndef clip_boxes(boxes, shape):\ndef clip_segments(segments, shape):\ndef my_soft_nms(bboxes, scores, iou_thresh=0.5, sigma=0.5, score_threshold=0.25):\ndef non_max_suppression(\n prediction,\n conf_thres=0.25,\n iou_thres=0.45,\n classes=None,\n agnostic=False,\n multi_label=False,\n labels=(),\n max_det=300,\n nm=0, # number of masks\n):\ndef strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\ndef print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')):\ndef apply_classifier(x, model, img, im0):\ndef increment_path(path, exist_ok=False, sep='', mkdir=False):\ndef imread(path, flags=cv2.IMREAD_COLOR):\ndef imwrite(path, im):\ndef imshow(path, im):\nclass Profile(contextlib.ContextDecorator):\nclass 
Timeout(contextlib.ContextDecorator):\nclass WorkingDirectory(contextlib.ContextDecorator):" }, { "identifier": "Annotator", "path": "utils/plots.py", "snippet": "RANK = int(os.getenv('RANK', -1))\nclass Colors:\nclass Annotator:\n def __init__(self):\n def __call__(self, i, bgr=False):\n def hex2rgb(h): # rgb order (PIL)\ndef check_pil_font(font=FONT, size=10):\n def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):\n def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):\n def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False):\n def rectangle(self, xy, fill=None, outline=None, width=1):\n def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'):\n def fromarray(self, im):\n def result(self):\ndef feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):\ndef hist2d(x, y, n=100):\ndef butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):\n def butter_lowpass(cutoff, fs, order):\ndef output_to_target(output, max_det=300):\ndef plot_images(images, targets, paths=None, fname='images.jpg', names=None):\ndef plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):\ndef plot_val_txt(): # from utils.plots import *; plot_val()\ndef plot_targets_txt(): # from utils.plots import *; plot_targets_txt()\ndef plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()\ndef plot_labels(labels, names=(), save_dir=Path('')):\ndef imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path('images.jpg')):\ndef plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()\ndef plot_results(file='path/to/results.csv', dir=''):\ndef profile_idetection(start=0, stop=0, labels=(), save_dir=''):\ndef save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True):" }, { "identifier": "masks2segments", "path": "utils/segment/general.py", "snippet": "def masks2segments(masks, strategy='largest'):\n # Convert masks(n,160,160) into segments(n,xy)\n segments = []\n for x in masks.int().cpu().numpy().astype('uint8'):\n c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]\n if c:\n if strategy == 'concat': # concatenate all segments\n c = np.concatenate([x.reshape(-1, 2) for x in c])\n elif strategy == 'largest': # select largest segment\n c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2)\n else:\n c = np.zeros((0, 2)) # no segments found\n segments.append(c.astype('float32'))\n return segments" }, { "identifier": "process_mask", "path": "utils/segment/general.py", "snippet": "def process_mask(protos, masks_in, bboxes, shape, upsample=False):\n \"\"\"\n Crop before upsample.\n proto_out: [mask_dim, mask_h, mask_w]\n out_masks: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape:input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n\n c, mh, mw = protos.shape # CHW\n ih, iw = shape\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW\n\n downsampled_bboxes = bboxes.clone()\n downsampled_bboxes[:, 0] *= mw / iw\n downsampled_bboxes[:, 2] *= mw / iw\n downsampled_bboxes[:, 3] *= mh / ih\n downsampled_bboxes[:, 1] *= mh / ih\n\n masks = crop_mask(masks, downsampled_bboxes) # CHW\n if upsample:\n masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW\n return masks.gt_(0.5)" }, { "identifier": "process_mask_native", 
"path": "utils/segment/general.py", "snippet": "def process_mask_native(protos, masks_in, bboxes, dst_shape):\n \"\"\"\n Crop after upsample.\n proto_out: [mask_dim, mask_h, mask_w]\n out_masks: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape:input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n c, mh, mw = protos.shape # CHW\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)\n gain = min(mh / dst_shape[0], mw / dst_shape[1]) # gain = old / new\n pad = (mw - dst_shape[1] * gain) / 2, (mh - dst_shape[0] * gain) / 2 # wh padding\n top, left = int(pad[1]), int(pad[0]) # y, x\n bottom, right = int(mh - pad[1]), int(mw - pad[0])\n masks = masks[:, top:bottom, left:right]\n\n masks = F.interpolate(masks[None], dst_shape, mode='bilinear', align_corners=False)[0] # CHW\n masks = crop_mask(masks, bboxes) # CHW\n return masks.gt_(0.5)" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = None or 'cpu' or 0 or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} '\n device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n mps = device == 'mps' # Apple Metal Performance Shaders (MPS)\n if cpu or mps:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available()\n assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \\\n f\"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)\"\n\n if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\\n\" # bytes to MB\n arg = 'cuda:0'\n elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available\n s += 'MPS\\n'\n arg = 'mps'\n else: # revert to CPU\n s += 'CPU\\n'\n arg = 'cpu'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s)\n return torch.device(arg)" }, { "identifier": "smart_inference_mode", "path": "utils/torch_utils.py", "snippet": "def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')):\n # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator\n def decorate(fn):\n return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn)\n\n return decorate" } ]
import argparse
import os
import platform
import sys
import time
import torch
from pathlib import Path
from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.segment.general import masks2segments, process_mask, process_mask_native
from utils.torch_utils import select_device, smart_inference_mode
14309
# Run inference model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: im = torch.from_numpy(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim # Inference with dt[1]: act = time.time() visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred, proto = model(im, augment=augment, visualize=visualize)[:2] print('time.time():',time.time()-act) # NMS with dt[2]: pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32) # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): if retina_masks: # scale bbox first the crop masks det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2]) # HWC else: masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size # Segments if save_txt: segments = reversed(masks2segments(masks)) segments = [ scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True) for x in segments] # Print results for c in det[:, 5].unique(): n = (det[:, 5] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Mask plotting plot_img = torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / 255. 
\ if retina_masks else im[i] annotator.masks(masks, colors=[colors(x, True) for x in det[:, 5]], im_gpu=plot_img) # Write results for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): if save_txt: # Write to file segj = segments[j].reshape(-1) # (n,2) to (n*2) line = (cls, *segj, conf) if save_conf else (cls, *segj) # label format with open(f'{txt_path}.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') annotator.box_label(xyxy, label, color=colors(c, True)) # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Stream results im0 = annotator.result() if view_img: if platform.system() == 'Linux' and p not in windows: windows.append(p) cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) cv2.imshow(str(p), im0) if cv2.waitKey(1) == ord('q'): # 1 millisecond exit() # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print time (inference-only) LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") # Print results t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. Usage - sources: $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam img.jpg # image vid.mp4 # video screen # screenshot path/ # directory 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream Usage - formats: $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_model # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @smart_inference_mode() def run( weights=ROOT / 'yolov5s-seg.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/predict-seg', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference vid_stride=1, # video frame-rate stride retina_masks=False, ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, names, pt = model.stride, model.names, model.pt imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader bs = 1 # batch_size if webcam: view_img = check_imshow(warn=True) dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) elif screenshot: dataset = LoadScreenshots(source, img_size=imgsz, 
stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: im = torch.from_numpy(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim # Inference with dt[1]: act = time.time() visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred, proto = model(im, augment=augment, visualize=visualize)[:2] print('time.time():',time.time()-act) # NMS with dt[2]: pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32) # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): if retina_masks: # scale bbox first the crop masks det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2]) # HWC else: masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size # Segments if save_txt: segments = reversed(masks2segments(masks)) segments = [ scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True) for x in segments] # Print results for c in det[:, 5].unique(): n = (det[:, 5] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Mask plotting plot_img = torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / 255. 
\ if retina_masks else im[i] annotator.masks(masks, colors=[colors(x, True) for x in det[:, 5]], im_gpu=plot_img) # Write results for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): if save_txt: # Write to file segj = segments[j].reshape(-1) # (n,2) to (n*2) line = (cls, *segj, conf) if save_conf else (cls, *segj) # label format with open(f'{txt_path}.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') annotator.box_label(xyxy, label, color=colors(c, True)) # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Stream results im0 = annotator.result() if view_img: if platform.system() == 'Linux' and p not in windows: windows.append(p) cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) cv2.imshow(str(p), im0) if cv2.waitKey(1) == ord('q'): # 1 millisecond exit() # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print time (inference-only) LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") # Print results t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
6
2023-12-10 14:18:29+00:00
16k
youngskkim/CRN
models/camera_radar_net_det.py
[ { "identifier": "BaseBEVDepth", "path": "models/base_bev_depth.py", "snippet": "class BaseBEVDepth(nn.Module):\n \"\"\"Source code of `BEVDepth`, `https://arxiv.org/abs/2112.11790`.\n\n Args:\n backbone_conf (dict): Config of backbone.\n head_conf (dict): Config of head.\n \"\"\"\n\n def __init__(self, backbone_conf, head_conf):\n super(BaseBEVDepth, self).__init__()\n self.backbone_img = BaseLSSFPN(**backbone_conf)\n self.head = BEVDepthHead(**head_conf)\n\n # for inference time measurement\n self.idx = 0\n self.times_dict = {\n 'img': [],\n 'img_backbone': [],\n 'img_dep': [],\n 'img_transform': [],\n 'img_pool': [],\n\n 'head': [],\n 'head_backbone': [],\n 'head_head': [],\n }\n\n def forward(self,\n sweep_imgs,\n mats_dict,\n is_train=False\n ):\n \"\"\"Forward function for BEVDepth\n\n Args:\n sweep_imgs (Tensor): Input images.\n mats_dict(dict):\n sensor2ego_mats(Tensor): Transformation matrix from\n camera to ego with shape of (B, num_sweeps,\n num_cameras, 4, 4).\n intrin_mats(Tensor): Intrinsic matrix with shape\n of (B, num_sweeps, num_cameras, 4, 4).\n ida_mats(Tensor): Transformation matrix for ida with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n sensor2sensor_mats(Tensor): Transformation matrix\n from key frame camera to sweep frame camera with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n bda_mat(Tensor): Rotation matrix for bda with shape\n of (B, 4, 4).\n\n Returns:\n tuple(list[dict]): Output results for tasks.\n \"\"\"\n if is_train:\n self.time = None\n\n x, depth, _ = self.backbone_img(sweep_imgs, mats_dict,\n is_return_depth=True)\n preds, _ = self.head(x)\n return preds, depth\n else:\n if self.idx < 100: # skip few iterations for warmup\n self.times = None\n elif self.idx == 100:\n self.times = self.times_dict\n\n x, self.times = self.backbone_img(sweep_imgs, mats_dict,\n times=self.times)\n preds, self.times = self.head(x, times=self.times)\n\n if self.idx == 1000:\n time_mean = {}\n for k, v in self.times.items():\n time_mean[k] = sum(v) / len(v)\n print('img: %.2f' % time_mean['img'])\n print(' img_backbone: %.2f' % time_mean['img_backbone'])\n print(' img_dep: %.2f' % time_mean['img_dep'])\n print(' img_transform: %.2f' % time_mean['img_transform'])\n print(' img_pool: %.2f' % time_mean['img_pool'])\n print('head: %.2f' % time_mean['head'])\n print(' head_backbone: %.2f' % time_mean['head_backbone'])\n print(' head_head: %.2f' % time_mean['head_head'])\n total = time_mean['img'] + time_mean['head']\n print('total: %.2f' % total)\n print(' ')\n print('FPS: %.2f' % (1000/total))\n\n self.idx += 1\n return preds\n\n def get_targets(self, gt_boxes, gt_labels):\n \"\"\"Generate training targets for a single sample.\n\n Args:\n gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth gt boxes.\n gt_labels_3d (torch.Tensor): Labels of boxes.\n\n Returns:\n tuple[list[torch.Tensor]]: Tuple of target including \\\n the following results in order.\n\n - list[torch.Tensor]: Heatmap scores.\n - list[torch.Tensor]: Ground truth boxes.\n - list[torch.Tensor]: Indexes indicating the position \\\n of the valid boxes.\n - list[torch.Tensor]: Masks indicating which boxes \\\n are valid.\n \"\"\"\n return self.head.get_targets(gt_boxes, gt_labels)\n\n def loss(self, targets, preds_dicts):\n \"\"\"Loss function for BEVDepth.\n\n Args:\n gt_bboxes_3d (list[:obj:`LiDARInstance3DBoxes`]): Ground\n truth gt boxes.\n gt_labels_3d (list[torch.Tensor]): Labels of boxes.\n preds_dicts (dict): Output of forward function.\n\n Returns:\n dict[str:torch.Tensor]: Loss of heatmap and 
bbox of each task.\n \"\"\"\n return self.head.loss(targets, preds_dicts)\n\n def get_bboxes(self, preds_dicts, img_metas=None, img=None, rescale=False):\n \"\"\"Generate bboxes from bbox head predictions.\n\n Args:\n preds_dicts (tuple[list[dict]]): Prediction results.\n img_metas (list[dict]): Point cloud and image's meta info.\n\n Returns:\n list[dict]: Decoded bbox, scores and labels after nms.\n \"\"\"\n return self.head.get_bboxes(preds_dicts, img_metas, img, rescale)" }, { "identifier": "RVTLSSFPN", "path": "layers/backbones/rvt_lss_fpn.py", "snippet": "class RVTLSSFPN(BaseLSSFPN):\n def __init__(self, **kwargs):\n super(RVTLSSFPN, self).__init__(**kwargs)\n\n self.register_buffer('frustum', self.create_frustum())\n self.z_bound = kwargs['z_bound']\n self.radar_view_transform = kwargs['radar_view_transform']\n self.camera_aware = kwargs['camera_aware']\n\n self.depth_net = self._configure_depth_net(kwargs['depth_net_conf'])\n self.view_aggregation_net = ViewAggregation(self.output_channels*2,\n self.output_channels*2,\n self.output_channels)\n\n def _configure_depth_net(self, depth_net_conf):\n return DepthNet(\n depth_net_conf['in_channels'],\n depth_net_conf['mid_channels'],\n self.output_channels,\n self.depth_channels,\n camera_aware=self.camera_aware\n )\n\n def get_geometry_collapsed(self, sensor2ego_mat, intrin_mat, ida_mat, bda_mat,\n z_min=-5., z_max=3.):\n batch_size, num_cams, _, _ = sensor2ego_mat.shape\n\n # undo post-transformation\n # B x N x D x H x W x 3\n points = self.frustum\n ida_mat = ida_mat.view(batch_size, num_cams, 1, 1, 1, 4, 4)\n points = ida_mat.inverse().matmul(points.unsqueeze(-1)).double()\n # cam_to_ego\n points = torch.cat(\n (points[:, :, :, :, :, :2] * points[:, :, :, :, :, 2:3],\n points[:, :, :, :, :, 2:]), 5)\n\n combine = sensor2ego_mat.matmul(torch.inverse(intrin_mat)).double()\n points = combine.view(batch_size, num_cams, 1, 1, 1, 4,\n 4).matmul(points).half()\n if bda_mat is not None:\n bda_mat = bda_mat.unsqueeze(1).repeat(1, num_cams, 1, 1).view(\n batch_size, num_cams, 1, 1, 1, 4, 4)\n points = (bda_mat @ points).squeeze(-1)\n else:\n points = points.squeeze(-1)\n\n points_out = points[:, :, :, 0:1, :, :3]\n points_valid_z = ((points[..., 2] > z_min) & (points[..., 2] < z_max))\n\n return points_out, points_valid_z\n\n def _forward_view_aggregation_net(self, img_feat_with_depth):\n # BEVConv2D [n, c, d, h, w] -> [n, h, c, w, d]\n img_feat_with_depth = img_feat_with_depth.permute(\n 0, 3, 1, 4, 2).contiguous() # [n, c, d, h, w] -> [n, h, c, w, d]\n n, h, c, w, d = img_feat_with_depth.shape\n img_feat_with_depth = img_feat_with_depth.view(-1, c, w, d)\n img_feat_with_depth = (\n self.view_aggregation_net(img_feat_with_depth).view(\n n, h, c//2, w, d).permute(0, 2, 4, 1, 3).contiguous().float())\n return img_feat_with_depth\n\n def _forward_depth_net(self, feat, mats_dict):\n return self.depth_net(feat, mats_dict)\n\n def _split_batch_cam(self, feat, inv=False, num_cams=6):\n batch_size = feat.shape[0]\n if not inv:\n return feat.reshape(batch_size // num_cams, num_cams, *feat.shape[1:])\n else:\n return feat.reshape(batch_size * num_cams, *feat.shape[2:])\n\n def _forward_single_sweep(self,\n sweep_index,\n sweep_imgs,\n mats_dict,\n pts_context,\n pts_occupancy,\n return_depth=False):\n \"\"\"Forward function for single sweep.\n\n Args:\n sweep_index (int): Index of sweeps.\n sweep_imgs (Tensor): Input images.\n mats_dict (dict):\n sensor2ego_mats(Tensor): Transformation matrix from\n camera to ego.\n intrin_mats(Tensor): Intrinsic 
matrix.\n ida_mats(Tensor): Transformation matrix for ida.\n sensor2sensor_mats(Tensor): Transformation matrix\n from key frame camera to sweep frame camera.\n bda_mat(Tensor): Rotation matrix for bda.\n ptss_context(Tensor): Input point context feature.\n ptss_occupancy(Tensor): Input point occupancy.\n return_depth (bool, optional): Whether to return depth.\n Default: False.\n\n Returns:\n Tensor: BEV feature map.\n \"\"\"\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t3 = torch.cuda.Event(enable_timing=True)\n t4 = torch.cuda.Event(enable_timing=True)\n t5 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n batch_size, num_sweeps, num_cams, num_channels, img_height, \\\n img_width = sweep_imgs.shape\n\n # extract image feature\n img_feats = self.get_cam_feats(sweep_imgs)\n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['img_backbone'].append(t1.elapsed_time(t2))\n\n source_features = img_feats[:, 0, ...]\n source_features = self._split_batch_cam(source_features, inv=True, num_cams=num_cams)\n\n # predict image context feature, depth distribution\n depth_feature = self._forward_depth_net(\n source_features,\n mats_dict,\n )\n if self.times is not None:\n t3.record()\n torch.cuda.synchronize()\n self.times['img_dep'].append(t2.elapsed_time(t3))\n\n image_feature = depth_feature[:, self.depth_channels:(self.depth_channels + self.output_channels)]\n\n depth_occupancy = depth_feature[:, :self.depth_channels].softmax(\n dim=1, dtype=depth_feature.dtype)\n img_feat_with_depth = depth_occupancy.unsqueeze(1) * image_feature.unsqueeze(2)\n\n # calculate frustum grid within valid height\n geom_xyz, geom_xyz_valid = self.get_geometry_collapsed(\n mats_dict['sensor2ego_mats'][:, sweep_index, ...],\n mats_dict['intrin_mats'][:, sweep_index, ...],\n mats_dict['ida_mats'][:, sweep_index, ...],\n mats_dict.get('bda_mat', None))\n\n geom_xyz_valid = self._split_batch_cam(geom_xyz_valid, inv=True, num_cams=num_cams).unsqueeze(1)\n img_feat_with_depth = (img_feat_with_depth * geom_xyz_valid).sum(3).unsqueeze(3)\n\n if self.radar_view_transform:\n radar_occupancy = pts_occupancy.permute(0, 2, 1, 3).contiguous()\n image_feature_collapsed = (image_feature * geom_xyz_valid.max(2).values).sum(2).unsqueeze(2)\n img_feat_with_radar = radar_occupancy.unsqueeze(1) * image_feature_collapsed.unsqueeze(2)\n\n img_context = torch.cat([img_feat_with_depth, img_feat_with_radar], dim=1)\n img_context = self._forward_view_aggregation_net(img_context)\n else:\n img_context = img_feat_with_depth\n if self.times is not None:\n t4.record()\n torch.cuda.synchronize()\n self.times['img_transform'].append(t3.elapsed_time(t4))\n\n img_context = self._split_batch_cam(img_context, num_cams=num_cams)\n img_context = img_context.permute(0, 1, 3, 4, 5, 2).contiguous()\n\n pts_context = self._split_batch_cam(pts_context, num_cams=num_cams)\n pts_context = pts_context.unsqueeze(-2).permute(0, 1, 3, 4, 5, 2).contiguous()\n\n fused_context = torch.cat([img_context, pts_context], dim=-1)\n\n geom_xyz = ((geom_xyz - (self.voxel_coord - self.voxel_size / 2.0)) /\n self.voxel_size).int()\n geom_xyz[..., 2] = 0 # collapse z-axis\n geo_pos = torch.ones_like(geom_xyz)\n \n # sparse voxel pooling\n feature_map, _ = average_voxel_pooling(geom_xyz, fused_context.contiguous(), geo_pos,\n self.voxel_num.cuda())\n if self.times is not None:\n t5.record()\n torch.cuda.synchronize()\n 
self.times['img_pool'].append(t4.elapsed_time(t5))\n\n if return_depth:\n return feature_map.contiguous(), depth_feature[:, :self.depth_channels].softmax(1)\n return feature_map.contiguous()\n\n def forward(self,\n sweep_imgs,\n mats_dict,\n ptss_context,\n ptss_occupancy,\n times=None,\n return_depth=False):\n \"\"\"Forward function.\n\n Args:\n sweep_imgs(Tensor): Input images with shape of (B, num_sweeps,\n num_cameras, 3, H, W).\n mats_dict(dict):\n sensor2ego_mats(Tensor): Transformation matrix from\n camera to ego with shape of (B, num_sweeps,\n num_cameras, 4, 4).\n intrin_mats(Tensor): Intrinsic matrix with shape\n of (B, num_sweeps, num_cameras, 4, 4).\n ida_mats(Tensor): Transformation matrix for ida with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n sensor2sensor_mats(Tensor): Transformation matrix\n from key frame camera to sweep frame camera with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n bda_mat(Tensor): Rotation matrix for bda with shape\n of (B, 4, 4).\n ptss_context(Tensor): Input point context feature with shape of\n (B * num_cameras, num_sweeps, C, D, W).\n ptss_occupancy(Tensor): Input point occupancy with shape of\n (B * num_cameras, num_sweeps, 1, D, W).\n times(Dict, optional): Inference time measurement.\n is_return_depth (bool, optional): Whether to return depth.\n Default: False.\n\n Return:\n Tensor: bev feature map.\n \"\"\"\n self.times = times\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n batch_size, num_sweeps, num_cams, num_channels, img_height, \\\n img_width = sweep_imgs.shape\n key_frame_res = self._forward_single_sweep(\n 0,\n sweep_imgs[:, 0:1, ...],\n mats_dict,\n ptss_context[:, 0, ...] if ptss_context is not None else None,\n ptss_occupancy[:, 0, ...] if ptss_occupancy is not None else None,\n return_depth=return_depth)\n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['img'].append(t1.elapsed_time(t2))\n\n if num_sweeps == 1:\n if return_depth:\n return key_frame_res[0].unsqueeze(1), key_frame_res[1], self.times\n else:\n return key_frame_res.unsqueeze(1), self.times\n\n key_frame_feature = key_frame_res[0] if return_depth else key_frame_res\n ret_feature_list = [key_frame_feature]\n for sweep_index in range(1, num_sweeps):\n with torch.no_grad():\n feature_map = self._forward_single_sweep(\n sweep_index,\n sweep_imgs[:, sweep_index:sweep_index + 1, ...],\n mats_dict,\n ptss_context[:, sweep_index, ...] if ptss_context is not None else None,\n ptss_occupancy[:, sweep_index, ...] if ptss_occupancy is not None else None,\n return_depth=False)\n ret_feature_list.append(feature_map)\n\n if return_depth:\n return torch.stack(ret_feature_list, 1), key_frame_res[1], self.times\n else:\n return torch.stack(ret_feature_list, 1), self.times" }, { "identifier": "PtsBackbone", "path": "layers/backbones/pts_backbone.py", "snippet": "class PtsBackbone(nn.Module):\n \"\"\"Pillar Feature Net.\n\n The network prepares the pillar features and performs forward pass\n through PFNLayers.\n\n Args:\n in_channels (int, optional): Number of input features,\n either x, y, z or x, y, z, r. Defaults to 4.\n feat_channels (tuple, optional): Number of features in each of the\n N PFNLayers. Defaults to (64, ).\n with_distance (bool, optional): Whether to include Euclidean distance\n to points. Defaults to False.\n with_cluster_center (bool, optional): [description]. 
Defaults to True.\n with_voxel_center (bool, optional): [description]. Defaults to True.\n voxel_size (tuple[float], optional): Size of voxels, only utilize x\n and y size. Defaults to (0.2, 0.2, 4).\n point_cloud_range (tuple[float], optional): Point cloud range, only\n utilizes x and y min. Defaults to (0, -40, -3, 70.4, 40, 1).\n norm_cfg ([type], optional): [description].\n Defaults to dict(type='BN1d', eps=1e-3, momentum=0.01).\n mode (str, optional): The mode to gather point features. Options are\n 'max' or 'avg'. Defaults to 'max'.\n legacy (bool, optional): Whether to use the new behavior or\n the original behavior. Defaults to True.\n \"\"\"\n\n def __init__(self,\n pts_voxel_layer,\n pts_voxel_encoder,\n pts_middle_encoder,\n pts_backbone,\n pts_neck,\n return_context=True,\n return_occupancy=True,\n **kwargs,\n ):\n super(PtsBackbone, self).__init__()\n\n self.pts_voxel_layer = Voxelization(**pts_voxel_layer)\n self.pts_voxel_encoder = builder.build_voxel_encoder(pts_voxel_encoder)\n self.pts_middle_encoder = builder.build_middle_encoder(pts_middle_encoder)\n self.pts_backbone = builder.build_backbone(pts_backbone)\n self.return_context = return_context\n self.return_occupancy = return_occupancy\n mid_channels = pts_backbone['out_channels'][-1]\n if pts_neck is not None:\n self.pts_neck = builder.build_neck(pts_neck)\n mid_channels = sum(pts_neck['out_channels'])\n else:\n self.pts_neck = None\n\n if self.return_context:\n if 'out_channels_pts' in kwargs:\n out_channels = kwargs['out_channels_pts']\n else:\n out_channels = 80\n self.pred_context = nn.Sequential(\n nn.Conv2d(mid_channels,\n mid_channels//2,\n kernel_size=3,\n stride=1,\n padding=1,\n padding_mode='reflect'),\n nn.BatchNorm2d(mid_channels//2),\n nn.ReLU(inplace=True),\n nn.Conv2d(mid_channels//2,\n out_channels,\n kernel_size=1,\n stride=1,\n padding=0),\n )\n\n if self.return_occupancy:\n self.pred_occupancy = nn.Sequential(\n nn.Conv2d(mid_channels,\n mid_channels//2,\n kernel_size=3,\n stride=1,\n padding=1,\n padding_mode='reflect'),\n nn.BatchNorm2d(mid_channels//2),\n nn.ReLU(inplace=True),\n nn.Conv2d(mid_channels//2,\n 1,\n kernel_size=1,\n stride=1,\n padding=0),\n )\n\n if 'occupancy_init' in kwargs:\n occupancy_init = kwargs['occupancy_init']\n else:\n occupancy_init = 0.01\n self.pred_occupancy[-1].bias.data.fill_(bias_init_with_prob(occupancy_init))\n\n def voxelize(self, points):\n \"\"\"Apply dynamic voxelization to points.\n\n Args:\n points (list[torch.Tensor]): Points of each sample.\n\n Returns:\n tuple[torch.Tensor]: Concatenated points, number of points\n per voxel, and coordinates.\n \"\"\"\n voxels, coors, num_points = [], [], []\n batch_size, _, _ = points.shape\n points_list = [points[i] for i in range(batch_size)]\n\n for res in points_list:\n res_voxels, res_coors, res_num_points = self.pts_voxel_layer(res)\n voxels.append(res_voxels)\n coors.append(res_coors)\n num_points.append(res_num_points)\n voxels = torch.cat(voxels, dim=0)\n num_points = torch.cat(num_points, dim=0)\n coors_batch = []\n for i, coor in enumerate(coors):\n coor_pad = F.pad(coor, (1, 0), mode='constant', value=i)\n coors_batch.append(coor_pad)\n coors_batch = torch.cat(coors_batch, dim=0)\n return voxels, num_points, coors_batch\n\n def _forward_single_sweep(self, pts):\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t3 = torch.cuda.Event(enable_timing=True)\n t4 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n 
B, N, P, F = pts.shape\n batch_size = B * N\n pts = pts.contiguous().view(B*N, P, F)\n\n voxels, num_points, coors = self.voxelize(pts)\n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['pts_voxelize'].append(t1.elapsed_time(t2))\n\n voxel_features = self.pts_voxel_encoder(voxels, num_points, coors)\n x = self.pts_middle_encoder(voxel_features, coors, batch_size)\n x = self.pts_backbone(x)\n if self.pts_neck is not None:\n x = self.pts_neck(x)\n\n if self.times is not None:\n t3.record()\n torch.cuda.synchronize()\n self.times['pts_backbone'].append(t2.elapsed_time(t3))\n\n x_context = None\n x_occupancy = None\n if self.return_context:\n x_context = self.pred_context(x[-1]).unsqueeze(1)\n if self.return_occupancy:\n x_occupancy = self.pred_occupancy(x[-1]).unsqueeze(1).sigmoid()\n\n if self.times is not None:\n t4.record()\n torch.cuda.synchronize()\n self.times['pts_head'].append(t3.elapsed_time(t4))\n\n return x_context, x_occupancy\n\n def forward(self, ptss, times=None):\n self.times = times\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n batch_size, num_sweeps, num_cams, _, _ = ptss.shape\n\n key_context, key_occupancy = self._forward_single_sweep(ptss[:, 0, ...])\n \n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['pts'].append(t1.elapsed_time(t2))\n\n if num_sweeps == 1:\n return key_context, key_occupancy, self.times\n\n context_list = [key_context]\n occupancy_list = [key_occupancy]\n for sweep_index in range(1, num_sweeps):\n with torch.no_grad():\n context, occupancy = self._forward_single_sweep(ptss[:, sweep_index, ...])\n context_list.append(context)\n occupancy_list.append(occupancy)\n\n ret_context = None\n ret_occupancy = None\n if self.return_context:\n ret_context = torch.cat(context_list, 1)\n if self.return_occupancy:\n ret_occupancy = torch.cat(occupancy_list, 1)\n return ret_context, ret_occupancy, self.times" }, { "identifier": "MFAFuser", "path": "layers/fuser/multimodal_feature_aggregation.py", "snippet": "class MFAFuser(nn.Module):\n def __init__(self, num_sweeps=4, img_dims=80, pts_dims=128, embed_dims=256,\n num_layers=6, num_heads=4, bev_shape=(128, 128)):\n super(MFAFuser, self).__init__()\n\n self.num_modalities = 2\n self.use_cams_embeds = False\n\n self.num_heads = num_heads\n\n self.img_dims = img_dims\n self.pts_dims = pts_dims\n self.embed_dims = embed_dims\n _pos_dim_ = self.embed_dims//2\n _ffn_dim_ = self.embed_dims*2\n\n self.norm_img = build_norm_layer(dict(type='LN'), img_dims)[1]\n self.norm_pts = build_norm_layer(dict(type='LN'), pts_dims)[1]\n self.input_proj = nn.Linear(img_dims + pts_dims, self.embed_dims)\n\n self.bev_h, self.bev_w = bev_shape\n\n self.positional_encoding = build_positional_encoding(\n dict(\n type='LearnedPositionalEncoding',\n num_feats=_pos_dim_,\n row_num_embed=self.bev_h,\n col_num_embed=self.bev_w,\n ),\n )\n self.register_buffer('ref_2d', self.get_reference_points(self.bev_h, self.bev_w))\n\n ffn_cfgs = dict(\n type='FFN',\n embed_dims=self.embed_dims,\n feedforward_channels=_ffn_dim_,\n num_fcs=2,\n ffn_drop=0.1,\n act_cfg=dict(type='ReLU', inplace=True),\n )\n norm_cfgs = dict(type='LN')\n\n self.ffn_layers = ModuleList()\n for _ in range(num_layers):\n self.ffn_layers.append(\n build_feedforward_network(ffn_cfgs)\n )\n self.norm_layers1 = ModuleList()\n for _ in range(num_layers):\n self.norm_layers1.append(\n build_norm_layer(norm_cfgs, 
self.embed_dims)[1],\n )\n self.norm_layers2 = ModuleList()\n for _ in range(num_layers):\n self.norm_layers2.append(\n build_norm_layer(norm_cfgs, self.embed_dims)[1],\n )\n self.attn_layers = ModuleList()\n for _ in range(num_layers):\n self.attn_layers.append(\n DeformableCrossAttention(\n img_dims=self.img_dims,\n pts_dims=self.pts_dims,\n embed_dims=self.embed_dims,\n num_heads=self.num_heads,\n num_modalities=self.num_modalities,\n num_points=4\n ),\n )\n\n self.reduce_conv = nn.Sequential(\n nn.Conv2d(embed_dims*num_sweeps,\n embed_dims,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=False),\n nn.BatchNorm2d(embed_dims),\n nn.ReLU(inplace=True),\n )\n\n self.init_weights()\n\n def init_weights(self):\n \"\"\"Initialize the transformer weights.\"\"\"\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n for m in self.modules():\n if isinstance(m, DeformableCrossAttention):\n try:\n m.init_weight()\n except AttributeError:\n m.init_weights()\n\n @staticmethod\n def get_reference_points(H, W, dtype=torch.float):\n \"\"\"Get the reference points used in SCA and TSA.\n Args:\n H, W: spatial shape of bev.\n Z: hight of pillar.\n D: sample D points uniformly from each pillar.\n device (obj:`device`): The device where\n reference_points should be.\n Returns:\n Tensor: reference points used in decoder, has \\\n shape (bs, num_keys, num_levels, 2).\n \"\"\"\n ref_y, ref_x = torch.meshgrid(\n torch.linspace(\n 0.5, H - 0.5, H, dtype=dtype),\n torch.linspace(\n 0.5, W - 0.5, W, dtype=dtype)\n )\n ref_y = ref_y.reshape(-1)[None] / H\n ref_x = ref_x.reshape(-1)[None] / W\n ref_2d = torch.stack((ref_x, ref_y), -1)\n ref_2d = ref_2d.unsqueeze(2).unsqueeze(3)\n return ref_2d\n\n @auto_fp16(apply_to=('feat_img', 'feat_pts'))\n def _forward_single_sweep(self, feat_img, feat_pts):\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t3 = torch.cuda.Event(enable_timing=True)\n t4 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n bs = feat_img.shape[0]\n ref_2d_stack = self.ref_2d.repeat(bs, 1, 1, self.num_modalities, 1)\n\n feat_img = self.norm_img(feat_img.permute(0, 2, 3, 1).contiguous()).permute(0, 3, 1, 2).contiguous()\n feat_pts = self.norm_pts(feat_pts.permute(0, 2, 3, 1).contiguous()).permute(0, 3, 1, 2).contiguous()\n\n feat_flatten = []\n spatial_shapes = []\n for feat in [feat_img, feat_pts]:\n _, _, h, w = feat.shape\n spatial_shape = (h, w)\n feat = feat.flatten(2).permute(0, 2, 1).contiguous() # [bs, num_cam, c, dw] -> [num_cam, bs, dw, c]\n spatial_shapes.append(spatial_shape)\n feat_flatten.append(feat)\n\n spatial_shapes = torch.as_tensor(\n spatial_shapes, dtype=torch.long, device=feat_img.device)\n level_start_index = torch.cat((spatial_shapes.new_zeros(\n (1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))\n\n bev_queries = torch.cat(feat_flatten, -1)\n bev_queries = self.input_proj(bev_queries)\n\n bev_mask = torch.zeros((bs, self.bev_h, self.bev_w),\n device=bev_queries.device).to(feat_img.dtype)\n bev_pos = self.positional_encoding(bev_mask).to(feat_img.dtype)\n bev_pos = bev_pos.flatten(2).permute(0, 2, 1).contiguous()\n\n feat_img = feat_flatten[0]\n feat_pts = feat_flatten[1]\n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['fusion_pre'].append(t1.elapsed_time(t2))\n\n for attn_layer, ffn_layer, norm_layer1, norm_layer2 in \\\n zip(self.attn_layers, self.ffn_layers, self.norm_layers1, self.norm_layers2):\n # post norm\n 
bev_queries = attn_layer(\n bev_queries,\n feat_img,\n feat_pts,\n identity=None,\n query_pos=bev_pos,\n reference_points=ref_2d_stack,\n spatial_shapes=spatial_shapes,\n level_start_index=level_start_index,\n )\n bev_queries = norm_layer1(bev_queries)\n bev_queries = ffn_layer(bev_queries, identity=None)\n bev_queries = norm_layer2(bev_queries)\n if self.times is not None:\n t3.record()\n torch.cuda.synchronize()\n self.times['fusion_layer'].append(t2.elapsed_time(t3))\n\n output = bev_queries.permute(0, 2, 1).contiguous().reshape(bs, self.embed_dims, h, w)\n if self.times is not None:\n t4.record()\n torch.cuda.synchronize()\n self.times['fusion_post'].append(t3.elapsed_time(t4))\n\n return output\n\n def forward(self, feats, times=None):\n self.times = times\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n num_sweeps = feats.shape[1]\n key_frame_res = self._forward_single_sweep(\n feats[:, 0, :self.img_dims],\n feats[:, 0, self.img_dims:self.img_dims+self.pts_dims]\n )\n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['fusion'].append(t1.elapsed_time(t2))\n\n if num_sweeps == 1:\n return key_frame_res, self.times\n\n ret_feature_list = [key_frame_res]\n for sweep_index in range(1, num_sweeps):\n with torch.no_grad():\n feature_map = self._forward_single_sweep(\n feats[:, sweep_index, :self.img_dims],\n feats[:, sweep_index, self.img_dims:self.img_dims+self.pts_dims])\n ret_feature_list.append(feature_map)\n\n return self.reduce_conv(torch.cat(ret_feature_list, 1)).float(), self.times" }, { "identifier": "BEVDepthHead", "path": "layers/heads/bev_depth_head_det.py", "snippet": "class BEVDepthHead(CenterHead):\n \"\"\"Head for BevDepth.\n\n Args:\n in_channels(int): Number of channels after bev_neck.\n tasks(dict): Tasks for head.\n bbox_coder(dict): Config of bbox coder.\n common_heads(dict): Config of head for each task.\n loss_cls(dict): Config of classification loss.\n loss_bbox(dict): Config of regression loss.\n gaussian_overlap(float): Gaussian overlap used for `get_targets`.\n min_radius(int): Min radius used for `get_targets`.\n train_cfg(dict): Config used in the training process.\n test_cfg(dict): Config used in the test process.\n bev_backbone_conf(dict): Cnfig of bev_backbone.\n bev_neck_conf(dict): Cnfig of bev_neck.\n \"\"\"\n def __init__(\n self,\n in_channels=256,\n tasks=None,\n bbox_coder=None,\n common_heads=dict(),\n loss_cls=dict(type='GaussianFocalLoss', reduction='mean'),\n loss_bbox=dict(type='L1Loss', reduction='mean', loss_weight=0.25),\n gaussian_overlap=0.1,\n min_radius=2,\n train_cfg=None,\n test_cfg=None,\n bev_backbone_conf=bev_backbone_conf,\n bev_neck_conf=bev_neck_conf,\n separate_head=dict(type='SeparateHead',\n init_bias=-2.19,\n final_kernel=3),\n ):\n super(BEVDepthHead, self).__init__(\n in_channels=in_channels,\n tasks=tasks,\n bbox_coder=bbox_coder,\n common_heads=common_heads,\n loss_cls=loss_cls,\n loss_bbox=loss_bbox,\n separate_head=separate_head,\n )\n self.trunk = build_backbone(bev_backbone_conf)\n self.trunk.init_weights()\n self.neck = build_neck(bev_neck_conf)\n self.neck.init_weights()\n del self.trunk.maxpool\n self.gaussian_overlap = gaussian_overlap\n self.min_radius = min_radius\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n @autocast(False)\n def forward(self, x, times=None):\n \"\"\"Forward pass.\n\n Args:\n x (list[torch.Tensor]): Multi-level features, e.g.,\n features 
produced by FPN.\n\n Returns:\n tuple(list[dict]): Output results for tasks.\n \"\"\"\n if times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t3 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n # FPN\n trunk_outs = [x]\n if self.trunk.deep_stem:\n x = self.trunk.stem(x)\n else:\n x = self.trunk.conv1(x)\n x = self.trunk.norm1(x)\n x = self.trunk.relu(x)\n for i, layer_name in enumerate(self.trunk.res_layers):\n res_layer = getattr(self.trunk, layer_name)\n x = res_layer(x)\n if i in self.trunk.out_indices:\n trunk_outs.append(x)\n fpn_output = self.neck(trunk_outs)\n\n if times is not None:\n t2.record()\n torch.cuda.synchronize()\n times['head_backbone'].append(t1.elapsed_time(t2))\n\n ret_values = super().forward(fpn_output)\n\n if times is not None:\n t3.record()\n torch.cuda.synchronize()\n times['head_head'].append(t2.elapsed_time(t3))\n times['head'].append(t1.elapsed_time(t3))\n\n return ret_values, times\n\n def get_targets_single(self, gt_bboxes_3d, gt_labels_3d):\n \"\"\"Generate training targets for a single sample.\n\n Args:\n gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth gt boxes.\n gt_labels_3d (torch.Tensor): Labels of boxes.\n\n Returns:\n tuple[list[torch.Tensor]]: Tuple of target including \\\n the following results in order.\n\n - list[torch.Tensor]: Heatmap scores.\n - list[torch.Tensor]: Ground truth boxes.\n - list[torch.Tensor]: Indexes indicating the position \\\n of the valid boxes.\n - list[torch.Tensor]: Masks indicating which boxes \\\n are valid.\n \"\"\"\n max_objs = self.train_cfg['max_objs'] * self.train_cfg['dense_reg']\n grid_size = torch.tensor(self.train_cfg['grid_size'])\n pc_range = torch.tensor(self.train_cfg['point_cloud_range'])\n voxel_size = torch.tensor(self.train_cfg['voxel_size'])\n\n feature_map_size = grid_size[:2] // self.train_cfg['out_size_factor']\n\n # reorganize the gt_dict by tasks\n task_masks = []\n flag = 0\n for class_name in self.class_names:\n task_masks.append([\n torch.where(gt_labels_3d == class_name.index(i) + flag)\n for i in class_name\n ])\n flag += len(class_name)\n\n task_boxes = []\n task_classes = []\n flag2 = 0\n for idx, mask in enumerate(task_masks):\n task_box = []\n task_class = []\n for m in mask:\n task_box.append(gt_bboxes_3d[m])\n # 0 is background for each task, so we need to add 1 here.\n task_class.append(gt_labels_3d[m] + 1 - flag2)\n task_boxes.append(\n torch.cat(task_box, axis=0).to(gt_bboxes_3d.device))\n task_classes.append(\n torch.cat(task_class).long().to(gt_bboxes_3d.device))\n flag2 += len(mask)\n draw_gaussian = draw_heatmap_gaussian\n heatmaps, anno_boxes, inds, masks = [], [], [], []\n\n for idx, task_head in enumerate(self.task_heads):\n heatmap = gt_bboxes_3d.new_zeros(\n (len(self.class_names[idx]), feature_map_size[1],\n feature_map_size[0]),\n device='cuda')\n\n anno_box = gt_bboxes_3d.new_zeros((max_objs, 10),\n dtype=torch.float32,\n device='cuda')\n\n ind = gt_labels_3d.new_zeros((max_objs),\n dtype=torch.int64,\n device='cuda')\n mask = gt_bboxes_3d.new_zeros((max_objs),\n dtype=torch.uint8,\n device='cuda')\n\n num_objs = min(task_boxes[idx].shape[0], max_objs)\n\n for k in range(num_objs):\n cls_id = task_classes[idx][k] - 1\n\n width = task_boxes[idx][k][3]\n length = task_boxes[idx][k][4]\n width = width / voxel_size[0] / self.train_cfg[\n 'out_size_factor']\n length = length / voxel_size[1] / self.train_cfg[\n 'out_size_factor']\n\n if width > 0 and length > 0:\n radius = 
gaussian_radius(\n (length, width),\n min_overlap=self.train_cfg['gaussian_overlap'])\n radius = max(self.train_cfg['min_radius'], int(radius))\n\n # be really careful for the coordinate system of\n # your box annotation.\n x, y, z = task_boxes[idx][k][0], task_boxes[idx][k][\n 1], task_boxes[idx][k][2]\n\n coor_x = (\n x - pc_range[0]\n ) / voxel_size[0] / self.train_cfg['out_size_factor']\n coor_y = (\n y - pc_range[1]\n ) / voxel_size[1] / self.train_cfg['out_size_factor']\n\n center = torch.tensor([coor_x, coor_y],\n dtype=torch.float32,\n device='cuda')\n center_int = center.to(torch.int32)\n\n # throw out not in range objects to avoid out of array\n # area when creating the heatmap\n if not (0 <= center_int[0] < feature_map_size[0]\n and 0 <= center_int[1] < feature_map_size[1]):\n continue\n\n draw_gaussian(heatmap[cls_id], center_int, radius)\n\n new_idx = k\n x, y = center_int[0], center_int[1]\n\n assert y * feature_map_size[0] + x < feature_map_size[\n 0] * feature_map_size[1]\n\n ind[new_idx] = y * feature_map_size[0] + x\n mask[new_idx] = 1\n\n vx, vy = task_boxes[idx][k][7:]\n rot = task_boxes[idx][k][6]\n box_dim = task_boxes[idx][k][3:6]\n if self.norm_bbox:\n box_dim = box_dim.log()\n anno_box[new_idx] = torch.cat([\n center - torch.tensor([x, y], device='cuda'),\n z.unsqueeze(0),\n box_dim,\n torch.sin(rot).unsqueeze(0),\n torch.cos(rot).unsqueeze(0),\n vx.unsqueeze(0),\n vy.unsqueeze(0),\n ])\n\n heatmaps.append(heatmap)\n anno_boxes.append(anno_box)\n masks.append(mask)\n inds.append(ind)\n return heatmaps, anno_boxes, inds, masks\n\n def loss(self, targets, preds_dicts, **kwargs):\n \"\"\"Loss function for BEVDepthHead.\n\n Args:\n gt_bboxes_3d (list[:obj:`LiDARInstance3DBoxes`]): Ground\n truth gt boxes.\n gt_labels_3d (list[torch.Tensor]): Labels of boxes.\n preds_dicts (dict): Output of forward function.\n\n Returns:\n dict[str:torch.Tensor]: Loss of heatmap and bbox of each task.\n \"\"\"\n heatmaps, anno_boxes, inds, masks = targets\n return_loss = 0\n return_loss_heatmap, return_loss_bbox = 0, 0\n for task_id, preds_dict in enumerate(preds_dicts):\n # heatmap focal loss\n preds_dict[0]['heatmap'] = clip_sigmoid(preds_dict[0]['heatmap'])\n num_pos = heatmaps[task_id].eq(1).float().sum().item()\n cls_avg_factor = torch.clamp(reduce_mean(\n heatmaps[task_id].new_tensor(num_pos)),\n min=1).item()\n loss_heatmap = self.loss_cls(preds_dict[0]['heatmap'],\n heatmaps[task_id],\n avg_factor=cls_avg_factor)\n target_box = anno_boxes[task_id]\n # reconstruct the anno_box from multiple reg heads\n preds_dict[0]['anno_box'] = torch.cat(\n (\n preds_dict[0]['reg'],\n preds_dict[0]['height'],\n preds_dict[0]['dim'],\n preds_dict[0]['rot'],\n preds_dict[0]['vel'],\n ),\n dim=1,\n )\n\n # Regression loss for dimension, offset, height, rotation\n num = masks[task_id].float().sum()\n ind = inds[task_id]\n pred = preds_dict[0]['anno_box'].permute(0, 2, 3, 1).contiguous()\n pred = pred.view(pred.size(0), -1, pred.size(3))\n pred = self._gather_feat(pred, ind)\n mask = masks[task_id].unsqueeze(2).expand_as(target_box).float()\n num = torch.clamp(reduce_mean(target_box.new_tensor(num)),\n min=1e-4).item()\n isnotnan = (~torch.isnan(target_box)).float()\n mask *= isnotnan\n code_weights = self.train_cfg['code_weights']\n bbox_weights = mask * mask.new_tensor(code_weights)\n loss_bbox = self.loss_bbox(pred,\n target_box,\n bbox_weights,\n avg_factor=num)\n return_loss += loss_bbox\n return_loss += loss_heatmap\n return_loss_bbox += loss_bbox\n return_loss_heatmap += loss_heatmap\n return 
return_loss, return_loss_heatmap, return_loss_bbox" } ]
import mmcv
from models.base_bev_depth import BaseBEVDepth
from layers.backbones.rvt_lss_fpn import RVTLSSFPN
from layers.backbones.pts_backbone import PtsBackbone
from layers.fuser.multimodal_feature_aggregation import MFAFuser
from layers.heads.bev_depth_head_det import BEVDepthHead
11,825
logger = mmcv.utils.get_logger('mmdet') logger.setLevel('WARNING') __all__ = ['CameraRadarNetDet'] class CameraRadarNetDet(BaseBEVDepth): """Source code of `CRN`, `https://arxiv.org/abs/2304.00670`. Args: backbone_img_conf (dict): Config of image backbone. backbone_pts_conf (dict): Config of point backbone. fuser_conf (dict): Config of BEV feature fuser. head_conf (dict): Config of head. """ def __init__(self, backbone_img_conf, backbone_pts_conf, fuser_conf, head_conf): super(BaseBEVDepth, self).__init__() self.backbone_img = RVTLSSFPN(**backbone_img_conf) self.backbone_pts = PtsBackbone(**backbone_pts_conf)
logger = mmcv.utils.get_logger('mmdet') logger.setLevel('WARNING') __all__ = ['CameraRadarNetDet'] class CameraRadarNetDet(BaseBEVDepth): """Source code of `CRN`, `https://arxiv.org/abs/2304.00670`. Args: backbone_img_conf (dict): Config of image backbone. backbone_pts_conf (dict): Config of point backbone. fuser_conf (dict): Config of BEV feature fuser. head_conf (dict): Config of head. """ def __init__(self, backbone_img_conf, backbone_pts_conf, fuser_conf, head_conf): super(BaseBEVDepth, self).__init__() self.backbone_img = RVTLSSFPN(**backbone_img_conf) self.backbone_pts = PtsBackbone(**backbone_pts_conf)
self.fuser = MFAFuser(**fuser_conf)
3
2023-12-06 14:57:49+00:00
16k
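The record above pairs a truncated constructor (cropped_code ends at `self.backbone_pts = PtsBackbone(**backbone_pts_conf)`) with `next_line` as its completion target. A minimal, runnable sketch of how those fields fit together is shown below; the `_Stub` stand-ins and the final `self.head = BEVDepthHead(**head_conf)` line are assumptions added for illustration and are not taken from the record.

# Illustrative stand-ins so the constructor pattern can execute without the CRN code base;
# the real classes come from the record's import_statement (RVTLSSFPN, PtsBackbone, MFAFuser, BEVDepthHead).
class _Stub:
    def __init__(self, **conf):
        self.conf = conf

RVTLSSFPN = PtsBackbone = MFAFuser = BEVDepthHead = _Stub

class BaseBEVDepth:  # stand-in for models.base_bev_depth.BaseBEVDepth
    pass

class CameraRadarNetDet(BaseBEVDepth):
    """Mirrors the constructor shown in the record's cropped_code."""
    def __init__(self, backbone_img_conf, backbone_pts_conf, fuser_conf, head_conf):
        super(BaseBEVDepth, self).__init__()
        self.backbone_img = RVTLSSFPN(**backbone_img_conf)
        self.backbone_pts = PtsBackbone(**backbone_pts_conf)
        self.fuser = MFAFuser(**fuser_conf)    # the record's next_line (completion target)
        self.head = BEVDepthHead(**head_conf)  # assumption: implied by head_conf, not shown in the record

model = CameraRadarNetDet({}, {}, {}, {})
print(type(model.fuser) is _Stub)  # True; _Stub stands in for MFAFuser here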
LIU-Yuxin/SyncMVD
src/pipeline.py
[ { "identifier": "UVProjection", "path": "src/renderer/project.py", "snippet": "class UVProjection():\n\tdef __init__(self, texture_size=96, render_size=64, sampling_mode=\"nearest\", channels=3, device=None):\n\t\tself.channels = channels\n\t\tself.device = device or torch.device(\"cpu\")\n\t\tself.lights = AmbientLights(ambient_color=((1.0,)*channels,), device=self.device)\n\t\tself.target_size = (texture_size,texture_size)\n\t\tself.render_size = render_size\n\t\tself.sampling_mode = sampling_mode\n\n\n\t# Load obj mesh, rescale the mesh to fit into the bounding box\n\tdef load_mesh(self, mesh_path, scale_factor=2.0, auto_center=True, autouv=False):\n\t\tmesh = load_objs_as_meshes([mesh_path], device=self.device)\n\t\tif auto_center:\n\t\t\tverts = mesh.verts_packed()\n\t\t\tmax_bb = (verts - 0).max(0)[0]\n\t\t\tmin_bb = (verts - 0).min(0)[0]\n\t\t\tscale = (max_bb - min_bb).max()/2\n\t\t\tcenter = (max_bb+min_bb) /2\n\t\t\tmesh.offset_verts_(-center)\n\t\t\tmesh.scale_verts_((scale_factor / float(scale)))\t\t\n\t\telse:\n\t\t\tmesh.scale_verts_((scale_factor))\n\n\t\tif autouv or (mesh.textures is None):\n\t\t\tmesh = self.uv_unwrap(mesh)\n\t\tself.mesh = mesh\n\n\n\tdef load_glb_mesh(self, mesh_path, scale_factor=2.0, auto_center=True, autouv=False):\n\t\tfrom pytorch3d.io.experimental_gltf_io import MeshGlbFormat\n\t\tio = IO()\n\t\tio.register_meshes_format(MeshGlbFormat())\n\t\twith open(mesh_path, \"rb\") as f:\n\t\t\tmesh = io.load_mesh(f, include_textures=True, device=self.device)\n\t\tif auto_center:\n\t\t\tverts = mesh.verts_packed()\n\t\t\tmax_bb = (verts - 0).max(0)[0]\n\t\t\tmin_bb = (verts - 0).min(0)[0]\n\t\t\tscale = (max_bb - min_bb).max()/2 \n\t\t\tcenter = (max_bb+min_bb) /2\n\t\t\tmesh.offset_verts_(-center)\n\t\t\tmesh.scale_verts_((scale_factor / float(scale)))\n\t\telse:\n\t\t\tmesh.scale_verts_((scale_factor))\n\t\tif autouv or (mesh.textures is None):\n\t\t\tmesh = self.uv_unwrap(mesh)\n\t\tself.mesh = mesh\n\n\n\t# Save obj mesh\n\tdef save_mesh(self, mesh_path, texture):\n\t\tsave_obj(mesh_path, \n\t\t\t\tself.mesh.verts_list()[0],\n\t\t\t\tself.mesh.faces_list()[0],\n\t\t\t\tverts_uvs= self.mesh.textures.verts_uvs_list()[0],\n\t\t\t\tfaces_uvs= self.mesh.textures.faces_uvs_list()[0],\n\t\t\t\ttexture_map=texture)\n\n\t# Code referred to TEXTure code (https://github.com/TEXTurePaper/TEXTurePaper.git)\n\tdef uv_unwrap(self, mesh):\n\t\tverts_list = mesh.verts_list()[0]\n\t\tfaces_list = mesh.faces_list()[0]\n\n\n\t\timport xatlas\n\t\timport numpy as np\n\t\tv_np = verts_list.cpu().numpy()\n\t\tf_np = faces_list.int().cpu().numpy()\n\t\tatlas = xatlas.Atlas()\n\t\tatlas.add_mesh(v_np, f_np)\n\t\tchart_options = xatlas.ChartOptions()\n\t\tchart_options.max_iterations = 4\n\t\tatlas.generate(chart_options=chart_options)\n\t\tvmapping, ft_np, vt_np = atlas[0] # [N], [M, 3], [N, 2]\n\n\t\tvt = torch.from_numpy(vt_np.astype(np.float32)).type(verts_list.dtype).to(mesh.device)\n\t\tft = torch.from_numpy(ft_np.astype(np.int64)).type(faces_list.dtype).to(mesh.device)\n\n\t\tnew_map = torch.zeros(self.target_size+(self.channels,), device=mesh.device)\n\t\tnew_tex = TexturesUV(\n\t\t\t[new_map], \n\t\t\t[ft], \n\t\t\t[vt], \n\t\t\tsampling_mode=self.sampling_mode\n\t\t\t)\n\n\t\tmesh.textures = new_tex\n\t\treturn mesh\n\n\n\t'''\n\t\tA functions that disconnect faces in the mesh according to\n\t\tits UV seams. 
The number of vertices are made equal to the\n\t\tnumber of unique vertices its UV layout, while the faces list\n\t\tis intact.\n\t'''\n\tdef disconnect_faces(self):\n\t\tmesh = self.mesh\n\t\tverts_list = mesh.verts_list()\n\t\tfaces_list = mesh.faces_list()\n\t\tverts_uvs_list = mesh.textures.verts_uvs_list()\n\t\tfaces_uvs_list = mesh.textures.faces_uvs_list()\n\t\tpacked_list = [v[f] for v,f in zip(verts_list, faces_list)]\n\t\tverts_disconnect_list = [\n\t\t\ttorch.zeros(\n\t\t\t\t(verts_uvs_list[i].shape[0], 3), \n\t\t\t\tdtype=verts_list[0].dtype, \n\t\t\t\tdevice=verts_list[0].device\n\t\t\t) \n\t\t\tfor i in range(len(verts_list))]\n\t\tfor i in range(len(verts_list)):\n\t\t\tverts_disconnect_list[i][faces_uvs_list] = packed_list[i]\n\t\tassert not mesh.has_verts_normals(), \"Not implemented for vertex normals\"\n\t\tself.mesh_d = Meshes(verts_disconnect_list, faces_uvs_list, mesh.textures)\n\t\treturn self.mesh_d\n\n\n\t'''\n\t\tA function that construct a temp mesh for back-projection.\n\t\tTake a disconnected mesh and a rasterizer, the function calculates\n\t\tthe projected faces as the UV, as use its original UV with pseudo\n\t\tz value as world space geometry.\n\t'''\n\tdef construct_uv_mesh(self):\n\t\tmesh = self.mesh_d\n\t\tverts_list = mesh.verts_list()\n\t\tverts_uvs_list = mesh.textures.verts_uvs_list()\n\t\t# faces_list = [torch.flip(faces, [-1]) for faces in mesh.faces_list()]\n\t\tnew_verts_list = []\n\t\tfor i, (verts, verts_uv) in enumerate(zip(verts_list, verts_uvs_list)):\n\t\t\tverts = verts.clone()\n\t\t\tverts_uv = verts_uv.clone()\n\t\t\tverts[...,0:2] = verts_uv[...,:]\n\t\t\tverts = (verts - 0.5) * 2\n\t\t\tverts[...,2] *= 1\n\t\t\tnew_verts_list.append(verts)\n\t\ttextures_uv = mesh.textures.clone()\n\t\tself.mesh_uv = Meshes(new_verts_list, mesh.faces_list(), textures_uv)\n\t\treturn self.mesh_uv\n\n\n\t# Set texture for the current mesh.\n\tdef set_texture_map(self, texture):\n\t\tnew_map = texture.permute(1, 2, 0)\n\t\tnew_map = new_map.to(self.device)\n\t\tnew_tex = TexturesUV(\n\t\t\t[new_map], \n\t\t\tself.mesh.textures.faces_uvs_padded(), \n\t\t\tself.mesh.textures.verts_uvs_padded(), \n\t\t\tsampling_mode=self.sampling_mode\n\t\t\t)\n\t\tself.mesh.textures = new_tex\n\n\n\t# Set the initial normal noise texture\n\t# No generator here for replication of the experiment result. 
Add one as you wish\n\tdef set_noise_texture(self, channels=None):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\t\tnoise_texture = torch.normal(0, 1, (channels,) + self.target_size, device=self.device)\n\t\tself.set_texture_map(noise_texture)\n\t\treturn noise_texture\n\n\n\t# Set the cameras given the camera poses and centers\n\tdef set_cameras(self, camera_poses, centers=None, camera_distance=2.7, scale=None):\n\t\telev = torch.FloatTensor([pose[0] for pose in camera_poses])\n\t\tazim = torch.FloatTensor([pose[1] for pose in camera_poses])\n\t\tR, T = look_at_view_transform(dist=camera_distance, elev=elev, azim=azim, at=centers or ((0,0,0),))\n\t\tself.cameras = FoVOrthographicCameras(device=self.device, R=R, T=T, scale_xyz=scale or ((1,1,1),))\n\n\n\t# Set all necessary internal data for rendering and texture baking\n\t# Can be used to refresh after changing camera positions\n\tdef set_cameras_and_render_settings(self, camera_poses, centers=None, camera_distance=2.7, render_size=None, scale=None):\n\t\tself.set_cameras(camera_poses, centers, camera_distance, scale=scale)\n\t\tif render_size is None:\n\t\t\trender_size = self.render_size\n\t\tif not hasattr(self, \"renderer\"):\n\t\t\tself.setup_renderer(size=render_size)\n\t\tif not hasattr(self, \"mesh_d\"):\n\t\t\tself.disconnect_faces()\n\t\tif not hasattr(self, \"mesh_uv\"):\n\t\t\tself.construct_uv_mesh()\n\t\tself.calculate_tex_gradient()\n\t\tself.calculate_visible_triangle_mask()\n\t\t_,_,_,cos_maps,_, _ = self.render_geometry()\n\t\tself.calculate_cos_angle_weights(cos_maps)\n\n\n\t# Setup renderers for rendering\n\t# max faces per bin set to 30000 to avoid overflow in many test cases.\n\t# You can use default value to let pytorch3d handle that for you.\n\tdef setup_renderer(self, size=64, blur=0.0, face_per_pix=1, perspective_correct=False, channels=None):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\n\t\tself.raster_settings = RasterizationSettings(\n\t\t\timage_size=size, \n\t\t\tblur_radius=blur, \n\t\t\tfaces_per_pixel=face_per_pix,\n\t\t\tperspective_correct=perspective_correct,\n\t\t\tcull_backfaces=True,\n\t\t\tmax_faces_per_bin=30000,\n\t\t)\n\n\t\tself.renderer = MeshRenderer(\n\t\t\trasterizer=MeshRasterizer(\n\t\t\t\tcameras=self.cameras, \n\t\t\t\traster_settings=self.raster_settings,\n\n\t\t\t),\n\t\t\tshader=HardNChannelFlatShader(\n\t\t\t\tdevice=self.device, \n\t\t\t\tcameras=self.cameras,\n\t\t\t\tlights=self.lights,\n\t\t\t\tchannels=channels\n\t\t\t\t# materials=materials\n\t\t\t)\n\t\t)\n\n\n\t# Bake screen-space cosine weights to UV space\n\t# May be able to reimplement using the generic \"bake_texture\" function, but it works so leave it here for now\n\[email protected]_grad()\n\tdef calculate_cos_angle_weights(self, cos_angles, fill=True, channels=None):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\t\tcos_maps = []\n\t\ttmp_mesh = self.mesh.clone()\n\t\tfor i in range(len(self.cameras)):\n\t\t\t\n\t\t\tzero_map = torch.zeros(self.target_size+(channels,), device=self.device, requires_grad=True)\n\t\t\toptimizer = torch.optim.SGD([zero_map], lr=1, momentum=0)\n\t\t\toptimizer.zero_grad()\n\t\t\tzero_tex = TexturesUV([zero_map], self.mesh.textures.faces_uvs_padded(), self.mesh.textures.verts_uvs_padded(), sampling_mode=self.sampling_mode)\n\t\t\ttmp_mesh.textures = zero_tex\n\n\t\t\timages_predicted = self.renderer(tmp_mesh, cameras=self.cameras[i], lights=self.lights)\n\n\t\t\tloss = torch.sum((cos_angles[i,:,:,0:1]**1 - 
images_predicted)**2)\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\t\t\tif fill:\n\t\t\t\tzero_map = zero_map.detach() / (self.gradient_maps[i] + 1E-8)\n\t\t\t\tzero_map = voronoi_solve(zero_map, self.gradient_maps[i][...,0])\n\t\t\telse:\n\t\t\t\tzero_map = zero_map.detach() / (self.gradient_maps[i]+1E-8)\n\t\t\tcos_maps.append(zero_map)\n\t\tself.cos_maps = cos_maps\n\n\t\t\n\t# Get geometric info from fragment shader\n\t# Can be used for generating conditioning image and cosine weights\n\t# Returns some information you may not need, remember to release them for memory saving\n\[email protected]_grad()\n\tdef render_geometry(self, image_size=None):\n\t\tif image_size:\n\t\t\tsize = self.renderer.rasterizer.raster_settings.image_size\n\t\t\tself.renderer.rasterizer.raster_settings.image_size = image_size\n\t\tshader = self.renderer.shader\n\t\tself.renderer.shader = HardGeometryShader(device=self.device, cameras=self.cameras[0], lights=self.lights)\n\t\ttmp_mesh = self.mesh.clone()\n\t\t\n\t\tverts, normals, depths, cos_angles, texels, fragments = self.renderer(tmp_mesh.extend(len(self.cameras)), cameras=self.cameras, lights=self.lights)\n\t\tself.renderer.shader = shader\n\n\t\tif image_size:\n\t\t\tself.renderer.rasterizer.raster_settings.image_size = size\n\n\t\treturn verts, normals, depths, cos_angles, texels, fragments\n\n\n\t# Project world normal to view space and normalize\n\[email protected]_grad()\n\tdef decode_view_normal(self, normals):\n\t\tw2v_mat = self.cameras.get_full_projection_transform()\n\t\tnormals_view = torch.clone(normals)[:,:,:,0:3]\n\t\tnormals_view = normals_view.reshape(normals_view.shape[0], -1, 3)\n\t\tnormals_view = w2v_mat.transform_normals(normals_view)\n\t\tnormals_view = normals_view.reshape(normals.shape[0:3]+(3,))\n\t\tnormals_view[:,:,:,2] *= -1\n\t\tnormals = (normals_view[...,0:3]+1) * normals[...,3:] / 2 + torch.FloatTensor(((((0.5,0.5,1))))).to(self.device) * (1 - normals[...,3:])\n\t\t# normals = torch.cat([normal for normal in normals], dim=1)\n\t\tnormals = normals.clamp(0, 1)\n\t\treturn normals\n\n\n\t# Normalize absolute depth to inverse depth\n\[email protected]_grad()\n\tdef decode_normalized_depth(self, depths, batched_norm=False):\n\t\tview_z, mask = depths.unbind(-1)\n\t\tview_z = view_z * mask + 100 * (1-mask)\n\t\tinv_z = 1 / view_z\n\t\tinv_z_min = inv_z * mask + 100 * (1-mask)\n\t\tif not batched_norm:\n\t\t\tmax_ = torch.max(inv_z, 1, keepdim=True)\n\t\t\tmax_ = torch.max(max_[0], 2, keepdim=True)[0]\n\n\t\t\tmin_ = torch.min(inv_z_min, 1, keepdim=True)\n\t\t\tmin_ = torch.min(min_[0], 2, keepdim=True)[0]\n\t\telse:\n\t\t\tmax_ = torch.max(inv_z)\n\t\t\tmin_ = torch.min(inv_z_min)\n\t\tinv_z = (inv_z - min_) / (max_ - min_)\n\t\tinv_z = inv_z.clamp(0,1)\n\t\tinv_z = inv_z[...,None].repeat(1,1,1,3)\n\n\t\treturn inv_z\n\n\n\t# Multiple screen pixels could pass gradient to a same texel\n\t# We can precalculate this gradient strength and use it to normalize gradients when we bake textures\n\[email protected]_grad()\n\tdef calculate_tex_gradient(self, channels=None):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\t\ttmp_mesh = self.mesh.clone()\n\t\tgradient_maps = []\n\t\tfor i in range(len(self.cameras)):\n\t\t\tzero_map = torch.zeros(self.target_size+(channels,), device=self.device, requires_grad=True)\n\t\t\toptimizer = torch.optim.SGD([zero_map], lr=1, momentum=0)\n\t\t\toptimizer.zero_grad()\n\t\t\tzero_tex = TexturesUV([zero_map], self.mesh.textures.faces_uvs_padded(), self.mesh.textures.verts_uvs_padded(), 
sampling_mode=self.sampling_mode)\n\t\t\ttmp_mesh.textures = zero_tex\n\t\t\timages_predicted = self.renderer(tmp_mesh, cameras=self.cameras[i], lights=self.lights)\n\t\t\tloss = torch.sum((1 - images_predicted)**2)\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\n\t\t\tgradient_maps.append(zero_map.detach())\n\n\t\tself.gradient_maps = gradient_maps\n\n\n\t# Get the UV space masks of triangles visible in each view\n\t# First get face ids from each view, then filter pixels on UV space to generate masks\n\[email protected]_grad()\n\tdef calculate_visible_triangle_mask(self, channels=None, image_size=(512,512)):\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\n\t\tpix2face_list = []\n\t\tfor i in range(len(self.cameras)):\n\t\t\tself.renderer.rasterizer.raster_settings.image_size=image_size\n\t\t\tpix2face = self.renderer.rasterizer(self.mesh_d, cameras=self.cameras[i]).pix_to_face\n\t\t\tself.renderer.rasterizer.raster_settings.image_size=self.render_size\n\t\t\tpix2face_list.append(pix2face)\n\n\t\tif not hasattr(self, \"mesh_uv\"):\n\t\t\tself.construct_uv_mesh()\n\n\t\traster_settings = RasterizationSettings(\n\t\t\timage_size=self.target_size, \n\t\t\tblur_radius=0, \n\t\t\tfaces_per_pixel=1,\n\t\t\tperspective_correct=False,\n\t\t\tcull_backfaces=False,\n\t\t\tmax_faces_per_bin=30000,\n\t\t\t)\n\n\t\tR, T = look_at_view_transform(dist=2, elev=0, azim=0)\n\t\tcameras = FoVOrthographicCameras(device=self.device, R=R, T=T)\n\n\t\trasterizer=MeshRasterizer(\n\t\t\tcameras=cameras, \n\t\t\traster_settings=raster_settings\n\t\t)\n\t\tuv_pix2face = rasterizer(self.mesh_uv).pix_to_face\n\n\t\tvisible_triangles = []\n\t\tfor i in range(len(pix2face_list)):\n\t\t\tvalid_faceid = torch.unique(pix2face_list[i])\n\t\t\tvalid_faceid = valid_faceid[1:] if valid_faceid[0]==-1 else valid_faceid\n\t\t\tmask = torch.isin(uv_pix2face[0], valid_faceid, assume_unique=False)\n\t\t\t# uv_pix2face[0][~mask] = -1\n\t\t\ttriangle_mask = torch.ones(self.target_size+(1,), device=self.device)\n\t\t\ttriangle_mask[~mask] = 0\n\t\t\t\n\t\t\ttriangle_mask[:,1:][triangle_mask[:,:-1] > 0] = 1\n\t\t\ttriangle_mask[:,:-1][triangle_mask[:,1:] > 0] = 1\n\t\t\ttriangle_mask[1:,:][triangle_mask[:-1,:] > 0] = 1\n\t\t\ttriangle_mask[:-1,:][triangle_mask[1:,:] > 0] = 1\n\t\t\tvisible_triangles.append(triangle_mask)\n\n\t\tself.visible_triangles = visible_triangles\n\n\n\n\t# Render the current mesh and texture from current cameras\n\tdef render_textured_views(self):\n\t\tmeshes = self.mesh.extend(len(self.cameras))\n\t\timages_predicted = self.renderer(meshes, cameras=self.cameras, lights=self.lights)\n\n\t\treturn [image.permute(2, 0, 1) for image in images_predicted]\n\n\n\t# Bake views into a texture\n\t# First bake into individual textures then combine based on cosine weight\n\[email protected]_grad()\n\tdef bake_texture(self, views=None, main_views=[], cos_weighted=True, channels=None, exp=None, noisy=False, generator=None):\n\t\tif not exp:\n\t\t\texp=1\n\t\tif not channels:\n\t\t\tchannels = self.channels\n\t\tviews = [view.permute(1, 2, 0) for view in views]\n\n\t\ttmp_mesh = self.mesh\n\t\tbake_maps = [torch.zeros(self.target_size+(views[0].shape[2],), device=self.device, requires_grad=True) for view in views]\n\t\toptimizer = torch.optim.SGD(bake_maps, lr=1, momentum=0)\n\t\toptimizer.zero_grad()\n\t\tloss = 0\n\t\tfor i in range(len(self.cameras)): \n\t\t\tbake_tex = TexturesUV([bake_maps[i]], tmp_mesh.textures.faces_uvs_padded(), tmp_mesh.textures.verts_uvs_padded(), 
sampling_mode=self.sampling_mode)\n\t\t\ttmp_mesh.textures = bake_tex\n\t\t\timages_predicted = self.renderer(tmp_mesh, cameras=self.cameras[i], lights=self.lights, device=self.device)\n\t\t\tpredicted_rgb = images_predicted[..., :-1]\n\t\t\tloss += (((predicted_rgb[...] - views[i]))**2).sum()\n\t\tloss.backward(retain_graph=False)\n\t\toptimizer.step()\n\n\t\ttotal_weights = 0\n\t\tbaked = 0\n\t\tfor i in range(len(bake_maps)):\n\t\t\tnormalized_baked_map = bake_maps[i].detach() / (self.gradient_maps[i] + 1E-8)\n\t\t\tbake_map = voronoi_solve(normalized_baked_map, self.gradient_maps[i][...,0])\n\t\t\tweight = self.visible_triangles[i] * (self.cos_maps[i]) ** exp\n\t\t\tif noisy:\n\t\t\t\tnoise = torch.rand(weight.shape[:-1]+(1,), generator=generator).type(weight.dtype).to(weight.device)\n\t\t\t\tweight *= noise\n\t\t\ttotal_weights += weight\n\t\t\tbaked += bake_map * weight\n\t\tbaked /= total_weights + 1E-8\n\t\tbaked = voronoi_solve(baked, total_weights[...,0])\n\n\t\tbake_tex = TexturesUV([baked], tmp_mesh.textures.faces_uvs_padded(), tmp_mesh.textures.verts_uvs_padded(), sampling_mode=self.sampling_mode)\n\t\ttmp_mesh.textures = bake_tex\n\t\textended_mesh = tmp_mesh.extend(len(self.cameras))\n\t\timages_predicted = self.renderer(extended_mesh, cameras=self.cameras, lights=self.lights)\n\t\tlearned_views = [image.permute(2, 0, 1) for image in images_predicted]\n\n\t\treturn learned_views, baked.permute(2, 0, 1), total_weights.permute(2, 0, 1)\n\n\n\t# Move the internel data to a specific device\n\tdef to(self, device):\n\t\tfor mesh_name in [\"mesh\", \"mesh_d\", \"mesh_uv\"]:\n\t\t\tif hasattr(self, mesh_name):\n\t\t\t\tmesh = getattr(self, mesh_name)\n\t\t\t\tsetattr(self, mesh_name, mesh.to(device))\n\t\tfor list_name in [\"visible_triangles\", \"visibility_maps\", \"cos_maps\"]:\n\t\t\tif hasattr(self, list_name):\n\t\t\t\tmap_list = getattr(self, list_name)\n\t\t\t\tfor i in range(len(map_list)):\n\t\t\t\t\tmap_list[i] = map_list[i].to(device)" }, { "identifier": "SamplewiseAttnProcessor2_0", "path": "src/syncmvd/attention.py", "snippet": "class SamplewiseAttnProcessor2_0:\n\tr\"\"\"\n\tProcessor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).\n\t\"\"\"\n\n\tdef __init__(self, custom_attention_mask=None, ref_attention_mask=None, ref_weight=0):\n\t\tif not hasattr(F, \"scaled_dot_product_attention\"):\n\t\t\traise ImportError(\"AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.\")\n\t\tself.ref_weight = ref_weight\n\t\tself.custom_attention_mask = custom_attention_mask\n\t\tself.ref_attention_mask = ref_attention_mask\n\n\tdef __call__(\n\t\tself,\n\t\tattn: Attention,\n\t\thidden_states,\n\t\tencoder_hidden_states=None,\n\t\tattention_mask=None,\n\t\ttemb=None,\n\t):\n\n\t\tresidual = hidden_states\n\n\t\tif attn.spatial_norm is not None:\n\t\t\thidden_states = attn.spatial_norm(hidden_states, temb)\n\n\t\tinput_ndim = hidden_states.ndim\n\n\n\t\tif input_ndim == 4:\n\t\t\tbatch_size, channel, height, width = hidden_states.shape\n\t\t\thidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)\n\n\t\tbatch_size, sequence_length, channels = (\n\t\t\thidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape\n\t\t)\n\n\t\tif attention_mask is not None:\n\t\t\tattention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)\n\t\t\t# scaled_dot_product_attention expects attention_mask shape to be\n\t\t\t# (batch, heads, 
source_length, target_length)\n\t\t\tattention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])\n\n\t\tif attn.group_norm is not None:\n\t\t\thidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)\n\n\t\tquery = attn.to_q(hidden_states)\n\n\t\tif encoder_hidden_states is None:\n\t\t\tencoder_hidden_states = torch.clone(hidden_states)\n\t\telif attn.norm_cross:\n\t\t\tencoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)\n\n\n\t\t'''\n\t\t\treshape encoder hidden state to a single batch\n\t\t'''\n\t\tencoder_hidden_states_f = encoder_hidden_states.reshape(1, -1, channels)\n\n\n\n\t\tkey = attn.to_k(encoder_hidden_states)\n\t\tvalue = attn.to_v(encoder_hidden_states)\n\n\t\tinner_dim = key.shape[-1]\n\t\thead_dim = inner_dim // attn.heads\n\n\t\tquery = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)\n\n\t\t'''\n\t\t\teach time select 1 sample from q and compute with concated kv\n\t\t\tconcat result hidden states afterwards\n\t\t'''\n\t\thidden_state_list = []\n\n\t\tfor b_idx in range(batch_size):\n\t\t\t\n\t\t\tquery_b = query[b_idx:b_idx+1]\n\n\t\t\tif self.ref_weight > 0 or True:\n\t\t\t\tkey_ref = key.clone()\n\t\t\t\tvalue_ref = value.clone()\n\n\t\t\t\tkeys = [key_ref[view_idx] for view_idx in self.ref_attention_mask]\n\t\t\t\tvalues = [value_ref[view_idx] for view_idx in self.ref_attention_mask]\n\n\t\t\t\tkey_ref = torch.stack(keys)\n\t\t\t\tkey_ref = key_ref.view(key_ref.shape[0], -1, attn.heads, head_dim).permute(2, 0, 1, 3).contiguous().view(attn.heads, -1, head_dim)[None,...]\n\n\t\t\t\tvalue_ref = torch.stack(values)\n\t\t\t\tvalue_ref = value_ref.view(value_ref.shape[0], -1, attn.heads, head_dim).permute(2, 0, 1, 3).contiguous().view(attn.heads, -1, head_dim)[None,...]\n\n\t\t\tkey_a = key.clone()\n\t\t\tvalue_a = value.clone()\n\n\t\t\t# key_a = key_a[max(0,b_idx-1):min(b_idx+1,batch_size)+1]\n\n\t\t\tkeys = [key_a[view_idx] for view_idx in self.custom_attention_mask[b_idx]]\n\t\t\tvalues = [value_a[view_idx] for view_idx in self.custom_attention_mask[b_idx]]\n\n\t\t\t# keys = (key_a[b_idx-1], key_a[b_idx], key_a[(b_idx+1)%batch_size])\n\t\t\t# values = (value_a[b_idx-1], value_a[b_idx], value_a[(b_idx+1)%batch_size])\n\t\t\t\n\t\t\t# if b_idx not in [0, batch_size-1, batch_size//2]:\n\t\t\t# \tkeys = keys + (key_a[min(batch_size-2, 2*(batch_size//2) - b_idx)],)\n\t\t\t# \tvalues = values + (value_a[min(batch_size-2, 2*(batch_size//2) - b_idx)],)\n\t\t\tkey_a = torch.stack(keys)\n\t\t\tkey_a = key_a.view(key_a.shape[0], -1, attn.heads, head_dim).permute(2, 0, 1, 3).contiguous().view(attn.heads, -1, head_dim)[None,...]\n\n\t\t\t# value_a = value_a[max(0,b_idx-1):min(b_idx+1,batch_size)+1]\n\t\t\tvalue_a = torch.stack(values)\n\t\t\tvalue_a = value_a.view(value_a.shape[0], -1, attn.heads, head_dim).permute(2, 0, 1, 3).contiguous().view(attn.heads, -1, head_dim)[None,...]\n\n\t\t\thidden_state_a = F.scaled_dot_product_attention(\n\t\t\t\tquery_b, key_a, value_a, attn_mask=None, dropout_p=0.0, is_causal=False\n\t\t\t)\n\n\t\t\tif self.ref_weight > 0 or True:\n\t\t\t\thidden_state_ref = F.scaled_dot_product_attention(\n\t\t\t\t\tquery_b, key_ref, value_ref, attn_mask=None, dropout_p=0.0, is_causal=False\n\t\t\t\t)\n\n\t\t\t\thidden_state = (hidden_state_a + self.ref_weight * hidden_state_ref) / (1+self.ref_weight)\n\t\t\telse:\n\t\t\t\thidden_state = hidden_state_a\n\n\t\t\t# the output of sdp = (batch, num_heads, seq_len, head_dim)\n\t\t\t# TODO: add support for attn.scale when we 
move to Torch 2.1\n\t\t\t\n\t\t\thidden_state_list.append(hidden_state)\n\n\t\thidden_states = torch.cat(hidden_state_list)\n\n\n\t\thidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)\n\t\thidden_states = hidden_states.to(query.dtype)\n\n\t\t# linear proj\n\t\thidden_states = attn.to_out[0](hidden_states)\n\t\t# dropout\n\t\thidden_states = attn.to_out[1](hidden_states)\n\n\t\tif input_ndim == 4:\n\t\t\thidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)\n\n\t\tif attn.residual_connection:\n\t\t\thidden_states = hidden_states + residual\n\n\t\thidden_states = hidden_states / attn.rescale_output_factor\n\n\t\treturn hidden_states" }, { "identifier": "replace_attention_processors", "path": "src/syncmvd/attention.py", "snippet": "def replace_attention_processors(module, processor, attention_mask=None, ref_attention_mask=None, ref_weight=0):\n\tattn_processors = module.attn_processors\n\tfor k, v in attn_processors.items():\n\t\tif \"attn1\" in k:\n\t\t\tattn_processors[k] = processor(custom_attention_mask=attention_mask, ref_attention_mask=ref_attention_mask, ref_weight=ref_weight)\n\tmodule.set_attn_processor(attn_processors)" }, { "identifier": "step_tex", "path": "src/syncmvd/step.py", "snippet": "@torch.no_grad()\ndef step_tex(\n\t\tscheduler,\n\t\tuvp,\n\t\tmodel_output: torch.FloatTensor,\n\t\ttimestep: int,\n\t\tsample: torch.FloatTensor,\n\t\ttexture: None,\n\t\tgenerator=None,\n\t\treturn_dict: bool = True,\n\t\tguidance_scale = 1,\n\t\tmain_views = [],\n\t\thires_original_views = True,\n\t\texp=None,\n\t\tcos_weighted=True\n):\n\tt = timestep\n\n\tprev_t = scheduler.previous_timestep(t)\n\n\tif model_output.shape[1] == sample.shape[1] * 2 and scheduler.variance_type in [\"learned\", \"learned_range\"]:\n\t\tmodel_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)\n\telse:\n\t\tpredicted_variance = None\n\n\t# 1. compute alphas, betas\n\talpha_prod_t = scheduler.alphas_cumprod[t]\n\talpha_prod_t_prev = scheduler.alphas_cumprod[prev_t] if prev_t >= 0 else scheduler.one\n\tbeta_prod_t = 1 - alpha_prod_t\n\tbeta_prod_t_prev = 1 - alpha_prod_t_prev\n\tcurrent_alpha_t = alpha_prod_t / alpha_prod_t_prev\n\tcurrent_beta_t = 1 - current_alpha_t\n\n\t# 2. compute predicted original sample from predicted noise also called\n\t# \"predicted x_0\" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf\n\tif scheduler.config.prediction_type == \"epsilon\":\n\t\tpred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)\n\telif scheduler.config.prediction_type == \"sample\":\n\t\tpred_original_sample = model_output\n\telif scheduler.config.prediction_type == \"v_prediction\":\n\t\tpred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output\n\telse:\n\t\traise ValueError(\n\t\t\tf\"prediction_type given as {scheduler.config.prediction_type} must be one of `epsilon`, `sample` or\"\n\t\t\t\" `v_prediction` for the DDPMScheduler.\"\n\t\t)\n\n\t# 3. Clip or threshold \"predicted x_0\"\n\tif scheduler.config.thresholding:\n\t\tpred_original_sample = scheduler._threshold_sample(pred_original_sample)\n\telif scheduler.config.clip_sample:\n\t\tpred_original_sample = pred_original_sample.clamp(\n\t\t\t-scheduler.config.clip_sample_range, scheduler.config.clip_sample_range\n\t\t)\n\n\t# 4. 
Compute coefficients for pred_original_sample x_0 and current sample x_t\n\t# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf\n\tpred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * current_beta_t) / beta_prod_t\n\tcurrent_sample_coeff = current_alpha_t ** (0.5) * beta_prod_t_prev / beta_prod_t\n\n\t'''\n\t\tAdd multidiffusion here\n\t'''\n\n\tif texture is None:\n\t\tsample_views = [view for view in sample]\n\t\tsample_views, texture, _ = uvp.bake_texture(views=sample_views, main_views=main_views, exp=exp)\n\t\tsample_views = torch.stack(sample_views, axis=0)[:,:-1,...]\n\n\n\toriginal_views = [view for view in pred_original_sample]\n\toriginal_views, original_tex, visibility_weights = uvp.bake_texture(views=original_views, main_views=main_views, exp=exp)\n\tuvp.set_texture_map(original_tex)\n\toriginal_views = uvp.render_textured_views()\n\toriginal_views = torch.stack(original_views, axis=0)[:,:-1,...]\n\n\t# 5. Compute predicted previous sample µ_t\n\t# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf\n\t# pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample\n\tprev_tex = pred_original_sample_coeff * original_tex + current_sample_coeff * texture\n\n\t# 6. Add noise\n\tvariance = 0\n\n\tif predicted_variance is not None:\n\t\tvariance_views = [view for view in predicted_variance]\n\t\tvariance_views, variance_tex, visibility_weights = uvp.bake_texture(views=variance_views, main_views=main_views, cos_weighted=cos_weighted, exp=exp)\n\t\tvariance_views = torch.stack(variance_views, axis=0)[:,:-1,...]\n\telse:\n\t\tvariance_tex = None\n\n\tif t > 0:\n\t\tdevice = texture.device\n\t\tvariance_noise = randn_tensor(\n\t\t\ttexture.shape, generator=generator, device=device, dtype=texture.dtype\n\t\t)\n\t\tif scheduler.variance_type == \"fixed_small_log\":\n\t\t\tvariance = scheduler._get_variance(t, predicted_variance=variance_tex) * variance_noise\n\t\telif scheduler.variance_type == \"learned_range\":\n\t\t\tvariance = scheduler._get_variance(t, predicted_variance=variance_tex)\n\t\t\tvariance = torch.exp(0.5 * variance) * variance_noise\n\t\telse:\n\t\t\tvariance = (scheduler._get_variance(t, predicted_variance=variance_tex) ** 0.5) * variance_noise\n\n\tprev_tex = prev_tex + variance\n\n\tuvp.set_texture_map(prev_tex)\n\tprev_views = uvp.render_textured_views()\n\tpred_prev_sample = torch.clone(sample)\n\tfor i, view in enumerate(prev_views):\n\t\tpred_prev_sample[i] = view[:-1]\n\tmasks = [view[-1:] for view in prev_views]\n\n\treturn {\"prev_sample\": pred_prev_sample, \"pred_original_sample\":pred_original_sample, \"prev_tex\": prev_tex}\n\n\tif not return_dict:\n\t\treturn pred_prev_sample, pred_original_sample\n\tpass" } ]
import os
import numpy as np
import math
import random
import torch
import select
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from PIL import Image
from IPython.display import display
from torch import functional as F
from torch import nn
from torchvision.transforms import Compose, Resize, GaussianBlur, InterpolationMode
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
from diffusers import DDPMScheduler, DDIMScheduler, UniPCMultistepScheduler
from diffusers.models import AutoencoderKL, ControlNetModel, UNet2DConditionModel
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import (
    BaseOutput,
    randn_tensor,
    numpy_to_pil,
    pt_to_pil,
    # make_image_grid,
    is_accelerate_available,
    is_accelerate_version,
    is_compiled_module,
    logging,
    randn_tensor,
    replace_example_docstring
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.models.attention_processor import Attention, AttentionProcessor
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from .renderer.project import UVProjection as UVP
from .syncmvd.attention import SamplewiseAttnProcessor2_0, replace_attention_processors
from .syncmvd.prompt import *
from .syncmvd.step import step_tex
from .utils import *
12,093
for prompt_tag, prompt_embeds in prompt_embeds_groups.items(): if prompt_tag == "positive" or not guess_mode: # controlnet(s) inference control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] # Split into micro-batches according to group meta info # Ignore this feature for now down_block_res_samples_list = [] mid_block_res_sample_list = [] model_input_batches = [torch.index_select(control_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(controlnet_prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] conditioning_images_batches = [torch.index_select(conditioning_images, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch ,prompt_embeds_batch, conditioning_images_batch \ in zip (model_input_batches, prompt_embeds_batches, conditioning_images_batches): down_block_res_samples, mid_block_res_sample = self.controlnet( model_input_batch, t, encoder_hidden_states=prompt_embeds_batch, controlnet_cond=conditioning_images_batch, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False, ) down_block_res_samples_list.append(down_block_res_samples) mid_block_res_sample_list.append(mid_block_res_sample) ''' For the ith element of down_block_res_samples, concat the ith element of all mini-batch result ''' model_input_batches = prompt_embeds_batches = conditioning_images_batches = None if guess_mode: for dbres in down_block_res_samples_list: dbres_sizes = [] for res in dbres: dbres_sizes.append(res.shape) dbres_sizes_list.append(dbres_sizes) for mbres in mid_block_res_sample_list: mbres_size_list.append(mbres.shape) else: # Infered ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. # We copy the tensor shapes from a conditional batch down_block_res_samples_list = [] mid_block_res_sample_list = [] for dbres_sizes in dbres_sizes_list: down_block_res_samples_list.append([torch.zeros(shape, device=self._execution_device, dtype=latents.dtype) for shape in dbres_sizes]) for mbres in mbres_size_list: mid_block_res_sample_list.append(torch.zeros(mbres, device=self._execution_device, dtype=latents.dtype)) dbres_sizes_list = [] mbres_size_list = [] ''' predict the noise residual, split into mini-batches Downblock res samples has n samples, we split each sample into m batches and re group them into m lists of n mini batch samples. 
''' noise_pred_list = [] model_input_batches = [torch.index_select(latent_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch, prompt_embeds_batch, down_block_res_samples_batch, mid_block_res_sample_batch, meta \ in zip(model_input_batches, prompt_embeds_batches, down_block_res_samples_list, mid_block_res_sample_list, self.group_metas): if t > num_timesteps * (1- ref_attention_end): replace_attention_processors(self.unet, SamplewiseAttnProcessor2_0, attention_mask=meta[2], ref_attention_mask=meta[3], ref_weight=1) else: replace_attention_processors(self.unet, SamplewiseAttnProcessor2_0, attention_mask=meta[2], ref_attention_mask=meta[3], ref_weight=0) noise_pred = self.unet( model_input_batch, t, encoder_hidden_states=prompt_embeds_batch, cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples_batch, mid_block_additional_residual=mid_block_res_sample_batch, return_dict=False, )[0] noise_pred_list.append(noise_pred) noise_pred_list = [torch.index_select(noise_pred, dim=0, index=torch.tensor(meta[1], device=self._execution_device)) for noise_pred, meta in zip(noise_pred_list, self.group_metas)] noise_pred = torch.cat(noise_pred_list, dim=0) down_block_res_samples_list = None mid_block_res_sample_list = None noise_pred_list = None model_input_batches = prompt_embeds_batches = down_block_res_samples_batches = mid_block_res_sample_batches = None result_groups[prompt_tag] = noise_pred positive_noise_pred = result_groups["positive"] # perform guidance if do_classifier_free_guidance: noise_pred = result_groups["negative"] + guidance_scale * (positive_noise_pred - result_groups["negative"]) if do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) self.uvp.to(self._execution_device) # compute the previous noisy sample x_t -> x_t-1 # Multi-View step or individual step if t > (1-multiview_diffusion_end)*num_timesteps:
if torch.cuda.is_available(): device = torch.device("cuda:0") torch.cuda.set_device(device) else: device = torch.device("cpu") # Background colors color_constants = {"black": [-1, -1, -1], "white": [1, 1, 1], "maroon": [0, -1, -1], "red": [1, -1, -1], "olive": [0, 0, -1], "yellow": [1, 1, -1], "green": [-1, 0, -1], "lime": [-1 ,1, -1], "teal": [-1, 0, 0], "aqua": [-1, 1, 1], "navy": [-1, -1, 0], "blue": [-1, -1, 1], "purple": [0, -1 , 0], "fuchsia": [1, -1, 1]} color_names = list(color_constants.keys()) # Used to generate depth or normal conditioning images @torch.no_grad() def get_conditioning_images(uvp, output_size, render_size=512, blur_filter=5, cond_type="normal"): verts, normals, depths, cos_maps, texels, fragments = uvp.render_geometry(image_size=render_size) masks = normals[...,3][:,None,...] masks = Resize((output_size//8,)*2, antialias=True)(masks) normals_transforms = Compose([ Resize((output_size,)*2, interpolation=InterpolationMode.BILINEAR, antialias=True), GaussianBlur(blur_filter, blur_filter//3+1)] ) if cond_type == "normal": view_normals = uvp.decode_view_normal(normals).permute(0,3,1,2) *2 - 1 conditional_images = normals_transforms(view_normals) # Some problem here, depth controlnet don't work when depth is normalized # But it do generate using the unnormalized form as below elif cond_type == "depth": view_depths = uvp.decode_normalized_depth(depths).permute(0,3,1,2) conditional_images = normals_transforms(view_depths) return conditional_images, masks # Revert time 0 background to time t to composite with time t foreground @torch.no_grad() def composite_rendered_view(scheduler, backgrounds, foregrounds, masks, t): composited_images = [] for i, (background, foreground, mask) in enumerate(zip(backgrounds, foregrounds, masks)): if t > 0: alphas_cumprod = scheduler.alphas_cumprod[t] noise = torch.normal(0, 1, background.shape, device=background.device) background = (1-alphas_cumprod) * noise + alphas_cumprod * background composited = foreground * mask + background * (1-mask) composited_images.append(composited) composited_tensor = torch.stack(composited_images) return composited_tensor # Split into micro-batches to use less memory in each unet prediction # But need more investigation on reducing memory usage # Assume it has no possitive effect and use a large "max_batch_size" to skip splitting def split_groups(attention_mask, max_batch_size, ref_view=[]): group_sets = [] group = set() ref_group = set() idx = 0 while idx < len(attention_mask): new_group = group | set([idx]) new_ref_group = (ref_group | set(attention_mask[idx] + ref_view)) - new_group if len(new_group) + len(new_ref_group) <= max_batch_size: group = new_group ref_group = new_ref_group idx += 1 else: assert len(group) != 0, "Cannot fit into a group" group_sets.append((group, ref_group)) group = set() ref_group = set() if len(group)>0: group_sets.append((group, ref_group)) group_metas = [] for group, ref_group in group_sets: in_mask = sorted(list(group | ref_group)) out_mask = [] group_attention_masks = [] for idx in in_mask: if idx in group: out_mask.append(in_mask.index(idx)) group_attention_masks.append([in_mask.index(idxx) for idxx in attention_mask[idx] if idxx in in_mask]) ref_attention_mask = [in_mask.index(idx) for idx in ref_view] group_metas.append([in_mask, out_mask, group_attention_masks, ref_attention_mask]) return group_metas ''' MultiView-Diffusion Stable-Diffusion Pipeline Modified from a Diffusers StableDiffusionControlNetPipeline Just mimic the pipeline structure but did not follow any API 
convention ''' class StableSyncMVDPipeline(StableDiffusionControlNetPipeline): def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel]], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = False, ): super().__init__( vae, text_encoder, tokenizer, unet, controlnet, scheduler, safety_checker, feature_extractor, requires_safety_checker ) self.scheduler = DDPMScheduler.from_config(self.scheduler.config) self.model_cpu_offload_seq = "vae->text_encoder->unet->vae" self.enable_model_cpu_offload() self.enable_vae_slicing() self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def initialize_pipeline( self, mesh_path=None, mesh_transform=None, mesh_autouv=None, camera_azims=None, camera_centers=None, top_cameras=True, ref_views=[], latent_size=None, render_rgb_size=None, texture_size=None, texture_rgb_size=None, max_batch_size=24, logging_config=None, ): # Make output dir output_dir = logging_config["output_dir"] self.result_dir = f"{output_dir}/results" self.intermediate_dir = f"{output_dir}/intermediate" dirs = [output_dir, self.result_dir, self.intermediate_dir] for dir_ in dirs: if not os.path.isdir(dir_): os.mkdir(dir_) # Define the cameras for rendering self.camera_poses = [] self.attention_mask=[] self.centers = camera_centers cam_count = len(camera_azims) front_view_diff = 360 back_view_diff = 360 front_view_idx = 0 back_view_idx = 0 for i, azim in enumerate(camera_azims): if azim < 0: azim += 360 self.camera_poses.append((0, azim)) self.attention_mask.append([(cam_count+i-1)%cam_count, i, (i+1)%cam_count]) if abs(azim) < front_view_diff: front_view_idx = i front_view_diff = abs(azim) if abs(azim - 180) < back_view_diff: back_view_idx = i back_view_diff = abs(azim - 180) # Add two additional cameras for painting the top surfaces if top_cameras: self.camera_poses.append((30, 0)) self.camera_poses.append((30, 180)) self.attention_mask.append([front_view_idx, cam_count]) self.attention_mask.append([back_view_idx, cam_count+1]) # Reference view for attention (all views attend the the views in this list) # A forward view will be used if not specified if len(ref_views) == 0: ref_views = [front_view_idx] # Calculate in-group attention mask self.group_metas = split_groups(self.attention_mask, max_batch_size, ref_views) # Set up pytorch3D for projection between screen space and UV space # uvp is for latent and uvp_rgb for rgb color self.uvp = UVP(texture_size=texture_size, render_size=latent_size, sampling_mode="nearest", channels=4, device=self._execution_device) if mesh_path.lower().endswith(".obj"): self.uvp.load_mesh(mesh_path, scale_factor=mesh_transform["scale"] or 1, autouv=mesh_autouv) elif mesh_path.lower().endswith(".glb"): self.uvp.load_glb_mesh(mesh_path, scale_factor=mesh_transform["scale"] or 1, autouv=mesh_autouv) else: assert False, "The mesh file format is not supported. Use .obj or .glb." 
self.uvp.set_cameras_and_render_settings(self.camera_poses, centers=camera_centers, camera_distance=4.0) self.uvp_rgb = UVP(texture_size=texture_rgb_size, render_size=render_rgb_size, sampling_mode="nearest", channels=3, device=self._execution_device) self.uvp_rgb.mesh = self.uvp.mesh.clone() self.uvp_rgb.set_cameras_and_render_settings(self.camera_poses, centers=camera_centers, camera_distance=4.0) _,_,_,cos_maps,_, _ = self.uvp_rgb.render_geometry() self.uvp_rgb.calculate_cos_angle_weights(cos_maps, fill=False) # Save some VRAM del _, cos_maps self.uvp.to("cpu") self.uvp_rgb.to("cpu") color_images = torch.FloatTensor([color_constants[name] for name in color_names]).reshape(-1,3,1,1).to(dtype=self.text_encoder.dtype, device=self._execution_device) color_images = torch.ones( (1,1,latent_size*8, latent_size*8), device=self._execution_device, dtype=self.text_encoder.dtype ) * color_images color_images *= ((0.5*color_images)+0.5) color_latents = encode_latents(self.vae, color_images) self.color_latents = {color[0]:color[1] for color in zip(color_names, [latent for latent in color_latents])} self.vae = self.vae.to("cpu") print("Done Initialization") ''' Modified from a StableDiffusion ControlNet pipeline Multi ControlNet not supported yet ''' @torch.no_grad() def __call__( self, prompt: str = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: str = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, return_dict: bool = False, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, max_batch_size=6, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_guess_mode: bool = False, controlnet_conditioning_scale: Union[float, List[float]] = 0.7, controlnet_conditioning_end_scale: Union[float, List[float]] = 0.9, control_guidance_start: Union[float, List[float]] = 0.0, control_guidance_end: Union[float, List[float]] = 0.99, guidance_rescale: float = 0.0, mesh_path: str = None, mesh_transform: dict = None, mesh_autouv = False, camera_azims=None, camera_centers=None, top_cameras=True, texture_size = 1536, render_rgb_size=1024, texture_rgb_size = 1024, multiview_diffusion_end=0.8, shuffle_background_change=0.4, shuffle_background_end=0.99, #0.4 use_directional_prompt=True, ref_attention_end=0.2, logging_config=None, cond_type="depth", ): # Setup pipeline settings self.initialize_pipeline( mesh_path=mesh_path, mesh_transform=mesh_transform, mesh_autouv=mesh_autouv, camera_azims=camera_azims, camera_centers=camera_centers, top_cameras=top_cameras, ref_views=[], latent_size=height//8, render_rgb_size=render_rgb_size, texture_size=texture_size, texture_rgb_size=texture_rgb_size, max_batch_size=max_batch_size, logging_config=logging_config ) num_timesteps = self.scheduler.config.num_train_timesteps initial_controlnet_conditioning_scale = controlnet_conditioning_scale log_interval = logging_config.get("log_interval", 10) view_fast_preview = logging_config.get("view_fast_preview", True) tex_fast_preview = logging_config.get("tex_fast_preview", True) controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet # align format for control guidance if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not 
isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): # mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 mult = 1 control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [ control_guidance_end ] # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, torch.zeros((1,3,height,width), device=self._execution_device), callback_steps, negative_prompt, None, None, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, list): assert len(prompt) == 1 and len(negative_prompt) == 1, "Only implemented for 1 (negative) prompt" assert num_images_per_prompt == 1, "Only implemented for 1 image per-prompt" batch_size = len(self.uvp.cameras) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): # controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = ( controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions ) guess_mode = controlnet_guess_mode or global_pool_conditions # 3. Encode input prompt prompt, negative_prompt = prepare_directional_prompt(prompt, negative_prompt) text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=None, negative_prompt_embeds=None, lora_scale=text_encoder_lora_scale, ) negative_prompt_embeds, prompt_embeds = torch.chunk(prompt_embeds, 2) prompt_embed_dict = dict(zip(direction_names, [emb for emb in prompt_embeds])) negative_prompt_embed_dict = dict(zip(direction_names, [emb for emb in negative_prompt_embeds])) # (4. Prepare image) This pipeline use internal conditional images from Pytorch3D self.uvp.to(self._execution_device) conditioning_images, masks = get_conditioning_images(self.uvp, height, cond_type=cond_type) conditioning_images = conditioning_images.type(prompt_embeds.dtype) cond = (conditioning_images/2+0.5).permute(0,2,3,1).cpu().numpy() cond = np.concatenate([img for img in cond], axis=1) numpy_to_pil(cond)[0].save(f"{self.intermediate_dir}/cond.jpg") # 5. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 6. 
Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, None, ) latent_tex = self.uvp.set_noise_texture() noise_views = self.uvp.render_textured_views() foregrounds = [view[:-1] for view in noise_views] masks = [view[-1:] for view in noise_views] composited_tensor = composite_rendered_view(self.scheduler, latents, foregrounds, masks, timesteps[0]+1) latents = composited_tensor.type(latents.dtype) self.uvp.to("cpu") # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7.1 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) # 8. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order intermediate_results = [] background_colors = [random.choice(list(color_constants.keys())) for i in range(len(self.camera_poses))] dbres_sizes_list = [] mbres_size_list = [] with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # mix prompt embeds according to azim angle positive_prompt_embeds = [azim_prompt(prompt_embed_dict, pose) for pose in self.camera_poses] positive_prompt_embeds = torch.stack(positive_prompt_embeds, axis=0) negative_prompt_embeds = [azim_neg_prompt(negative_prompt_embed_dict, pose) for pose in self.camera_poses] negative_prompt_embeds = torch.stack(negative_prompt_embeds, axis=0) # expand the latents if we are doing classifier free guidance latent_model_input = self.scheduler.scale_model_input(latents, t) ''' Use groups to manage prompt and results Make sure negative and positive prompt does not perform attention together ''' prompt_embeds_groups = {"positive": positive_prompt_embeds} result_groups = {} if do_classifier_free_guidance: prompt_embeds_groups["negative"] = negative_prompt_embeds for prompt_tag, prompt_embeds in prompt_embeds_groups.items(): if prompt_tag == "positive" or not guess_mode: # controlnet(s) inference control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] # Split into micro-batches according to group meta info # Ignore this feature for now down_block_res_samples_list = [] mid_block_res_sample_list = [] model_input_batches = [torch.index_select(control_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(controlnet_prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] conditioning_images_batches = [torch.index_select(conditioning_images, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch ,prompt_embeds_batch, conditioning_images_batch \ in zip (model_input_batches, 
prompt_embeds_batches, conditioning_images_batches): down_block_res_samples, mid_block_res_sample = self.controlnet( model_input_batch, t, encoder_hidden_states=prompt_embeds_batch, controlnet_cond=conditioning_images_batch, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False, ) down_block_res_samples_list.append(down_block_res_samples) mid_block_res_sample_list.append(mid_block_res_sample) ''' For the ith element of down_block_res_samples, concat the ith element of all mini-batch result ''' model_input_batches = prompt_embeds_batches = conditioning_images_batches = None if guess_mode: for dbres in down_block_res_samples_list: dbres_sizes = [] for res in dbres: dbres_sizes.append(res.shape) dbres_sizes_list.append(dbres_sizes) for mbres in mid_block_res_sample_list: mbres_size_list.append(mbres.shape) else: # Infered ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. # We copy the tensor shapes from a conditional batch down_block_res_samples_list = [] mid_block_res_sample_list = [] for dbres_sizes in dbres_sizes_list: down_block_res_samples_list.append([torch.zeros(shape, device=self._execution_device, dtype=latents.dtype) for shape in dbres_sizes]) for mbres in mbres_size_list: mid_block_res_sample_list.append(torch.zeros(mbres, device=self._execution_device, dtype=latents.dtype)) dbres_sizes_list = [] mbres_size_list = [] ''' predict the noise residual, split into mini-batches Downblock res samples has n samples, we split each sample into m batches and re group them into m lists of n mini batch samples. ''' noise_pred_list = [] model_input_batches = [torch.index_select(latent_model_input, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] prompt_embeds_batches = [torch.index_select(prompt_embeds, dim=0, index=torch.tensor(meta[0], device=self._execution_device)) for meta in self.group_metas] for model_input_batch, prompt_embeds_batch, down_block_res_samples_batch, mid_block_res_sample_batch, meta \ in zip(model_input_batches, prompt_embeds_batches, down_block_res_samples_list, mid_block_res_sample_list, self.group_metas): if t > num_timesteps * (1- ref_attention_end): replace_attention_processors(self.unet, SamplewiseAttnProcessor2_0, attention_mask=meta[2], ref_attention_mask=meta[3], ref_weight=1) else: replace_attention_processors(self.unet, SamplewiseAttnProcessor2_0, attention_mask=meta[2], ref_attention_mask=meta[3], ref_weight=0) noise_pred = self.unet( model_input_batch, t, encoder_hidden_states=prompt_embeds_batch, cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples_batch, mid_block_additional_residual=mid_block_res_sample_batch, return_dict=False, )[0] noise_pred_list.append(noise_pred) noise_pred_list = [torch.index_select(noise_pred, dim=0, index=torch.tensor(meta[1], device=self._execution_device)) for noise_pred, meta in zip(noise_pred_list, self.group_metas)] noise_pred = torch.cat(noise_pred_list, dim=0) down_block_res_samples_list = None mid_block_res_sample_list = None noise_pred_list = None model_input_batches = prompt_embeds_batches = down_block_res_samples_batches = mid_block_res_sample_batches = None result_groups[prompt_tag] = noise_pred positive_noise_pred = result_groups["positive"] # perform guidance if do_classifier_free_guidance: noise_pred = result_groups["negative"] + guidance_scale * (positive_noise_pred - 
result_groups["negative"]) if do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) self.uvp.to(self._execution_device) # compute the previous noisy sample x_t -> x_t-1 # Multi-View step or individual step if t > (1-multiview_diffusion_end)*num_timesteps:
step_results = step_tex(
3
2023-12-09 03:27:58+00:00
16k
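The denoising loop in the pipeline code above also builds a per-timestep controlnet_keep schedule from control_guidance_start / control_guidance_end. A small standalone sketch of that schedule for a single ControlNet, with toy values in place of the pipeline's timesteps:

num_steps = 10                                    # toy number of denoising steps
control_guidance_start, control_guidance_end = 0.0, 0.8

controlnet_keep = []
for i in range(num_steps):
    # 1.0 while the current step fraction lies inside the [start, end] window, else 0.0
    keep = 1.0 - float(i / num_steps < control_guidance_start or (i + 1) / num_steps > control_guidance_end)
    controlnet_keep.append(keep)

print(controlnet_keep)  # steps 0-7 keep the ControlNet conditioning, the last two steps drop it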
SqueezeBits/owlite
owlite/backend/onnx/export.py
[ { "identifier": "OwLiteStatus", "path": "owlite/enums/owlite_status.py", "snippet": "class OwLiteStatus(Enum):\n \"\"\"The enum for specifying model status about compression with `GraphModule.meta`\n\n Attributes\n NOT_COMPRESSED: The model is symbolic traced, but before inserting quantizers.\n COMPRESSED: The quantizers are inserted into the model, but not calibrated.\n CALIBRATED: The calibration of the quantizers in the model is completed.\n \"\"\"\n\n NOT_COMPRESSED = 0\n COMPRESSED = 1\n CALIBRATED = 2\n\n def __repr__(self) -> str:\n return self.name\n\n def __str__(self) -> str:\n return self.name" }, { "identifier": "log", "path": "owlite/logger.py", "snippet": "class Logger(logging.Logger):\n class _WarningFilterContext:\n class WarningFilter(logging.Filter):\n ENV_VAR = \"OWLITE_LOG_LEVEL\"\n DEBUG_WARNING = 15\n ULTRA_VERBOSE = -10\n def ignore_warnings(self):\n def __init__(self, logger) -> None:\n def __enter__(self):\n def filter(self, record):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def debug_warning(self, msg, *args, **kwargs):\n def level(self) -> int:\n def level(self, value):\ndef suppress_owlite_warnings(cls):\n def new_init(self, *args, **kwargs):" }, { "identifier": "FakeQuantizer", "path": "owlite/nn/fake_quantizer.py", "snippet": "class FakeQuantizer(torch.nn.Module):\n \"\"\"An implementation of fake quantization (a.k.a. quantization simulation)\n\n Attributes:\n step_size (torch.Tensor): The quantization scale, determining the magnitude of each quantization interval.\n zero_point (torch.Tensor): The quantization zero_point. It may be expressed as a float in the context\n of asymmetric quantization, while for symmetric quantization, it is fixed at zero tensor.\n precision (torch.IntTensor): The number of bits used for quantization.\n symmetric (torch.BoolTensor): Whether symmetric quantization is applied.\n unsigned (torch.BoolTensor): Whether unsigned quantization is applied\n per_channel (torch.BoolTensor): Whether per-channel quantization or per-tensor quantization is applied\n learn_zero_point (torch.BoolTensor): whether the zero point is learnable.\n grad_scale (torch.FloatTensor): The gradient scaling factor of quantization parameters.\n _narrow_range (torch.BoolTensor): Whether a narrow range is used in quantization.\n \"\"\"\n\n precision: torch.IntTensor\n symmetric: torch.BoolTensor\n unsigned: torch.BoolTensor\n per_channel: torch.BoolTensor\n learn_zero_point: torch.BoolTensor\n grad_scale: torch.FloatTensor\n _narrow_range: torch.BoolTensor\n\n @classmethod\n def create(\n cls,\n options: Optional[FakeQuantizerOptions],\n channel_size: Optional[int] = None,\n enable: bool = True,\n narrow_range: bool = False,\n ) -> Optional[\"FakeQuantizer\"]:\n \"\"\"Creates a `FakeQuantizer` instance if options is not `None`, otherwise returns `None`\n\n Args:\n options (Optional[FakeQuantizerOptions]): Options for fake quantizer to return. If `None`,\n dose notcreate fake quantizer.\n channel_size (Optional[int], optional): Channel size of per-channel quantization. Not used in\n per-tensor quantization. If `None`, no channel size is set. Defaults to `None`.\n enable (bool, optional): If true, returns the enabled quantzier. If false, returns the quantizer\n that was disabled. Defaults to `True`\n narrow_range (bool, optional): If true, returns the quantzier with a narrow range. If false, it\n does not have a narrow range. 
Defaults to `False`\n\n Returns:\n Optional[FakeQuantizer]: If the `options` is valid for quantization returns created fake quantizer.\n Otherwise return `None`.\n \"\"\"\n if options is None or options.precision > 8:\n return None\n return FakeQuantizer(options, channel_size, enable, narrow_range)\n\n def __init__(\n self,\n options: FakeQuantizerOptions,\n channel_size: Optional[int] = None,\n enable: bool = True,\n narrow_range: bool = False,\n ):\n \"\"\"Initializes a FakeQuantizer instance.\n\n Args:\n options (QuantizerOptions): options\n channel_size (Optional[int], optional): The channel size for per-channel quantization. Defaults to None.\n This value is required only when `options.per_channel` is `True`, otherwise has no effect.\n It can be set after the instantiation of the object, must be set before calling its `forward` method.\n enable (bool, optional): whether to enable this quantizer object as soon as it is initialized.\n Defaults to True.\n narrow_range (bool, optional): Use symmetric integer range for signed quantization\n eg) [-127,127] instead of [-128,127] for num_bits=8. Default False.\n\n Raises:\n ValueError: if `options.ptq_calibration` is \"percentile\" but `options.percentile` is `None`.\n \"\"\"\n super().__init__()\n self.register_buffer(\"precision\", torch.tensor(options.precision))\n self.register_buffer(\"symmetric\", torch.tensor(options.symmetric))\n self.register_buffer(\"unsigned\", torch.tensor(options.unsigned))\n self.register_buffer(\"per_channel\", torch.tensor(options.per_channel))\n if not self.symmetric.item() and self.per_channel.item():\n raise RuntimeError(\"asymmetric per_channel quantization is not available\")\n self.register_buffer(\"learn_zero_point\", torch.tensor(options.learn_zero_point))\n self.register_buffer(\"grad_scale\", torch.tensor(options.grad_scale))\n if narrow_range and not (self.symmetric.item() and not self.unsigned.item()):\n log.warning(\n \"narrow_range should only be used with symmetric signed quantization.\\n\"\n \"(narrow_range, symmetric, unsigned) = \"\n f\"({narrow_range}, {self.symmetric.item()}, {self.unsigned.item()})\"\n )\n self.register_buffer(\"_narrow_range\", torch.tensor(narrow_range))\n\n if self.per_channel:\n if channel_size is not None:\n self.channel_size = channel_size\n else:\n self.step_size = torch.nn.Parameter(torch.ones(1))\n self.zero_point = torch.nn.Parameter(\n torch.zeros(1),\n requires_grad=bool(not self.symmetric.item() and self.learn_zero_point.item()),\n )\n self._is_enabled = enable\n self.is_zero_point_folded = False\n self.qat_backward_type = options.qat_backward\n self.ptq_calibration = options.ptq_calibration\n calibrator_class = options.ptq_calibration.calibrator_class\n if options.ptq_calibration == PTQCalibrationType.percentile:\n if options.percentile is None:\n raise ValueError(\"percentile value is required for percentile PTQ calibrator\")\n self.calibrator = calibrator_class(self, options.percentile)\n else:\n self.calibrator = calibrator_class(self)\n\n @property\n def qat_function(\n self,\n ) -> FakeQuantFunc:\n \"\"\"The autograd function providing forward and backward methods of this fake quantizer\n for the quantization-aware training\"\"\"\n return self.qat_backward_type.function\n\n @property\n def channel_size(self) -> Optional[int]:\n \"\"\"The channel size for the input tensor of this fake quantizer\"\"\"\n if not self.per_channel.item():\n return 1\n step_size = getattr(self, \"step_size\", None)\n zero_point = getattr(self, \"zero_point\", None)\n if not (\n 
isinstance(step_size, (torch.nn.Parameter, torch.Tensor))\n and isinstance(zero_point, (torch.nn.Parameter, torch.Tensor))\n ):\n return None\n if not (len(step_size.shape) == 1 and step_size.shape == zero_point.shape):\n log.error(\"step_size and zero_point have invalid shapes.\")\n log.debug(f\"self={self}\\n\" \"self.step_size={step_size}\\n\" \"self.zero_point={zero_point}\\n\")\n raise ValueError(\"step_size and zero_point have invalid shapes\")\n return int(step_size.shape[0])\n\n @channel_size.setter\n def channel_size(self, value: Optional[int]) -> None:\n \"\"\"Sets the channel size for the input tensor of this fake quantizer. Note that this property must be set at\n least (and exactly) once before calling this fake quantizer instance when `per_channel=True`\n \"\"\"\n if not self.per_channel.item():\n log.warning(\n \"Setting channel_size value will have no effect for per tensor weight quantization.\",\n stacklevel=2,\n )\n return\n existing_channel_size = self.channel_size\n if existing_channel_size is not None:\n log.error(f\"channel_size value was already set to {existing_channel_size}. It cannot be reset.\")\n raise RuntimeError(\"channel_size cannot be reset.\")\n if value is None:\n return\n self.step_size = torch.nn.Parameter(torch.ones(value))\n self.zero_point = torch.nn.Parameter(\n torch.zeros(value),\n requires_grad=bool(not self.symmetric.item() and self.learn_zero_point.item()),\n )\n\n @property\n def quant_min(self) -> int:\n \"\"\"The minimum integer value this fake quantizer can handle\"\"\"\n if self.narrow:\n return int(-(1 << (int(self.precision.item()) - 1)) + 1)\n return 0 if self.unsigned.item() else int(-(1 << (int(self.precision.item()) - 1)))\n\n @property\n def quant_max(self) -> int:\n \"\"\"The maximum integer value this fake quantizer can handle\"\"\"\n if self.narrow:\n return (1 << int(self.precision.item())) - 1 + self.quant_min - 1\n return (1 << int(self.precision.item())) - 1 + self.quant_min\n\n @property\n def narrow(self) -> bool:\n \"\"\"Returns True in quantizer using narrow range and False otherwise.\"\"\"\n if torch.jit.is_tracing():\n return False\n return bool(self._narrow_range.item() and not self.unsigned.item() and self.symmetric.item())\n\n @property\n def is_enabled(self) -> bool:\n \"\"\"get quantizer mode\"\"\"\n return self._is_enabled\n\n def enable(self, mode: bool = True) -> None:\n \"\"\"Sets Quantizer in quantization enabling mode\n\n Args:\n mode (bool, optional): If `True`, enable quantization. Otherwise, disable quantization. 
Defaults to `True`.\n \"\"\"\n self._is_enabled = mode\n\n def disable(self) -> None:\n \"\"\"Sets quantizer in quantization disabling mode\"\"\"\n self._is_enabled = False\n\n def forward(self, inputs: torch.Tensor) -> torch.Tensor:\n \"\"\"The forward pass of fake quantizer\n\n Args:\n inputs (torch.Tensor): A tensor to fake-quantize.\n\n Raises\n ValueError: If fake quantizer has negative step_size or size of channel is invalid.\n\n Returns\n torch.Tensor: If fake quantizer is enabled, it returns a fake-quantized tensor.\n If fake quantizer is disable, it returns the value as entered.\n \"\"\"\n if not self._is_enabled:\n return inputs\n\n if (self.per_channel) and isinstance(inputs, torch.Tensor) and (self.channel_size != inputs.shape[0]):\n if self.channel_size is None:\n raise ValueError(\"channel_size(=None) must be set for per channel weight quantization\")\n raise ValueError(\n f\"channel_size(={self.channel_size}) value must be the same as \"\n f\"the first dimension of the input tensor (={inputs.shape[0]}).\"\n )\n\n if self.step_size.min() <= 0:\n log.error(\n f\"Expected step_size to be positive, but got step_size={self.step_size.data}. \"\n \"Please try one of the suggestions below:\\n\"\n \" * set the weight_decay of the fake quantizer's parameters to 0;\\n\"\n \" * reduce the learning rate for the fake quantizer's parameters; or\\n\"\n \" * reduce the grad_scale of the fake quantizer\"\n )\n raise ValueError(\"Step_size must be positive\")\n\n return self.qat_function(\n inputs,\n self.step_size,\n self.zero_point,\n self.grad_scale,\n self.quant_min,\n self.quant_max,\n self.per_channel,\n not self.is_zero_point_folded,\n )\n\n def invert_signedness(self) -> None:\n \"\"\"Inverts signedness of this fake quantizer\"\"\"\n self.unsigned.data = torch.logical_not(self.unsigned.data)\n\n # pylint: disable=protected-access\n def extra_repr(self) -> str:\n if self.precision.item() == 32:\n return f\"precision: {self.precision.item()}\"\n string = f\"{self.qat_backward_type}(precision: {self.precision.item()}\"\n string += \", per_tensor\" if not self.per_channel.item() else \", per_channel\"\n string += f\", quant_min: {self.quant_min}, quant_max: {self.quant_max}\"\n if not self.symmetric.item():\n string += \", asymmetric\"\n string += (\n f\", zero_point: {self.zero_point.item()}, is_zero_point_folded: {self.is_zero_point_folded}\"\n if not self.per_channel.item()\n else \"\"\n )\n string += f\", is_enabled: {self.is_enabled}\"\n string += f\", calib: {self.calibrator.__class__.__name__}\"\n string += \")\"\n return string\n\n @property\n def maxabs_bound(self) -> int:\n \"\"\"The maximum absolute limit value of the quantized domain.\n\n Returns:\n int: A Maximum absolute bound value.\n \"\"\"\n return max(abs(self.quant_min), abs(self.quant_max))\n\n @property\n def options(self) -> FakeQuantizerOptions:\n \"\"\"The options that current FakeQuantizer instance represents.\"\"\"\n percentile = getattr(self.calibrator, \"percentile\", None)\n zero_point = getattr(self, \"zero_point\", None)\n learn_zero_point = False if zero_point is None else zero_point.requires_grad\n\n return FakeQuantizerOptions(\n qat_backward=self.qat_backward_type,\n ptq_calibration=self.ptq_calibration,\n percentile=percentile,\n precision=int(self.precision.item()),\n symmetric=bool(self.symmetric.item()),\n unsigned=bool(self.unsigned.item()),\n per_channel=bool(self.per_channel.item()),\n learn_zero_point=learn_zero_point,\n grad_scale=self.grad_scale.item(),\n )\n\n def state_dict( # type: 
ignore[no-untyped-def, override]\n self, *args, **kwargs\n ) -> Union[OrderedDict[Any, Any], dict[str, Any]]:\n \"\"\"Stores the indices of ptq_calibration and qat_backward in addition to the torch state dict.\n\n Returns:\n dict:\n a dictionary containing a whole state of the module.\n \"\"\"\n state: OrderedDict = super().state_dict(*args, **kwargs)\n prefix = kwargs.get(\"prefix\")\n extra_state = {}\n # add qat_backward index\n extra_state[f\"{prefix}_qat_backward\"] = torch.tensor([self.qat_backward_type.value])\n # add ptq_calibration index\n extra_state[f\"{prefix}_ptq_calibration\"] = torch.tensor([self.ptq_calibration.value])\n if self.ptq_calibration == PTQCalibrationType.percentile:\n if not isinstance(self.calibrator, PercentileCalibrator):\n raise TypeError(\n \"calibrator must be instance of 'PercentileCalibrator' when ptq_calibrtion is 'percentile',\"\n f\"but got {self.calibrator}\"\n )\n extra_state[f\"{prefix}_ptq_calibration_percentile\"] = torch.tensor([self.calibrator.percentile])\n state.update(extra_state)\n return state\n\n def _load_from_state_dict(\n self,\n state_dict: dict,\n prefix: str,\n local_metadata: dict,\n strict: bool,\n missing_keys: list[str],\n unexpected_keys: list[str],\n error_msgs: list[str],\n ) -> None:\n self.qat_backward_type = QATBackwardType(state_dict.pop(f\"{prefix}_qat_backward\").item())\n self.ptq_calibration = PTQCalibrationType(state_dict.pop(f\"{prefix}_ptq_calibration\").item())\n calibrator_class = self.ptq_calibration.calibrator_class\n if self.ptq_calibration == PTQCalibrationType.percentile:\n self.calibrator = calibrator_class(self, state_dict.pop(f\"{prefix}_ptq_calibration_percentile\").item())\n else:\n self.calibrator = calibrator_class(self)\n return super()._load_from_state_dict(\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n )" }, { "identifier": "clip_narrow_range_weights", "path": "owlite/backend/fx/transforms.py", "snippet": "def clip_narrow_range_weights(graph_module: GraphModule):\n \"\"\"Clips weights with a narrow range of QConv in the graph_module.\n\n Args:\n graph_module (GraphModule): a graph module\n \"\"\"\n for _, module in graph_module.named_modules(remove_duplicate=True):\n if isinstance(module, (UnaryNeuralQModuleMixin)) and module.weight_quantizer.narrow:\n module.clip_weight()" }, { "identifier": "fold_zp_to_bias", "path": "owlite/backend/fx/transforms.py", "snippet": "def fold_zp_to_bias(qmodel: torch.nn.Module):\n \"\"\"folding all zeropoints of asymmetric quantization to bias of following operations\n\n Args:\n qmodel: model to fold\n \"\"\"\n for _, module in qmodel.named_modules():\n if isinstance(module, (qnn.QConv2d, qnn.QLinear)):\n module.fold_input_quantizer_zero_point_to_bias()" }, { "identifier": "fuse_bn", "path": "owlite/backend/fx/transforms.py", "snippet": "def fuse_bn(model: GraphModule):\n \"\"\"Fuse Conv-BatchNorm patterns in model into Conv\n\n Args:\n model (GraphModule): a graph module, possibly wrapped by dp or ddp.\n \"\"\"\n conv_patterns = [\n (torch.nn.Conv1d, torch.nn.BatchNorm1d),\n (torch.nn.Conv2d, torch.nn.BatchNorm2d),\n (torch.nn.Conv3d, torch.nn.BatchNorm3d),\n (qnn.QConv1d, torch.nn.BatchNorm1d),\n (qnn.QConv2d, torch.nn.BatchNorm2d),\n (qnn.QConv3d, torch.nn.BatchNorm3d),\n ]\n linear_patterns = [\n (torch.nn.Linear, torch.nn.BatchNorm1d),\n (qnn.QLinear, torch.nn.BatchNorm1d),\n ]\n training_status = model.training\n model.eval()\n _fuse_by_patterns(model, conv_patterns, fuse_conv_bn_eval)\n 
_fuse_by_patterns(model, linear_patterns, fuse_linear_bn_eval)\n model.train(training_status)" }, { "identifier": "get_most_common_device", "path": "owlite/backend/utils.py", "snippet": "def get_most_common_device(model: torch.nn.Module) -> torch.device:\n \"\"\"Finds the most common device where the parameters of the model reside.\n\n Args:\n model (torch.nn.Module): a model\n\n Returns:\n torch.device: the most common device where the parameters of the model reside.\n \"\"\"\n counter = Counter(p.device for p in model.parameters())\n if len(counter) == 0:\n return torch.device(\"cpu\")\n if len(counter) > 1:\n log.warning(f\"The model parameters reside on more than 1 devices: {set(counter.elements())}\")\n return counter.most_common(1)[0][0]" }, { "identifier": "get_most_common_floating_point_type", "path": "owlite/backend/utils.py", "snippet": "def get_most_common_floating_point_type(model: torch.nn.Module) -> torch.dtype:\n \"\"\"Finds the most common floating point data type of the parameters of the model.\n\n Args:\n model (torch.nn.Module): a model\n\n Returns:\n torch.dtype: the most common floating point data type of the parameters of the model\n \"\"\"\n counter = Counter(\n filter(\n lambda dtype: torch.is_floating_point(torch.empty(1, dtype=dtype)),\n (p.dtype for p in model.parameters()),\n )\n )\n if len(counter) == 0:\n return torch.float32\n if len(counter) > 1:\n log.warning(f\"The model parameters have more than 1 floating point types: {set(counter.elements())}\")\n return counter.most_common(1)[0][0]" }, { "identifier": "map_signature", "path": "owlite/backend/utils.py", "snippet": "def map_signature(func: Callable, *args: Any, **kwargs: Any) -> list[tuple[str, Any]]:\n \"\"\"Maps the parameter names of a function to the corresponding values passed in args and kwargs.\n\n This function returns a list of tuples, where each tuple contains a parameter name and its corresponding value.\n If a parameter name exists in the kwargs dictionary, its value is taken from there. Otherwise, the values are taken\n in order from the args tuple. If there are no values left in args or kwargs, the default value of the parameter\n (if it exists) is used.\n\n Args:\n func (Callable): Function to inspect.\n args (Any): Positional arguments.\n kwargs (Any): Keyword arguments.\n\n Returns:\n list[tuple[str, Any]]: List of tuples mapping parameter names to their values.\n\n Note:\n This function assumes that `args` and `kwargs` match the exact function signature,\n in order and length. If they don't, the result may not be as expected or exceptions might occur.\n \"\"\"\n sig = inspect.signature(func)\n params = sig.parameters\n\n mapped = []\n\n args_iter = iter(args)\n for name, param in params.items():\n if name in kwargs:\n mapped.append((name, kwargs[name]))\n elif args:\n mapped.append((name, next(args_iter, param.default)))\n else:\n mapped.append((name, param.default))\n\n return mapped" }, { "identifier": "move_tensors_to", "path": "owlite/backend/utils.py", "snippet": "def move_tensors_to(\n args: Any,\n device: Optional[torch.device] = None,\n dtype: Optional[torch.dtype] = None,\n) -> Any:\n \"\"\"Assign device and dtype to tensors in a nested structure containing torch.Tensor instances.\n\n Args:\n args (Any): a nested structure (dict / list / tuple) of torch.Tensor instances.\n device (Optional[torch.device], optional): if provided, moves all tensors to the device. 
Defaults to None.\n dtype (Optional[torch.dtype], optional): if the dtype is a floating point type, only floating point typed\n tensors in args will be casted to dtype. The behavior is similar when dtype is a signed integer type\n or unsigned integer type. Defaults to None.\n\n Returns:\n Any: the nested structure of tensors with possibly modified device and dtype.\n \"\"\"\n if isinstance(args, dict):\n return {key: move_tensors_to(value, device, dtype) for key, value in args.items()}\n\n if isinstance(args, tuple):\n return tuple(move_tensors_to(x, device, dtype) for x in args)\n\n if isinstance(args, list):\n return [move_tensors_to(x, device, dtype) for x in args]\n\n if isinstance(args, torch.Tensor) and dtype is not None:\n is_args_dtype_integral = not args.dtype.is_floating_point and not args.dtype.is_complex\n is_dtype_integral = not dtype.is_floating_point and not dtype.is_complex\n if (is_dtype_integral and is_args_dtype_integral and (args.dtype.is_signed == dtype.is_signed)) or (\n args.dtype.is_floating_point and dtype.is_floating_point\n ):\n args = args.to(dtype)\n\n if isinstance(args, torch.Tensor) and device is not None:\n args = args.to(device)\n\n return args" }, { "identifier": "nodestr", "path": "owlite/backend/utils.py", "snippet": "def nodestr(node: Optional[AnyNode], show_activations: bool = False) -> str:\n \"\"\"Generates the string representation of a node instance\n\n Args:\n node (Optional[AnyNode]): a node. Must be an instance of one of the types:\n torch.fx.Node, onnx.NodeProto or gs.Node\n show_activations (bool, optional): Only available if node is either onnx.NodeProto or gs.Node instance. If True,\n the string representation contains the information about the node's input and output activations.\n Defaults to False.\n\n Returns:\n str: the string representation of the node\n \"\"\"\n if node is None:\n return \"<node-not-found>\"\n if isinstance(node, ONNXNode):\n s = f\"{node.name} ({node.op_type})\"\n if show_activations:\n a = json.dumps(\n {\"inputs\": list(node.input), \"outputs\": list(node.output)},\n indent=2,\n sort_keys=True,\n )\n s = f\"{s}: {a}\"\n return s\n if isinstance(node, FXNode):\n if (\n node.op == \"call_module\"\n and isinstance(node.target, str)\n and isinstance(node.graph.owning_module, GraphModule)\n ):\n target = node.graph.owning_module.get_submodule(node.target)\n else:\n target = node.target\n s = f\"{node.name}: {node.op}({targetstr(target)})\"\n if show_activations:\n a = json.dumps(\n {\n \"args\": f\"{node.args}\",\n \"kwargs\": f\"{node.kwargs}\",\n \"inputs\": [*map(nodestr, node.all_input_nodes)],\n \"outputs\": [*map(nodestr, node.users)],\n },\n indent=2,\n sort_keys=True,\n )\n s = f\"{s}: {a}\"\n return s\n if isinstance(node, gs.Node):\n s = f\"{node.name} ({node.op})\"\n if show_activations:\n a = json.dumps(\n {\n \"inputs\": [*(t.name for t in node.inputs)],\n \"outputs\": [*(t.name for t in node.outputs)],\n },\n indent=2,\n sort_keys=True,\n )\n s = f\"{s}: {a}\"\n return s\n return \"<not-a-node>\"" }, { "identifier": "DynamicDimensions", "path": "owlite/backend/onnx/dynamize.py", "snippet": "class DynamicDimensions:\n \"\"\"Dynamic dimension settings.\"\"\"\n\n dim_size: int\n settings: dict[str, DynamicSetting]\n\n @property\n def dynamic_input_names(self) -> list[str]: # pylint: disable=missing-function-docstring\n return list(self.settings.keys())\n\n def get(self, name: str) -> Optional[DynamicSetting]:\n \"\"\"Gets dynamic setting to be applied to tensor with given name.\n\n Args:\n name (str): Name 
of a model input tensor.\n\n Returns:\n Optional[DynamicSetting]: Setting to be applied if setting for given name exists.\n \"\"\"\n return self.settings.get(name)" }, { "identifier": "dynamize", "path": "owlite/backend/onnx/dynamize.py", "snippet": "def dynamize(onnx_proto: ModelProto, dynamic_dims: DynamicDimensions) -> ModelProto:\n \"\"\"Dynamizes given ONNX proto with given dynamic dimension setting.\n\n Args:\n onnx_proto (ModelProto): ONNX model proto to dynamize.\n dynamic_dims (DynamicDimensions): Dynamic dimension setting configured by `configure_dynamic_dimensions`.\n\n Raises:\n ValueError: When dynamic ONNX proto is given.\n NotImplementedError: When dynamizing ONNX proto with reshapes with dynamic target shape is attempted.\n RuntimeError: When attempt to dynamize given ONNX proto has failed.\n\n Returns:\n ModelProto: Dynamized ONNX proto.\n \"\"\"\n\n if any(\n any(not isinstance(s, int) or s == -1 for s in extract_tensor_shape(vi) or [])\n for vi in onnx_proto.graph.value_info\n ):\n raise ValueError(\"Dynamic ONNX proto given\")\n\n input_shapes = [extract_tensor_shape(input) for input in onnx_proto.graph.input]\n if not all(shape and -1 not in shape and all(isinstance(s, int) for s in shape) for shape in input_shapes):\n raise ValueError(\"Dynamic ONNX proto given\")\n\n graph = gs.import_onnx(onnx_proto)\n\n for node in graph.nodes:\n if node.op == \"Reshape\":\n if isinstance(node.inputs[1], gs.Variable):\n raise NotImplementedError(\"Dynamizing reshapes with dynamic target shape is not supported yet\")\n\n if -1 in node.inputs[1].values:\n node.inputs[1].values = np.array(node.outputs[0].shape)\n\n dfs_stack: list[tuple[int, Sequence[int], gs.Node]] = []\n visited: set[str] = set()\n handled_tensors: set[str] = set()\n for input_tensor in graph.inputs:\n setting = dynamic_dims.get(input_tensor.name)\n if setting is None:\n continue\n\n dynamic_dim = setting.dim\n # pylint: disable-next=cell-var-from-loop\n dfs_stack.extend(\n [\n (\n dynamic_dim,\n input_tensor.shape,\n node,\n )\n for node in input_tensor.outputs\n ]\n )\n new_shape = list(input_tensor.shape[:])\n new_shape[dynamic_dim] = \"N\"\n input_tensor.shape = new_shape\n\n while len(dfs_stack) > 0:\n input_dynamic_dim, input_shape, node = dfs_stack.pop()\n\n if node.name in visited:\n continue\n\n output_dynamic_dim = input_dynamic_dim\n\n if node.op == \"Reshape\":\n if node.inputs[1].name in handled_tensors:\n continue\n\n target_shape = node.inputs[1].values\n\n elements_til_batch = 0\n for i in range(input_dynamic_dim + 1):\n elements_til_batch = (elements_til_batch if elements_til_batch else 1) * input_shape[i]\n\n acc = 1\n for i, dim in enumerate(target_shape):\n if dim == dynamic_dims.dim_size:\n output_dynamic_dim = i\n break\n\n acc = acc * dim\n if acc >= elements_til_batch:\n output_dynamic_dim = i\n break\n\n target_shape[output_dynamic_dim] = -1\n handled_tensors.add(node.inputs[1].name)\n\n elif node.op == \"Resize\":\n axes = node.attrs.get(\"axes\")\n if axes is None or len(axes) == len(node.outputs[0].shape):\n if node.inputs[3].name != \"\": # resize with shape\n assert node.inputs[2].name == \"\"\n\n if isinstance(node.inputs[3], gs.Variable):\n raise ValueError(\"Dynamic ONNX proto given\")\n\n target_shape = node.inputs[3].values\n\n if target_shape[input_dynamic_dim] != node.outputs[0].shape[input_dynamic_dim]:\n raise ValueError(\"Dynamic ONNX proto given\")\n\n node.inputs[2] = gs.Constant(\n f\"{node.name}_scale\", np.array(target_shape / node.inputs[0].shape, dtype=np.float32)\n )\n 
node.inputs[3] = gs.Variable.empty()\n\n elif input_dynamic_dim in axes:\n raise ValueError(\"Dynamic ONNX proto given\")\n\n elif node.op == \"Transpose\":\n target_permutation = node.attrs.get(\"perm\")\n output_dynamic_dim = target_permutation.index(input_dynamic_dim)\n\n for output in node.outputs:\n # pylint: disable-next=cell-var-from-loop\n children = [(output_dynamic_dim, output.shape[:], child) for child in output.outputs]\n\n if children:\n # dfs_stack.extend(reversed(children))\n dfs_stack.extend(children)\n\n visited.add(node.name)\n\n dynamized_proto = gs.export_onnx(graph)\n dynamized_proto.graph.ClearField(\"value_info\")\n for output in dynamized_proto.graph.output:\n output.type.tensor_type.ClearField(\"shape\")\n\n dynamized_proto = infer_shapes(dynamized_proto, check_type=True, strict_mode=True, data_prop=True)\n\n if not all(\n len([dim.dim_param for dim in output.type.tensor_type.shape.dim if not dim.dim_value]) == 1\n for output in dynamized_proto.graph.output\n ):\n raise RuntimeError(\"Failed to dynamize given ONNX proto\")\n\n return dynamized_proto" }, { "identifier": "compare", "path": "owlite/backend/onnx/model_checking.py", "snippet": "def compare(\n model_opt: Union[str, onnx.ModelProto],\n model_ori: Union[str, onnx.ModelProto],\n n_times: int = 5,\n input_shapes: Optional[TensorShapes] = None,\n input_data: Optional[Tensors] = None,\n custom_lib: Optional[str] = None,\n rtol: Optional[float] = None,\n atol: Optional[float] = None,\n equal_nan: bool = False,\n) -> bool:\n \"\"\"\n :param model_opt: The simplified ONNX model\n :param model_ori: The original ONNX model\n :param n_times: Generate n random inputs\n :param input_shapes: Shapes of generated random inputs\n :param input_data: User-given data instead of random generated data\n :param custom_lib: ONNX Runtime custom lib for custom ops\n :param rtol: The relative tolerance parameter\n (see https://numpy.org/doc/stable/reference/generated/numpy.allclose.html).\n :param atol: The absolute tolerance parameter\n (see https://numpy.org/doc/stable/reference/generated/numpy.allclose.html).\n :param equal_nan: Whether to compare NaN's as equal.\n If True, NaN's in `a` will be considered equal to NaN's in `b` in the output array.\n \"\"\"\n\n def get_shape_from_value_info_proto(v: onnx.ValueInfoProto) -> list[int]:\n return [dim.dim_value for dim in v.type.tensor_type.shape.dim]\n\n def get_value_info_all(\n m: onnx.ModelProto, name: str\n ) -> Optional[onnx.ValueInfoProto]:\n for v in m.graph.value_info:\n if v.name == name:\n return v\n\n for v in m.graph.input:\n if v.name == name:\n return v\n\n for v in m.graph.output:\n if v.name == name:\n return v\n\n return None\n\n def get_shape(m: onnx.ModelProto, name: str) -> TensorShape:\n \"\"\"\n Note: This method relies on onnx shape inference, which is not reliable.\n So only use it on input or output tensors\n \"\"\"\n v = get_value_info_all(m, name)\n if v is not None:\n return get_shape_from_value_info_proto(v)\n raise RuntimeError(f'Cannot get shape of \"{name}\"')\n\n def get_elem_type(m: onnx.ModelProto, name: str) -> Optional[int]:\n v = get_value_info_all(m, name)\n if v is not None:\n return v.type.tensor_type.elem_type\n return None\n\n def get_np_type_from_elem_type(elem_type: int) -> int:\n sizes = (\n None,\n np.float32,\n np.uint8,\n np.int8,\n np.uint16,\n np.int16,\n np.int32,\n np.int64,\n str,\n bool,\n np.float16,\n np.double,\n np.uint32,\n np.uint64,\n np.complex64,\n np.complex128,\n np.float16,\n )\n assert len(sizes) == 17\n size = 
sizes[elem_type]\n assert size is not None\n return size\n\n def get_input_names(model: onnx.ModelProto) -> list[str]:\n input_names = list(\n {ipt.name for ipt in model.graph.input}\n - {x.name for x in model.graph.initializer}\n )\n return input_names\n\n def generate_rand_input(\n model: Union[str, onnx.ModelProto], input_shapes: Optional[TensorShapes] = None\n ):\n if input_shapes is None:\n input_shapes = {}\n if isinstance(model, str):\n model = onnx.load(model, load_external_data=False)\n input_names = get_input_names(model)\n full_input_shapes = {ipt: get_shape(model, ipt) for ipt in input_names}\n if None in input_shapes:\n log.debug(\"input_shapes:\")\n log.debug(input_shapes)\n raise ValueError(\"None is exist in input shapes\")\n full_input_shapes.update(input_shapes) # type: ignore\n for name, shape in full_input_shapes.items():\n if any(dim <= 0 for dim in shape[1:]):\n raise RuntimeError(\n f'The shape of input \"{name}\" has dynamic size, '\n \"please set an input shape manually with --test-input-shape\"\n )\n if len(shape) > 0 and shape[0] <= 0: # pylint: disable=chained-comparison\n print(\n f'shape[0] of input \"{name}\" is dynamic, we assume it presents batch size and set it as 1 when '\n \"testing. If it is not wanted, please set the it manually by --test-input-shape \"\n \"(see `onnxsim -h` for the details).\"\n )\n shape[0] = 1\n\n inputs = {\n ipt: np.array(\n np.random.rand(*full_input_shapes[ipt]),\n dtype=get_np_type_from_elem_type(get_elem_type(model, ipt)),\n )\n for ipt in input_names\n }\n return inputs\n\n def forward(\n model: Union[str, onnx.ModelProto],\n inputs: Tensors,\n custom_lib: Optional[str] = None,\n ) -> dict[str, np.ndarray]:\n sess_options = rt.SessionOptions()\n if custom_lib is not None:\n if os.path.exists(custom_lib):\n sess_options.register_custom_ops_library(custom_lib)\n else:\n raise ValueError(f\"No such file '{custom_lib}'\")\n sess_options.graph_optimization_level = rt.GraphOptimizationLevel(0)\n sess_options.log_severity_level = 3\n if isinstance(model, onnx.ModelProto):\n model = model.SerializeToString()\n sess = rt.InferenceSession(\n model,\n sess_options=sess_options,\n providers=[\"CPUExecutionProvider\"],\n )\n outputs = [x.name for x in sess.get_outputs()]\n run_options = rt.RunOptions()\n run_options.log_severity_level = 3\n res = OrderedDict(\n zip(outputs, sess.run(outputs, inputs, run_options=run_options))\n )\n return res\n\n if input_shapes is None:\n input_shapes = {}\n onnx.checker.check_model(model_opt)\n for i in range(n_times):\n print(f\"Checking {i}/{n_times}...\")\n if input_data is None:\n inputs = generate_rand_input(model_opt, input_shapes=input_shapes)\n else:\n inputs = input_data\n res_ori = forward(model_ori, inputs, custom_lib)\n res_opt = forward(model_opt, inputs, custom_lib)\n\n if not compare_nested_outputs(\n res_opt, res_ori, rtol=rtol, atol=atol, equal_nan=equal_nan\n ):\n return False\n return True" }, { "identifier": "apply_onnx_transforms", "path": "owlite/backend/onnx/transforms.py", "snippet": "def apply_onnx_transforms(\n onnx_proto: ModelProto, output_path: Optional[str] = None, **kwargs\n) -> ModelProto:\n \"\"\"Applies all transformations registered in this file.\n\n Args:\n onnx_proto (ModelProto): the ONNX model proto to apply transformations.\n output_path (Optional[str], optional): the output path in string. If provided, runs the ModelProto will be\n written with external data after the transformations (required for large models > 2GB). 
Defaults to None.\n\n Returns:\n ModelProto: _description_\n \"\"\"\n graph = gs.import_onnx(onnx_proto)\n for name, transform in ONNX_TRANSFORMS.items():\n log.debug(f\"Applying ONNX transform: {name}\")\n graph = transform(graph)\n graph.toposort()\n graph = fold_constants(graph)\n graph.cleanup()\n if output_path is None:\n return gs.export_onnx(graph)\n export_to_onnx_with_external_data(graph, output_path, **kwargs)\n return onnx.load(output_path)" } ]
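The FakeQuantizer snippet in the context list above derives its integer range from precision, unsigned and narrow_range. A plain-integer sketch of that quant_min / quant_max arithmetic, kept separate from the module itself:

def quant_range(precision: int, unsigned: bool, narrow: bool) -> tuple[int, int]:
    # mirrors the quant_min / quant_max properties shown in the FakeQuantizer snippet
    if narrow:
        qmin = -(1 << (precision - 1)) + 1
        qmax = (1 << precision) - 1 + qmin - 1
    else:
        qmin = 0 if unsigned else -(1 << (precision - 1))
        qmax = (1 << precision) - 1 + qmin
    return qmin, qmax

print(quant_range(8, unsigned=False, narrow=False))  # (-128, 127)
print(quant_range(8, unsigned=False, narrow=True))   # (-127, 127)
print(quant_range(8, unsigned=True, narrow=False))   # (0, 255)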
import io import os import tempfile import onnx import onnxsim import onnxsim.model_checking import torch import torch.onnx.errors from collections.abc import Collection, Mapping, Sequence from typing import Any, Optional, Union from onnx import ModelProto from onnx.shape_inference import infer_shapes, infer_shapes_path from onnxsim.onnxsim_cpp2py_export import simplify_path from torch.fx.graph_module import GraphModule from ...enums import OwLiteStatus from ...logger import log from ...nn import FakeQuantizer from ..fx.transforms import clip_narrow_range_weights, fold_zp_to_bias, fuse_bn from ..utils import ( get_most_common_device, get_most_common_floating_point_type, map_signature, move_tensors_to, nodestr, ) from .dynamize import DynamicDimensions, dynamize from .model_checking import compare from .transforms import apply_onnx_transforms
12508
Defaults to `torch._C._onnx.OperatorExportTypes.ONNX`. * `OperatorExportTypes.ONNX`: Export all ops as regular ONNX ops (in the default opset domain). * `OperatorExportTypes.ONNX_FALLTHROUGH`: Try to convert all ops to standard ONNX ops in the default opset domain. If unable to do so (e.g. because support has not been added to convert a particular torch op to ONNX), fall back to exporting the op into a custom opset domain without conversion. Applies to `custom ops <https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html>`_ as well as ATen ops. For the exported model to be usable, the runtime must support these non-standard ops. * `OperatorExportTypes.ONNX_ATEN`: All ATen ops (in the TorchScript namespace "aten") are exported as ATen ops (in opset domain "org.pytorch.aten"). `ATen <https://pytorch.org/cppdocs/#aten>`_ is PyTorch's built-in tensor library, so this instructs the runtime to use PyTorch's implementation of these ops. .. warning:: Models exported this way are probably runnable only by Caffe2. This may be useful if the numeric differences in implementations of operators are causing large differences in behavior between PyTorch and Caffe2 (which is more common on untrained models). * `OperatorExportTypes.ONNX_ATEN_FALLBACK`: Try to export each ATen op (in the TorchScript namespace "aten") as a regular ONNX op. If we are unable to do so (e.g. because support has not been added to convert a particular torch op to ONNX), fall back to exporting an ATen op. See documentation on OperatorExportTypes.ONNX_ATEN for context. For example:: graph(%0 : Float): %3 : int = prim::Constant[value=0]() # conversion unsupported %4 : Float = aten::triu(%0, %3) # conversion supported %5 : Float = aten::mul(%4, %0) return (%5) Assuming `aten::triu` is not supported in ONNX, this will be exported as:: graph(%0 : Float): %1 : Long() = onnx::Constant[value={0}]() # not converted %2 : Float = aten::ATen[operator="triu"](%0, %1) # converted %3 : Float = onnx::Mul(%2, %0) return (%3) If PyTorch was built with Caffe2 (i.e. with `BUILD_CAFFE2=1`), then Caffe2-specific behavior will be enabled, including special support for ops are produced by the modules described in `Quantization <https://pytorch.org/docs/stable/quantization.html>`_. .. warning:: Models exported this way are probably runnable only by Caffe2. opset_version (int, optional): The version of the default (ai.onnx) opset <https://github.com/onnx/onnx/blob/master/docs/Operators.md> to target. Must be >= 7 and <= 18. Defaults to 17. do_constant_folding (bool, optional): Apply the constant-folding optimization. Constant-folding will replace some of the ops that have all constant inputs with pre-computed constant nodes. Defaults to True. keep_initializers_as_inputs (Optional[bool], optional): If True, all the initializers (typically corresponding to parameters) in the exported graph will also be added as inputs to the graph. If False, then initializers are not added as inputs to the graph, and only the non-parameter inputs are added as inputs. This may allow for better optimizations (e.g. constant folding) by backends/runtimes. Defaults to None. custom_opsets (Optional[Mapping[str, int]], optional): A dict with schema: * KEY (str): opset domain name * VALUE (int): opset version If a custom opset is referenced by ``model`` but not mentioned in this dictionary, the opset version is set to 1. Only custom opset domain name and version should be indicated through this argument. Defaults to None. 
export_modules_as_functions (Union[bool, Collection[type[torch.nn.Module]]], optional): Flag to enable exporting all ``nn.Module`` forward calls as local functions in ONNX. Or a set to indicate the particular types of modules to export as local functions in ONNX. This feature requires ``opset_version`` >= 15, otherwise the export will fail. This is because ``opset_version`` < 15 implies IR version < 8, which means no local function support. Module variables will be exported as function attributes. There are two categories of function attributes. Defaults to False. use_fast_export (bool, optional): If True, export process will be done in memory. If `module` with total parameter size larger than 2GB, this flag will be automatically set to `False`. If False, temporary export process will be done using temporary files. Defaults to True. apply_transforms (bool, optional): If True, ONNX transforms defined by SqueezeBits.inc will be applied for model optimization. If False, ONNX transformations will be skipped. However, turning this flag to `False` is experimental and might yield unexpected behavior. Defaults to True. simplify (bool, optional): If True, onnx-simplifier will be run. If False, onnx-simplifier will be skipped. Defaults to True. check_n (int, optional): Only available when `simplify=True`. The number of times to run check for the simplified ONNX proto after onnx-simplifier. Defaults to 1. skip_fuse_bn (bool, optional): Only available when `simplify=True`. Whether to skip batchnorm-fusion. Defaults to False. skipped_optimizers (Optional[list[str]], optional): Only available when `simplify=True`. The list of onnx-simplifier passes to skip. Defaults to None. See https://github.com/onnx/optimizer/tree/master/onnxoptimizer/passes for available passes. dynamic_dimensions (Optional[DynamicDimensions], optional): Dynamic dimensions setting configured by `configure_dynamic_dimensions`. Defaults to None. Raises: TypeError: If `f` is not a string. ValueError: If the quantizer has invalid condition. `torch.onnx.errors.CheckerError`: If the ONNX checker detects an invalid ONNX graph. `torch.onnx.errors.UnsupportedOperatorError`: If the ONNX graph cannot be exported because it uses an operator that is not supported by the exporter. `torch.onnx.errors.OnnxExporterError`: Other errors that can occur during export. All errors are subclasses of :class:`errors.OnnxExporterError`. """ if not isinstance(f, str): raise TypeError("owlite.onnx.export requires the argument `f` to be a string.") if isinstance(module, GraphModule): if module.meta["owlite_status"] == OwLiteStatus.COMPRESSED: log.warning( "This module has not yet been calibrated. " "The onnx that comes out of this module may have unexpected results in accuracy and latency." ) clip_narrow_range_weights(module) # Batch Norm Fusing
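The docstring above documents export's optional knobs (opset_version, simplify, check_n, skip_fuse_bn, ...). A hedged usage sketch of the export function defined in this record; the model, input and output path are placeholders, and the keyword values are the documented defaults:

import torch

# assumes `export` from this module is in scope
model = torch.nn.Linear(16, 4).eval()
dummy_input = torch.randn(1, 16)

export(
    model,                # torch.nn.Module (a symbolic-traced GraphModule is also handled)
    (dummy_input,),       # args tuple; tensor arguments become the ONNX graph inputs
    "linear.onnx",        # f must be a string path, per the docstring
    opset_version=17,     # documented default
    simplify=True,        # run onnx-simplifier after export
    check_n=1,            # number of correctness checks on the simplified proto
)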
"""An alternative for torch.onnx.export with extra optimizations""" # pylint: disable=protected-access # pylint: disable=no-name-in-module # Large models (e.g. SwinTransformer) requires # more than 50 (default) onnxsim iterations os.environ["ONNXSIM_FIXED_POINT_ITERS"] = "100" # pylint: disable=too-many-arguments, too-many-locals, invalid-name def export( module: torch.nn.Module, args: Union[tuple[Any, ...], torch.Tensor], f: str, export_params: bool = True, verbose: bool = False, training: torch._C._onnx.TrainingMode = torch._C._onnx.TrainingMode.EVAL, input_names: Optional[Sequence[str]] = None, output_names: Optional[Sequence[str]] = None, operator_export_type: torch._C._onnx.OperatorExportTypes = torch._C._onnx.OperatorExportTypes.ONNX, opset_version: int = 17, do_constant_folding: bool = True, keep_initializers_as_inputs: Optional[bool] = None, custom_opsets: Optional[Mapping[str, int]] = None, export_modules_as_functions: Union[bool, Collection[type[torch.nn.Module]]] = False, use_fast_export: bool = True, apply_transforms: bool = True, simplify: bool = True, check_n: int = 1, skip_fuse_bn: bool = False, skipped_optimizers: Optional[list[str]] = None, dynamic_dimensions: Optional[DynamicDimensions] = None, ) -> None: r"""Exports a model into ONNX format. Args: module (torch.nn.Module): The model to be exported. args (Union[tuple[Any, ...], torch.Tensor]): Argument of a `module`. args can be structured either as: 1. ONLY A TUPLE OF ARGUMENTS:: args = (x, y, z) The tuple should contain model inputs such that `module(*args)` is a valid invocation of the model. Any non-Tensor arguments will be hard-coded into the exported model; any Tensor arguments will become inputs of the exported model, in the order they occur in the tuple. 2. A TENSOR:: args = torch.Tensor([1]) This is equivalent to a 1-ary tuple of that Tensor. 3. A TUPLE OF ARGUMENTS ENDING WITH A DICTIONARY OF NAMED ARGUMENTS:: args = ( x, { "y": input_y, "z": input_z } ) All but the last element of the tuple will be passed as non-keyword arguments, and named arguments will be set from the last element. If a named argument is not present in the dictionary, it is assigned the default value, or None if a default value is not provided. .. note:: If a dictionary is the last element of the args tuple, it will be interpreted as containing named arguments. In order to pass a dict as the last non-keyword arg, provide an empty dict as the last element of the args tuple. For example, instead of:: export( module, ( x, # WRONG: will be interpreted as named arguments {y: z} ), "test.onnx.pb" ) Write:: export( module, ( x, {y: z}, {} ), "test.onnx.pb" ) f (str): A string containing a file name. A binary protocol buffer will be written to this file. export_params (bool, optional): If True, all parameters will be exported. Set this to False if you want to export an untrained model. In this case, the exported model will first take all of its parameters as arguments, with the ordering as specified by `module.state_dict().values()`. Defaults to True. verbose (bool, optional): If True, prints a description of the model being exported to stdout. In addition, the final ONNX graph will include the field `doc_string` from the exported model which mentions the source code locations for `module`. If True, ONNX exporter logging will be turned on. Defaults to False. training (torch._C._onnx.TrainingMode, optional): Defaults to torch._C._onnx.TrainingMode.EVAL. * `TrainingMode.EVAL`: export the model in inference mode. 
* `TrainingMode.PRESERVE`: export the model in inference mode if model.training is False and in training mode if model.training is True. * `TrainingMode.TRAINING`: export the model in training mode. Disables optimizations which might interfere with training. input_names (Optional[Sequence[str]], optional): Names to assign to the input nodes of the graph, in order. Names of `module.forward` arguments will be used when None is given. Defaults to None. output_names (Optional[Sequence[str]], optional): Names to assign to the output nodes of the graph, in order. Defaults to None. operator_export_type (torch._C._onnx.OperatorExportTypes, optional): Defaults to `torch._C._onnx.OperatorExportTypes.ONNX`. * `OperatorExportTypes.ONNX`: Export all ops as regular ONNX ops (in the default opset domain). * `OperatorExportTypes.ONNX_FALLTHROUGH`: Try to convert all ops to standard ONNX ops in the default opset domain. If unable to do so (e.g. because support has not been added to convert a particular torch op to ONNX), fall back to exporting the op into a custom opset domain without conversion. Applies to `custom ops <https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html>`_ as well as ATen ops. For the exported model to be usable, the runtime must support these non-standard ops. * `OperatorExportTypes.ONNX_ATEN`: All ATen ops (in the TorchScript namespace "aten") are exported as ATen ops (in opset domain "org.pytorch.aten"). `ATen <https://pytorch.org/cppdocs/#aten>`_ is PyTorch's built-in tensor library, so this instructs the runtime to use PyTorch's implementation of these ops. .. warning:: Models exported this way are probably runnable only by Caffe2. This may be useful if the numeric differences in implementations of operators are causing large differences in behavior between PyTorch and Caffe2 (which is more common on untrained models). * `OperatorExportTypes.ONNX_ATEN_FALLBACK`: Try to export each ATen op (in the TorchScript namespace "aten") as a regular ONNX op. If we are unable to do so (e.g. because support has not been added to convert a particular torch op to ONNX), fall back to exporting an ATen op. See documentation on OperatorExportTypes.ONNX_ATEN for context. For example:: graph(%0 : Float): %3 : int = prim::Constant[value=0]() # conversion unsupported %4 : Float = aten::triu(%0, %3) # conversion supported %5 : Float = aten::mul(%4, %0) return (%5) Assuming `aten::triu` is not supported in ONNX, this will be exported as:: graph(%0 : Float): %1 : Long() = onnx::Constant[value={0}]() # not converted %2 : Float = aten::ATen[operator="triu"](%0, %1) # converted %3 : Float = onnx::Mul(%2, %0) return (%3) If PyTorch was built with Caffe2 (i.e. with `BUILD_CAFFE2=1`), then Caffe2-specific behavior will be enabled, including special support for ops are produced by the modules described in `Quantization <https://pytorch.org/docs/stable/quantization.html>`_. .. warning:: Models exported this way are probably runnable only by Caffe2. opset_version (int, optional): The version of the default (ai.onnx) opset <https://github.com/onnx/onnx/blob/master/docs/Operators.md> to target. Must be >= 7 and <= 18. Defaults to 17. do_constant_folding (bool, optional): Apply the constant-folding optimization. Constant-folding will replace some of the ops that have all constant inputs with pre-computed constant nodes. Defaults to True. 
keep_initializers_as_inputs (Optional[bool], optional): If True, all the initializers (typically corresponding to parameters) in the exported graph will also be added as inputs to the graph. If False, then initializers are not added as inputs to the graph, and only the non-parameter inputs are added as inputs. This may allow for better optimizations (e.g. constant folding) by backends/runtimes. Defaults to None. custom_opsets (Optional[Mapping[str, int]], optional): A dict with schema: * KEY (str): opset domain name * VALUE (int): opset version If a custom opset is referenced by ``model`` but not mentioned in this dictionary, the opset version is set to 1. Only custom opset domain name and version should be indicated through this argument. Defaults to None. export_modules_as_functions (Union[bool, Collection[type[torch.nn.Module]]], optional): Flag to enable exporting all ``nn.Module`` forward calls as local functions in ONNX. Or a set to indicate the particular types of modules to export as local functions in ONNX. This feature requires ``opset_version`` >= 15, otherwise the export will fail. This is because ``opset_version`` < 15 implies IR version < 8, which means no local function support. Module variables will be exported as function attributes. There are two categories of function attributes. Defaults to False. use_fast_export (bool, optional): If True, export process will be done in memory. If `module` with total parameter size larger than 2GB, this flag will be automatically set to `False`. If False, temporary export process will be done using temporary files. Defaults to True. apply_transforms (bool, optional): If True, ONNX transforms defined by SqueezeBits.inc will be applied for model optimization. If False, ONNX transformations will be skipped. However, turning this flag to `False` is experimental and might yield unexpected behavior. Defaults to True. simplify (bool, optional): If True, onnx-simplifier will be run. If False, onnx-simplifier will be skipped. Defaults to True. check_n (int, optional): Only available when `simplify=True`. The number of times to run check for the simplified ONNX proto after onnx-simplifier. Defaults to 1. skip_fuse_bn (bool, optional): Only available when `simplify=True`. Whether to skip batchnorm-fusion. Defaults to False. skipped_optimizers (Optional[list[str]], optional): Only available when `simplify=True`. The list of onnx-simplifier passes to skip. Defaults to None. See https://github.com/onnx/optimizer/tree/master/onnxoptimizer/passes for available passes. dynamic_dimensions (Optional[DynamicDimensions], optional): Dynamic dimensions setting configured by `configure_dynamic_dimensions`. Defaults to None. Raises: TypeError: If `f` is not a string. ValueError: If the quantizer has invalid condition. `torch.onnx.errors.CheckerError`: If the ONNX checker detects an invalid ONNX graph. `torch.onnx.errors.UnsupportedOperatorError`: If the ONNX graph cannot be exported because it uses an operator that is not supported by the exporter. `torch.onnx.errors.OnnxExporterError`: Other errors that can occur during export. All errors are subclasses of :class:`errors.OnnxExporterError`. """ if not isinstance(f, str): raise TypeError("owlite.onnx.export requires the argument `f` to be a string.") if isinstance(module, GraphModule): if module.meta["owlite_status"] == OwLiteStatus.COMPRESSED: log.warning( "This module has not yet been calibrated. " "The onnx that comes out of this module may have unexpected results in accuracy and latency." 
) clip_narrow_range_weights(module) # Batch Norm Fusing
fuse_bn(module)
5
2023-12-08 06:41:50+00:00
16k
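The export docstring in the record above spells out three ways to pack `args`; the sketch below makes those packing conventions concrete. The toy model, tensor shapes, and the commented-out export(...) calls are illustrative assumptions, not part of the repository.

import torch

class ToyModel(torch.nn.Module):
    # Illustrative module: forward takes one positional and two optional keyword args.
    def forward(self, x, y=None, z=None):
        out = x
        if y is not None:
            out = out + y
        if z is not None:
            out = out * z
        return out

module = ToyModel()
x, y = torch.randn(1, 3), torch.randn(1, 3)

# 1. A tuple of arguments: module(*args) must be a valid invocation.
args_tuple = (x, y)
module(*args_tuple)  # sanity check

# 2. A single tensor, equivalent to the 1-ary tuple (x,).
args_tensor = x

# 3. Positional args ending with a dictionary of named arguments.
args_named = (x, {"y": y, "z": None})

# To pass a real dict as the last positional argument, append an empty dict,
# otherwise the trailing dict would be read as named arguments.
args_dict_positional = (x, {"y": y}, {})

# Hypothetical calls against the signature shown above (kept as comments):
# export(module, args_tuple, "toy.onnx")
# export(module, args_named, "toy_named.onnx", opset_version=17)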
qitan/devops-backend-lite
apps/ucenter/views.py
[ { "identifier": "FEISHU_SYNC_USER_JOB_CACHE_KEY", "path": "common/variables.py", "snippet": "FEISHU_SYNC_USER_JOB_CACHE_KEY = 'celery_job:feishu_user_sync'" }, { "identifier": "Menu", "path": "dbapp/models.py", "snippet": "" }, { "identifier": "CustomModelViewSet", "path": "common/extends/viewsets.py", "snippet": "class CustomModelViewSet(viewsets.ModelViewSet):\n \"\"\"\n A viewset that provides default `create()`, `retrieve()`, `update()`,\n `partial_update()`, `destroy()` and `list()` actions.\n \"\"\"\n\n def get_permission_from_role(self, request):\n try:\n perms = request.user.roles.values(\n 'permissions__method',\n ).distinct()\n return [p['permissions__method'] for p in perms]\n except AttributeError:\n return []\n\n def extend_filter(self, queryset):\n return queryset\n\n def get_queryset(self):\n \"\"\"\n Get the list of items for this view.\n This must be an iterable, and may be a queryset.\n Defaults to using `self.queryset`.\n\n This method should always be used rather than accessing `self.queryset`\n directly, as `self.queryset` gets evaluated only once, and those results\n are cached for all subsequent requests.\n\n You may want to override this if you need to provide different\n querysets depending on the incoming request.\n\n (Eg. return a list of items that is specific to the user)\n \"\"\"\n assert self.queryset is not None, (\n \"'%s' should either include a `queryset` attribute, \"\n \"or override the `get_queryset()` method.\"\n % self.__class__.__name__\n )\n queryset = self.extend_filter(self.queryset)\n if isinstance(queryset, QuerySet):\n # Ensure queryset is re-evaluated on each request.\n queryset = queryset.all()\n return queryset.distinct()\n\n @action(methods=['GET'], url_path='count', detail=False)\n def count(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n return Response({'code': 20000, 'data': queryset.count()})\n\n def create(self, request, *args, **kwargs):\n try:\n request.data['name'] = request.data['name'].strip(\n ' ').replace(' ', '-')\n except BaseException as e:\n print('exception ', str(e))\n serializer = self.get_serializer(data=request.data)\n if not serializer.is_valid():\n return Response({'code': 40000, 'status': 'failed', 'message': serializer.errors})\n try:\n self.perform_create(serializer)\n except BaseException as e:\n return Response({'code': 50000, 'status': 'failed', 'message': str(e)})\n log_audit(request, action_type=self.serializer_class.Meta.model.__name__, action='创建', content='',\n data=serializer.data)\n\n data = {'data': serializer.data, 'status': 'success', 'code': 20000}\n return Response(data)\n\n def list(self, request, pk=None, *args, **kwargs):\n queryset = self.filter_queryset(self.get_queryset())\n page_size = request.query_params.get('page_size')\n pagination.PageNumberPagination.page_size = page_size\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n serializer = self.get_serializer(queryset, many=True)\n data = {'data': {'total': queryset.count(), 'items': serializer.data},\n 'code': 20000, 'status': 'success'}\n return Response(data)\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n partial = kwargs.pop('partial', False)\n try:\n request.data['name'] = request.data['name'].strip(\n ' ').replace(' ', '-')\n except BaseException as e:\n logger.warning(f'不包含name字段: {str(e)}')\n serializer = self.get_serializer(\n instance, data=request.data, 
partial=partial)\n if not serializer.is_valid():\n return Response({'code': 40000, 'status': 'failed', 'message': str(serializer.errors)})\n try:\n self.perform_update(serializer)\n except BaseException as e:\n logger.exception(f'更新失败,原因:{e}')\n return Response({'code': 50000, 'status': 'failed', 'message': str(e)})\n\n if getattr(instance, '_prefetched_objects_cache', None):\n # If 'prefetch_related' has been applied to a queryset, we need to\n # forcibly invalidate the prefetch cache on the instance.\n instance._prefetched_objects_cache = {}\n\n log_audit(request, self.serializer_class.Meta.model.__name__, '更新', content=f\"更新对象:{instance}\",\n data=serializer.data, old_data=self.serializer_class(instance).data)\n\n data = {'data': serializer.data, 'status': 'success', 'code': 20000}\n return Response(data)\n\n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n data = {'data': serializer.data, 'code': 20000, 'status': 'success'}\n return Response(data)\n\n def destroy(self, request, *args, **kwargs):\n \"\"\"\n TODO: 删除操作物理删除 or 逻辑删除(增加删除标记字段)\n \"\"\"\n instance = self.get_object()\n try:\n self.perform_destroy(instance)\n except ProtectedError:\n # 存在关联数据,不可删除\n return Response({'code': 50000, 'status': 'failed', 'message': '存在关联数据,禁止删除!'})\n except BaseException as e:\n logger.exception(f'删除数据发生错误 {e}, {e.__class__}')\n return Response({'code': 50000, 'status': 'failed', 'message': f'删除异常: {str(e)}'})\n log_audit(request, self.serializer_class.Meta.model.__name__,\n '删除', content=f\"删除对象:{instance}\")\n\n return Response({'code': 20000, 'status': 'success', 'msg': ''})" }, { "identifier": "CustomModelParentViewSet", "path": "common/extends/viewsets.py", "snippet": "class CustomModelParentViewSet(CustomModelViewSet):\n\n def get_queryset(self):\n assert self.queryset is not None, (\n \"'%s' should either include a `queryset` attribute, \"\n \"or override the `get_queryset()` method.\"\n % self.__class__.__name__\n )\n queryset = self.extend_filter(self.queryset)\n if self.action == 'list':\n if not self.request.query_params.get('search'):\n queryset = queryset.filter(parent__isnull=True)\n if isinstance(queryset, QuerySet):\n queryset = queryset.all()\n return queryset.distinct()" }, { "identifier": "RbacPermission", "path": "common/extends/permissions.py", "snippet": "class RbacPermission(BasePermission):\n \"\"\"\n 自定义权限\n \"\"\"\n\n @classmethod\n def check_is_admin(cls, request):\n return request.user.is_authenticated and request.user.roles.filter(name='管理员').count() > 0\n\n @classmethod\n def get_permission_from_role(cls, request):\n try:\n perms = request.user.roles.values(\n 'permissions__method',\n ).distinct()\n return [p['permissions__method'] for p in perms]\n except AttributeError:\n return []\n\n def _has_permission(self, request, view):\n \"\"\"\n :return:\n \"\"\"\n _method = request._request.method.lower()\n platform = get_redis_data('platform')\n url_whitelist = platform['whitelist'] if platform else []\n url_whitelist.extend(\n [{'url': '/api/login/feishu/'}, {'url': '/api/login/gitlab/'}])\n path_info = request.path_info\n for item in url_whitelist:\n url = item['url']\n if url in path_info:\n logger.debug(f'请求地址 {path_info} 命中白名单 {url}, 放行')\n return True\n\n from_workflow = 'from_workflow' in request.GET\n if _method == 'get' and from_workflow:\n return True\n\n is_superuser = request.user.is_superuser\n if is_superuser:\n return True\n\n is_admin = RbacPermission.check_is_admin(request)\n 
perms = self.get_permission_from_role(request)\n if not is_admin and not perms:\n logger.debug(f'用户 {request.user} 不是管理员 且 权限列表为空, 直接拒绝')\n return False\n\n perms_map = view.perms_map\n\n action = view.action\n _custom_method = f'{_method}_{action}'\n for i in perms_map:\n for method, alias in i.items():\n if is_admin and (method == '*' and alias[0] == 'admin'):\n return True\n if method == '*' and alias[0] in perms:\n return True\n if _custom_method and alias[0] in perms and (_custom_method == method or method == f'*_{action}'):\n return True\n if _method == method and alias[0] in perms:\n return True\n return False\n\n def has_permission(self, request, view):\n res = self._has_permission(request, view)\n # 记录权限异常的操作\n if not res:\n AuditLog.objects.create(\n user=request.user, type='', action='拒绝操作',\n action_ip=user_ip(request),\n content=f\"请求方法:{request.method},请求路径:{request.path},UserAgent:{request.META['HTTP_USER_AGENT']}\",\n data='',\n old_data=''\n )\n return res" }, { "identifier": "CustomInvalidToken", "path": "common/extends/JwtAuth.py", "snippet": "class CustomInvalidToken(InvalidToken):\n status_code = status.HTTP_401_UNAUTHORIZED\n default_detail = 'Token不合法或者已经过期.'\n default_code = 40100" }, { "identifier": "TokenObtainPairSerializer", "path": "common/extends/JwtAuth.py", "snippet": "class TokenObtainPairSerializer(BaseTokenObtainPairSerializer):\n\n default_error_messages = {\n \"no_active_account\": \"用户名或者密码错误!\"\n }\n\n @classmethod\n def get_token(cls, user):\n token = RefreshToken.for_user(user)\n return token" }, { "identifier": "TokenRefreshSerializer", "path": "common/extends/JwtAuth.py", "snippet": "class TokenRefreshSerializer(BaseTokenRefreshSerializer):\n\n def validate(self, attrs):\n refresh = RefreshToken(attrs['refresh'])\n data = {'access': str(refresh.access_token)}\n\n if api_settings.ROTATE_REFRESH_TOKENS:\n if api_settings.BLACKLIST_AFTER_ROTATION:\n try:\n # Attempt to blacklist the given refresh token\n refresh.blacklist()\n except AttributeError:\n # If blacklist app not installed, `blacklist` method will\n # not be present\n pass\n\n refresh.set_jti()\n refresh.set_exp()\n\n data['refresh'] = str(refresh)\n\n return data" }, { "identifier": "log_audit", "path": "common/extends/handler.py", "snippet": "def log_audit(request, action_type, action, content=None, data=None, old_data=None, user=None):\n if user is None:\n user = request.user.first_name or request.user.username\n\n AuditLog.objects.create(user=user, type=action_type, action=action,\n action_ip=user_ip(request),\n content=f\"{mask_sensitive_data(content)}\\n请求方法:{request.method},请求路径:{request.path},UserAgent:{request.META['HTTP_USER_AGENT']}\",\n data=mask_sensitive_data(data),\n old_data=mask_sensitive_data(old_data))" }, { "identifier": "AuditLogFilter", "path": "common/extends/filters.py", "snippet": "class AuditLogFilter(FilterSet):\n exclude = ExcludeFilter(field_name='type', lookup_expr='in', exclude=True)\n type = CharFilter(field_name='type')\n\n class Meta:\n models = AuditLog\n fields = ['type', 'exclude']" }, { "identifier": "CustomSearchFilter", "path": "common/extends/filters.py", "snippet": "class CustomSearchFilter(SearchFilter):\n\n def get_search_fields(self, view, request):\n \"\"\"\n Search fields are obtained from the view, but the request is always\n passed to this method. 
Sub-classes can override this method to\n dynamically change the search fields based on request content.\n \"\"\"\n if hasattr(view, 'get_search_fields'):\n return view.get_search_fields()\n return getattr(view, 'search_fields', None)\n\n def get_search_terms(self, request):\n \"\"\"\n Search terms are set by a ?search=... query parameter,\n and may be comma and/or whitespace delimited.\n \"\"\"\n params = request.query_params.get(self.search_param, '')\n params = params.replace('\\x00', '') # strip null characters\n values = params.strip('+').split('+')\n if len(values) > 1:\n return values, 1\n params = params.replace(',', ' ')\n params = params.replace('|', ' ')\n return params.split(), 0\n\n def filter_queryset(self, request, queryset, view):\n search_fields = self.get_search_fields(view, request)\n search_param = self.get_search_terms(request)\n search_terms = search_param[0]\n search_condition = search_param[1]\n if not search_fields or not search_terms:\n return queryset\n\n orm_lookups = [\n self.construct_search(str(search_field))\n for search_field in search_fields\n ]\n\n base = queryset\n conditions = []\n for search_term in search_terms:\n queries = [\n models.Q(**{orm_lookup: search_term.strip()})\n for orm_lookup in orm_lookups\n ]\n conditions.append(reduce(operator.or_, queries))\n if search_condition == 1:\n queryset = queryset.filter(reduce(operator.and_, conditions))\n else:\n queryset = queryset.filter(reduce(operator.or_, conditions))\n\n if self.must_call_distinct(queryset, search_fields):\n # Filtering against a many-to-many field requires us to\n # call queryset.distinct() in order to avoid duplicate items\n # in the resulting queryset.\n # We try to avoid this if possible, for performance reasons.\n queryset = distinct(queryset, base)\n return queryset" }, { "identifier": "GlueJenkins", "path": "common/utils/JenkinsAPI.py", "snippet": "class GlueJenkins(Jenkins):\n\n def __init__(self, url=None, username=None, password=None):\n self.__url = url\n self.__username = username\n self.__password = password\n super(GlueJenkins, self).__init__(\n self.__url, self.__username, self.__password)\n\n def _get_encoded_params(self, params):\n for k, v in params.items():\n if k in [\"name\", \"msg\", \"short_name\", \"from_short_name\",\n \"to_short_name\", \"folder_url\", \"from_folder_url\", \"to_folder_url\"]:\n params[k] = quote(v.encode('utf8'))\n return params\n\n def _build_url(self, format_spec, variables=None):\n\n if variables:\n url_path = format_spec % self._get_encoded_params(variables)\n else:\n url_path = format_spec\n return str(urljoin(self.server, url_path))\n\n def assert_credential_exists(self, name, folder_name=None, domain_name='_',\n exception_message='credential[%s] does not exist.'):\n '''Raise an exception if credential does not exist in domain of folder\n\n :param name: Name of credential, ``str``\n :param folder_name: Folder name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :param exception_message: Message to use for the exception.\n Formatted with ``name``, ``domain_name``,\n and ``folder_name``\n :throws: :class:`JenkinsException` whenever the credentail\n does not exist in domain of folder\n '''\n if not self.credential_exists(name, folder_name, domain_name):\n raise JenkinsException(exception_message\n % name)\n\n def get_credential_global_config(self, name, domain_name='_'):\n '''Get configuration of credential in domain of folder.\n :param name: Name of credentail, ``str``\n :param domain_name: Domain name, default is 
'_', ``str``\n :returns: Credential configuration (XML format)\n '''\n return self.jenkins_open(requests.Request(\n 'GET', self._build_url(CONFIG_CREDENTIAL_GLOBAL, locals())\n ))\n\n def get_credential_info(self, name, folder_name=None, domain_name='_'):\n '''Get credential information dictionary in domain of folder\n\n :param name: Name of credentail, ``str``\n :param folder_name: folder_name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :returns: Dictionary of credential info, ``dict``\n '''\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(CREDENTIAL_INFO_GLOBAL, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('credential[%s] does not exist.' % name)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('credential[%s] does not exist.' % name)\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for credential[%s].' % name\n )\n\n def credential_exists(self, name, folder_name=None, domain_name='_'):\n '''Check whether a credentail exists in domain of folder\n\n :param name: Name of credentail, ``str``\n :param folder_name: Folder name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :returns: ``True`` if credentail exists, ``False`` otherwise\n '''\n try:\n return self.get_credential_info(name)['id'] == name\n except JenkinsException:\n return False\n\n def create_credential_global(self, name=None, user=None, password=None, secret=None, comment=None, domain_name='_'):\n '''Create credentail in domain of folder\n\n :param name: username\n :param password: password\n :param comment: comment, ``str``\n :param config_xml: New XML configuration, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n '''\n st = shortuuid.ShortUUID()\n st.set_alphabet(\n f\"0123456789{''.join([chr(i) for i in range(ord('a'), ord('z') + 1)])}\")\n if name is None:\n name = '-'.join(['api', st.random(length=8),\n st.random(length=4), st.random(length=12)])\n config_xml = '''<com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl>\n <scope>GLOBAL</scope>\n <id>%s</id>\n <description>[%s] Created by DevOps Platform</description>\n <username>%s</username>\n <password>%s</password>\n</com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl>''' % (name, comment, user, password)\n if user is None:\n config_xml = '''<org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl>\n <scope>GLOBAL</scope>\n <id>%s</id>\n <description>[%s] Created by DevOps Platform</description>\n <secret>%s</secret>\n</org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl>''' % (name, comment, secret)\n if self.credential_exists(name):\n raise JenkinsException('credential[%s] already exists.' 
% name)\n\n self.jenkins_open(requests.Request(\n 'POST', self._build_url(CREATE_CREDENTIAL_GLOBAL, locals()),\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n self.assert_credential_exists(\n name, exception_message='create credential[%s] failed.')\n return {'status': 0, 'data': name}\n\n def reconfig_credential_global(self, name, user=None, password=None, secret=None, comment=None, domain_name='_'):\n \"\"\"\n Reconfig credential with new config in domain of folder\n :param name: name, ``str``\n :param user:\n :param password:\n :param secret:\n :param comment:\n :param domain_name: Domain name, default is '_', ``str``\n :return:\n \"\"\"\n reconfig_url = self._build_url(CONFIG_CREDENTIAL_GLOBAL, locals())\n config_xml = self.get_credential_global_config(name)\n xml_dict = xmltodict.parse(config_xml)\n if user is None:\n xml_dict['org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl']['secret'] = secret\n if comment:\n xml_dict['org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl']['description'] = comment\n else:\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl']['username'] = user\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl']['password'] = password\n if comment:\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl'][\n 'description'] = comment\n config_xml = xmltodict.unparse(xml_dict, pretty=True)\n self.jenkins_open(requests.Request(\n 'POST', reconfig_url,\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n\n def create_job(self, name, config_xml):\n '''Create a new Jenkins job\n\n :param name: Name of Jenkins job, ``str``\n :param config_xml: config file text, ``str``\n '''\n folder_url, short_name = self._get_job_folder(name)\n if self.job_exists(name):\n raise JenkinsException('job[%s] already exists' % (name))\n\n try:\n self.jenkins_open(requests.Request(\n 'POST', self._build_url(CREATE_JOB, locals()),\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n except NotFoundException:\n raise JenkinsException('Cannot create job[%s] because folder '\n 'for the job does not exist' % (name))\n self.assert_job_exists(name, 'create[%s] failed')\n\n def reconfig_job(self, name, config_xml):\n '''Change configuration of existing Jenkins job.\n\n To create a new job, see :meth:`Jenkins.create_job`.\n\n :param name: Name of Jenkins job, ``str``\n :param config_xml: New XML configuration, ``str``\n '''\n folder_url, short_name = self._get_job_folder(name)\n reconfig_url = self._build_url(CONFIG_JOB, locals())\n self.jenkins_open(requests.Request(\n 'POST', reconfig_url,\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n\n def get_stage_describe(self, name, number, node_number):\n \"\"\" 获取 单个stage 详情 \"\"\"\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_DES, locals())\n ))\n\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_stage_logs(self, name, number, node_number):\n \"\"\" 获取 stage 执行日志\"\"\"\n folder_url, short_name = self._get_job_folder(name)\n try:\n 
response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_LOG, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_stage_info(self, name, number, depth=0):\n\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_INFO, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_flow_detail(self, job_name, build_number):\n stage_data = self.get_stage_info(name=job_name, number=build_number)\n stages = stage_data.get('stages')\n for i in stages:\n logs = ''\n try:\n # 获取stage返回信息\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(\n unquote(i['_links']['self']['href']), locals())\n ))\n if response:\n res = json.loads(response)\n for j in res['stageFlowNodes']:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(\n unquote(j['_links']['log']['href']), locals())\n ))\n res = json.loads(response)\n try:\n # 移除href html信息,保留链接文字\n import re\n pat = re.compile('<a href[^>]*>')\n logs = logs + '\\n' + \\\n pat.sub('', res['text'].replace('</a>', ''))\n except:\n pass\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (job_name, build_number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (job_name, build_number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (job_name, build_number)\n )\n\n stage_data[\"stages\"][stages.index(i)]['logs'] = logs\n return stage_data\n\n def get_queue_item(self, number, depth=0):\n '''Get information about a queued item (to-be-created job).\n\n The returned dict will have a \"why\" key if the queued item is still\n waiting for an executor.\n\n The returned dict will have an \"executable\" key if the queued item is\n running on an executor, or has completed running. Use this to\n determine the job number / URL.\n\n :param name: queue number, ``int``\n :returns: dictionary of queued information, ``dict``\n '''\n url = self._build_url(Q_ITEM, locals())\n try:\n response = self.jenkins_open(requests.Request('GET', url))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('queue number[%d] does not exist'\n % number)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('queue number[%d] does not exist' % number)\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for queue number[%d]' % number\n )\n\n def build_job(self, name, parameters=None, token=None):\n '''Trigger build job.\n\n This method returns a queue item number that you can pass to\n :meth:`Jenkins.get_queue_item`. 
Note that this queue number is only\n valid for about five minutes after the job completes, so you should\n get/poll the queue information as soon as possible to determine the\n job's URL.\n\n :param name: name of job\n :param parameters: parameters for job, or ``None``, ``dict``\n :param token: Jenkins API token\n :returns: ``int`` queue item\n '''\n response = self.jenkins_request(requests.Request(\n 'POST', self.build_job_url(name, parameters, token)))\n\n if 'Location' not in response.headers:\n raise EmptyResponseException(\n \"Header 'Location' not found in \"\n \"response from server[%s]\" % self.server)\n\n location = response.headers['Location']\n if location.endswith('/'):\n location = location[:-1]\n parts = location.split('/')\n number = int(parts[-1])\n return number\n\n def get_job_config(self, name):\n '''Get configuration of existing Jenkins job.\n\n :param name: Name of Jenkins job, ``str``\n :returns: job configuration (XML format)\n '''\n folder_url, short_name = self._get_job_folder(name)\n request = requests.Request(\n 'GET', self._build_url(CONFIG_JOB, locals()))\n return self.jenkins_open(request)\n\n def get_job_info(self, name, depth=0, fetch_all_builds=False):\n '''Get job information dictionary.\n\n :param name: Job name, ``str``\n :param depth: JSON depth, ``int``\n :param fetch_all_builds: If true, all builds will be retrieved\n from Jenkins. Otherwise, Jenkins will\n only return the most recent 100\n builds. This comes at the expense of\n an additional API call which may\n return significant amounts of\n data. ``bool``\n :returns: dictionary of job information\n '''\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(JOB_INFO, locals())\n ))\n if response:\n if fetch_all_builds:\n return self._add_missing_builds(json.loads(response))\n else:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] does not exist' % name)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] does not exist' % name)\n except ValueError:\n raise JenkinsException(\n \"Could not parse JSON info for job[%s]\" % name)" }, { "identifier": "user_ip", "path": "common/get_ip.py", "snippet": "def user_ip(request):\n \"\"\"\n 获取用户真实IP\n :param request:\n :return:\n \"\"\"\n if 'X-Real-IP' in request.META:\n return request.META['X-Real-IP']\n if 'HTTP_X_FORWARDED_FOR' in request.META:\n return request.META['HTTP_X_FORWARDED_FOR'].split(',')[0]\n if 'REMOTE_ADDR' in request.META:\n return request.META['REMOTE_ADDR'].split(',')[0]" }, { "identifier": "ThirdPartyUser", "path": "common/ext_fun.py", "snippet": "class ThirdPartyUser(object):\n\n def get_user(self):\n user = UserProfile.objects.get_or_create(username='thirdparty')[0]\n self.set_permission(user, self.get_role())\n return user\n\n def get_role(self):\n return Role.objects.get_or_create(name='thirdparty')[0]\n\n def get_perm(self):\n return Permission.objects.get_or_create(name='Jenkins回调', method='jenkins_callback')[0]\n\n def set_permission(self, user, role):\n role.permissions.set([self.get_perm().id])\n user.roles.set([role.id])" }, { "identifier": "set_redis_data", "path": "common/ext_fun.py", "snippet": "def set_redis_data(name, config):\n cache.set(f\"system:{name}\", config, None)" }, { "identifier": "get_redis_data", "path": "common/ext_fun.py", "snippet": "def get_redis_data(name):\n ret = cache.get(f\"system:{name}\")\n if not ret:\n try:\n if name == 'cicd-harbor':\n qs = 
SystemConfig.objects.filter(type=name)[0]\n else:\n qs = SystemConfig.objects.get(name=name)\n except BaseException as e:\n return None\n ret = json.loads(qs.config)\n set_redis_data(name, ret)\n\n return ret" }, { "identifier": "timeline_generate", "path": "common/ext_fun.py", "snippet": "def timeline_generate(time_range, format_type='dashboard'):\n \"\"\"\n 根据起始时间生成时间线\n\n : params format_type: 默认为dashboard, 用于概览报表粗略显示, 其它用于监控类的展示则使用更细粒度的格式\n \"\"\"\n TIME_FREQNAMES = DASHBOARD_TIME_FREQNAMES\n TIME_FORMAT = DASHBOARD_TIME_FORMAT\n if format_type == 'cmdb':\n TIME_FREQNAMES = DASHBOARD_TIME_FREQNAMES_T\n TIME_FORMAT = DASHBOARD_TIME_FORMAT_T\n start_time = time_range['start_time']\n end_time = time_range['end_time']\n time_line = rrule(\n freq=TIME_FREQNAMES[time_range['name']], dtstart=start_time, until=end_time)\n return [i.strftime(TIME_FORMAT[time_range['name']]) for i in time_line]" }, { "identifier": "time_period", "path": "common/ext_fun.py", "snippet": "def time_period(time_range='6-months', type_range='static', time_zone='Asia/Shanghai', name=None):\n \"\"\"\n 根据时间范围生成起止时间\n \"\"\"\n start_time = None\n end_time = timezone.now().astimezone(pytz.timezone(time_zone))\n if type_range == 'dynamic' and name is None:\n start_time = datetime.strptime(time_range[0], '%Y-%m-%d %H:%M:%S')\n end_time = datetime.strptime(time_range[1], '%Y-%m-%d %H:%M:%S')\n if start_time > end_time:\n start_time, end_time = end_time, start_time\n if (end_time - start_time).days >= 60:\n name = 'months'\n elif (end_time - start_time).days >= 2:\n name = 'days'\n elif (end_time - start_time).days >= 1 or (end_time - start_time).seconds > 60 * 60:\n name = 'hours'\n else:\n name = 'minutes'\n return {'name': name, 'start_time': start_time, 'end_time': end_time}\n\n if type_range == 'static':\n _time = time_range.split('-')\n if _time[-1] == 'week':\n start_time = end_time - relativedelta(days=end_time.weekday(), hours=end_time.hour, minutes=end_time.minute,\n seconds=end_time.second,\n microseconds=end_time.microsecond)\n return {'name': 'days', 'start_time': start_time, 'end_time': end_time}\n if _time[-1] == 'lastweek':\n start_time = end_time - relativedelta(days=end_time.weekday() + 7, hours=end_time.hour,\n minutes=end_time.minute, seconds=end_time.second,\n microseconds=end_time.microsecond)\n end_time = end_time - relativedelta(days=end_time.weekday(), hours=end_time.hour, minutes=end_time.minute,\n seconds=end_time.second, microseconds=end_time.microsecond)\n return {'name': 'days', 'start_time': start_time, 'end_time': end_time}\n if _time[-1] in ['today', 'yesterday']:\n start_time = end_time - relativedelta(hours=end_time.hour, minutes=end_time.minute, seconds=end_time.second,\n microseconds=end_time.microsecond)\n if _time[-1] == 'yesterday':\n end_time = start_time\n start_time = end_time - relativedelta(days=1)\n return {'name': 'hours', 'start_time': start_time, 'end_time': end_time}\n name = _time[1]\n if name is None:\n if _time[1] in ['years', 'months']:\n name = 'months'\n if _time[1] == 'months' and int(_time[0]) < 2:\n name = 'days'\n if _time[1] == 'days' and int(_time[0]) < 2:\n name = 'hours'\n start_time = end_time + relativedelta(**{_time[1]: -int(_time[0])})\n return {'name': name, 'start_time': start_time, 'end_time': end_time}" }, { "identifier": "node_filter", "path": "common/ext_fun.py", "snippet": "def node_filter(node_id, data):\n \"\"\"\n 查找节点\n\n :params: node_id int 节点ID\n :params: data list 节点数组\n \"\"\"\n for i in data:\n if i['id'] == node_id:\n print('get node', i)\n 
return i\n else:\n if i.get('children', None):\n node = node_filter(node_id, i['children'])\n if isinstance(node, (dict,)):\n return node" }, { "identifier": "test_notify", "path": "qtasks/tasks.py", "snippet": "def test_notify(receiver, notify_type='mail', robot_name=None, robot_webhook=None, robot_key=None,\n robot_type='dingtalk'):\n ret = None\n if notify_type == 'mail':\n mail_send = OmsMail()\n ret = mail_send.test_notify(receiver)\n if notify_type == 'robot':\n robot_notify = ROBOT_CATEGORIES[robot_type](robot_webhook, robot_key)\n ret = robot_notify.test_notify(receiver, robot_name)\n\n return ret" } ]
import hashlib import django_filters import datetime import time import shortuuid import json import logging from django.core.cache import cache from rest_framework import viewsets, status from rest_framework.views import APIView from rest_framework.response import Response from rest_framework.decorators import action from rest_framework import pagination from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView from rest_framework_simplejwt.exceptions import TokenError, InvalidToken from rest_framework_simplejwt.authentication import JWTAuthentication from rest_framework_simplejwt.tokens import RefreshToken, Token, OutstandingToken from rest_framework.filters import SearchFilter, OrderingFilter from django_q.tasks import async_task, result from django.contrib.auth.models import update_last_login from django.db.models import Q from django.contrib.auth import logout from common.variables import FEISHU_SYNC_USER_JOB_CACHE_KEY from dbapp.models import Menu, Permission, Role, Organization, UserProfile, AuditLog, SystemConfig, DataDict from ucenter.serializers import MenuSerializers, MenuListSerializers, PermissionListSerializers, PermissionSerializers, \ RoleListSerializers, \ RoleSerializers, OrganizationSerializers, \ UserProfileListSerializers, UserProfileSerializers, UserProfileDetailSerializers, AuditLogSerializers, \ AuditLogActivitySerializers, SystemConfigSerializers, \ SystemConfigListSerializers, DataDictSerializers from common.extends.viewsets import CustomModelViewSet, CustomModelParentViewSet from common.extends.permissions import RbacPermission from common.extends.JwtAuth import CustomInvalidToken, TokenObtainPairSerializer, TokenRefreshSerializer from common.extends.handler import log_audit from common.extends.filters import AuditLogFilter, CustomSearchFilter from common.utils.JenkinsAPI import GlueJenkins from common.get_ip import user_ip from common.ext_fun import ThirdPartyUser, set_redis_data, get_redis_data, timeline_generate, time_period, \ node_filter from qtasks.tasks import test_notify from django.conf import settings from django.contrib.auth import login, REDIRECT_FIELD_NAME from django.views.decorators.csrf import csrf_exempt, csrf_protect from django.views.decorators.cache import never_cache
10,818
def perform_update(self, serializer): serializer.save() cache.delete(f"datadict:{serializer.data['key']}:0") cache.delete(f"datadict:{serializer.data['key']}:1") @action(methods=['GET'], url_path='user', detail=False) def get_user(self, request): """ 获取用户列表 ### 传递参数 force: 0|1 force为1时强制刷新 """ _force = request.query_params.get('force', None) position = request.query_params.get('position', None) _key = str( f'project:users:{self.request.user.id}-{self.request.query_params}') try: data = cache.get(_key) except BaseException as e: cache.delete(_key) data = None if not data or _force: if position: users = UserProfile.objects.exclude( username='thirdparty').filter(position=position) else: users = UserProfile.objects.exclude(username='thirdparty') data = [{'id': i.id, 'first_name': i.first_name, 'username': i.username, 'name': i.name, 'title': i.title, 'position': i.position} for i in users] cache.set(_key, data, timeout=60 * 60 * 24) return Response({'code': 20000, 'data': data}) @action(methods=['GET'], url_path='extra', detail=False) def get_by_key(self, request): """ 通过指定key名获取 参数: key """ key_name = request.query_params.get('key', None) instance = self.queryset.get(key=key_name) serializer = self.get_serializer(instance) data = {'data': serializer.data, 'code': 20000, 'status': 'success'} return Response(data) class AuditLogViewSet(CustomModelViewSet): """ 审计日志视图 ### 审计日志权限 {'get': ('audit_list', '查看审计日志')} """ perms_map = ( {'*': ('admin', '管理员')}, {'get': ('audit_list', '查看审计日志')} ) queryset = AuditLog.objects.all() serializer_class = AuditLogSerializers filter_backends = (django_filters.rest_framework.DjangoFilterBackend, CustomSearchFilter, OrderingFilter) filter_class = AuditLogFilter filter_fields = ('user', 'type', 'action', 'action_ip', 'operator') search_fields = ('user', 'type', 'action', 'action_ip', 'content') def create(self, request, *args, **kwargs): pass def update(self, request, *args, **kwargs): pass def destroy(self, request, *args, **kwargs): pass class MenuViewSet(CustomModelParentViewSet): """ 菜单视图 ### 菜单权限 {'*': ('menu_all', '菜单管理')}, {'get': ('menu_list', '查看菜单')}, {'post': ('menu_create', '创建菜单')}, {'put': ('menu_edit', '编辑菜单')}, {'patch': ('menu_edit', '编辑菜单')}, {'delete': ('menu_delete', '删除菜单')} """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('menu_all', '菜单管理')}, {'get': ('menu_list', '查看菜单')}, {'post': ('menu_create', '创建菜单')}, {'put': ('menu_edit', '编辑菜单')}, {'patch': ('menu_edit', '编辑菜单')}, {'delete': ('menu_delete', '删除菜单')} ) queryset = Menu.objects.all() serializer_class = MenuSerializers def get_serializer_class(self): if self.action in ['list', 'retrieve']: return MenuListSerializers return MenuSerializers class PermissionViewSet(CustomModelParentViewSet): """ 权限视图 ### 查看权限列表的权限 {'*': ('perm_all', '权限管理')}, {'get': ('perm_list', '查看权限')}, """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('perm_all', '权限管理')}, {'get': ('perm_list', '查看权限')} )
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @Author : Charles Lai @Contact : [email protected] @Time : 2020/9/15 下午4:08 @FileName: views.py @Blog :https://imaojia.com """ logger = logging.getLogger('drf') DEFAULT_SESSION_TIMEOUT = None class DataDictViewSet(CustomModelParentViewSet): """ 数据字典视图 ### 数据字典权限 {'*': ('data_all', '数据字典管理')}, {'get': ('data_list', '查看数据字典')}, {'post': ('data_create', '创建数据字典')}, {'put': ('data_edit', '编辑数据字典')}, {'patch': ('data_edit', '编辑数据字典')}, {'delete': ('data_delete', '删除数据字典')} """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('data_all', '数据字典管理')}, {'get': ('data_list', '查看数据字典')}, {'post': ('data_create', '创建数据字典')}, {'put': ('data_edit', '编辑数据字典')}, {'patch': ('data_edit', '编辑数据字典')}, {'delete': ('data_delete', '删除数据字典')} ) queryset = DataDict.objects.all() serializer_class = DataDictSerializers filter_backends = ( django_filters.rest_framework.DjangoFilterBackend, SearchFilter, OrderingFilter) filter_fields = ('key', 'value') search_fields = ('key', 'value') def perform_update(self, serializer): serializer.save() cache.delete(f"datadict:{serializer.data['key']}:0") cache.delete(f"datadict:{serializer.data['key']}:1") @action(methods=['GET'], url_path='user', detail=False) def get_user(self, request): """ 获取用户列表 ### 传递参数 force: 0|1 force为1时强制刷新 """ _force = request.query_params.get('force', None) position = request.query_params.get('position', None) _key = str( f'project:users:{self.request.user.id}-{self.request.query_params}') try: data = cache.get(_key) except BaseException as e: cache.delete(_key) data = None if not data or _force: if position: users = UserProfile.objects.exclude( username='thirdparty').filter(position=position) else: users = UserProfile.objects.exclude(username='thirdparty') data = [{'id': i.id, 'first_name': i.first_name, 'username': i.username, 'name': i.name, 'title': i.title, 'position': i.position} for i in users] cache.set(_key, data, timeout=60 * 60 * 24) return Response({'code': 20000, 'data': data}) @action(methods=['GET'], url_path='extra', detail=False) def get_by_key(self, request): """ 通过指定key名获取 参数: key """ key_name = request.query_params.get('key', None) instance = self.queryset.get(key=key_name) serializer = self.get_serializer(instance) data = {'data': serializer.data, 'code': 20000, 'status': 'success'} return Response(data) class AuditLogViewSet(CustomModelViewSet): """ 审计日志视图 ### 审计日志权限 {'get': ('audit_list', '查看审计日志')} """ perms_map = ( {'*': ('admin', '管理员')}, {'get': ('audit_list', '查看审计日志')} ) queryset = AuditLog.objects.all() serializer_class = AuditLogSerializers filter_backends = (django_filters.rest_framework.DjangoFilterBackend, CustomSearchFilter, OrderingFilter) filter_class = AuditLogFilter filter_fields = ('user', 'type', 'action', 'action_ip', 'operator') search_fields = ('user', 'type', 'action', 'action_ip', 'content') def create(self, request, *args, **kwargs): pass def update(self, request, *args, **kwargs): pass def destroy(self, request, *args, **kwargs): pass class MenuViewSet(CustomModelParentViewSet): """ 菜单视图 ### 菜单权限 {'*': ('menu_all', '菜单管理')}, {'get': ('menu_list', '查看菜单')}, {'post': ('menu_create', '创建菜单')}, {'put': ('menu_edit', '编辑菜单')}, {'patch': ('menu_edit', '编辑菜单')}, {'delete': ('menu_delete', '删除菜单')} """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('menu_all', '菜单管理')}, {'get': ('menu_list', '查看菜单')}, {'post': ('menu_create', '创建菜单')}, {'put': ('menu_edit', '编辑菜单')}, {'patch': ('menu_edit', '编辑菜单')}, {'delete': ('menu_delete', '删除菜单')} ) queryset = Menu.objects.all() 
serializer_class = MenuSerializers def get_serializer_class(self): if self.action in ['list', 'retrieve']: return MenuListSerializers return MenuSerializers class PermissionViewSet(CustomModelParentViewSet): """ 权限视图 ### 查看权限列表的权限 {'*': ('perm_all', '权限管理')}, {'get': ('perm_list', '查看权限')}, """ perms_map = ( {'*': ('admin', '管理员')}, {'*': ('perm_all', '权限管理')}, {'get': ('perm_list', '查看权限')} )
queryset = Permission.objects.all()
1
2023-12-13 03:09:32+00:00
16k
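The CustomSearchFilter snippet in this record's context gives the ?search= parameter two modes: terms joined with "+" are ANDed, while comma- or pipe-separated terms are ORed. Below is a minimal sketch of that term-to-Q mapping, assuming illustrative field names and a plain icontains lookup (the real lookup prefix comes from construct_search); it is not the repository's code.

import operator
from functools import reduce
from django.db.models import Q

def build_search_q(params, search_fields):
    # "+"-joined terms -> AND of per-term conditions; ","/"|" separators -> OR,
    # mirroring get_search_terms()/filter_queryset() in CustomSearchFilter.
    values = params.strip('+').split('+')
    if len(values) > 1:
        terms, combine = values, operator.and_
    else:
        terms = params.replace(',', ' ').replace('|', ' ').split()
        combine = operator.or_
    per_term = [
        reduce(operator.or_, (Q(**{f"{field}__icontains": term.strip()}) for field in search_fields))
        for term in terms
    ]
    return reduce(combine, per_term)

# build_search_q("deploy+prod", ["name", "type"])  -> (name|type contains "deploy") AND (name|type contains "prod")
# build_search_q("deploy,rollback", ["name"])      -> name contains "deploy" OR name contains "rollback"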
liujin112/PortraitDiffusion
app.py
[ { "identifier": "AttentionBase", "path": "utils/masactrl_utils.py", "snippet": "class AttentionBase:\n def __init__(self):\n self.cur_step = 0\n self.num_att_layers = -1\n self.cur_att_layer = 0\n\n def after_step(self):\n pass\n\n def __call__(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):\n out = self.forward(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs)\n self.cur_att_layer += 1\n if self.cur_att_layer == self.num_att_layers:\n self.cur_att_layer = 0\n self.cur_step += 1\n # after step\n self.after_step()\n return out\n\n def forward(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):\n out = torch.einsum('b i j, b j d -> b i d', attn, v)\n out = rearrange(out, '(b h) n d -> b n (h d)', h=num_heads)\n return out\n\n def reset(self):\n self.cur_step = 0\n self.cur_att_layer = 0" }, { "identifier": "regiter_attention_editor_diffusers", "path": "utils/masactrl_utils.py", "snippet": "def regiter_attention_editor_diffusers(model, editor: AttentionBase):\n \"\"\"\n Register a attention editor to Diffuser Pipeline, refer from [Prompt-to-Prompt]\n \"\"\"\n def ca_forward(self, place_in_unet):\n def forward(x, encoder_hidden_states=None, attention_mask=None, context=None, mask=None):\n \"\"\"\n The attention is similar to the original implementation of LDM CrossAttention class\n except adding some modifications on the attention\n \"\"\"\n if encoder_hidden_states is not None:\n context = encoder_hidden_states\n if attention_mask is not None:\n mask = attention_mask\n\n to_out = self.to_out\n if isinstance(to_out, nn.modules.container.ModuleList):\n to_out = self.to_out[0]\n else:\n to_out = self.to_out\n\n h = self.heads\n q = self.to_q(x)\n is_cross = context is not None\n context = context if is_cross else x\n k = self.to_k(context)\n v = self.to_v(context)\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))\n\n sim = torch.einsum('b i d, b j d -> b i j', q, k) * self.scale\n\n if mask is not None:\n mask = rearrange(mask, 'b ... 
-> b (...)')\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, 'b j -> (b h) () j', h=h)\n mask = mask[:, None, :].repeat(h, 1, 1)\n sim.masked_fill_(~mask, max_neg_value)\n\n attn = sim.softmax(dim=-1)\n # the only difference\n out = editor(\n q, k, v, sim, attn, is_cross, place_in_unet,\n self.heads, scale=self.scale)\n\n return to_out(out)\n\n return forward\n\n def register_editor(net, count, place_in_unet):\n for name, subnet in net.named_children():\n if net.__class__.__name__ == 'Attention': # spatial Transformer layer\n net.forward = ca_forward(net, place_in_unet)\n return count + 1\n elif hasattr(net, 'children'):\n count = register_editor(subnet, count, place_in_unet)\n return count\n\n cross_att_count = 0\n for net_name, net in model.unet.named_children():\n if \"down\" in net_name:\n cross_att_count += register_editor(net, 0, \"down\")\n elif \"mid\" in net_name:\n cross_att_count += register_editor(net, 0, \"mid\")\n elif \"up\" in net_name:\n cross_att_count += register_editor(net, 0, \"up\")\n editor.num_att_layers = cross_att_count" }, { "identifier": "register_upblock2d", "path": "utils/free_lunch_utils.py", "snippet": "def register_upblock2d(model):\n def up_forward(self):\n def forward(hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, scale=None):\n for resnet in self.resnets:\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n #print(f\"in upblock2d, hidden states shape: {hidden_states.shape}\")\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n if is_torch_version(\">=\", \"1.11.0\"):\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet), hidden_states, temb, use_reentrant=False\n )\n else:\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet), hidden_states, temb\n )\n else:\n hidden_states = resnet(hidden_states, temb)\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n \n return forward\n \n for i, upsample_block in enumerate(model.unet.up_blocks):\n if isinstance_str(upsample_block, \"UpBlock2D\"):\n upsample_block.forward = up_forward(upsample_block)" }, { "identifier": "register_crossattn_upblock2d", "path": "utils/free_lunch_utils.py", "snippet": "def register_crossattn_upblock2d(model):\n def up_forward(self):\n def forward(\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n for resnet, attn in zip(self.resnets, self.attentions):\n # pop res hidden states\n #print(f\"in crossatten upblock2d, hidden states shape: {hidden_states.shape}\")\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, 
return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet),\n hidden_states,\n temb,\n **ckpt_kwargs,\n )\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n None, # timestep\n None, # class_labels\n cross_attention_kwargs,\n attention_mask,\n encoder_attention_mask,\n **ckpt_kwargs,\n )[0]\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n \n return forward\n \n for i, upsample_block in enumerate(model.unet.up_blocks):\n if isinstance_str(upsample_block, \"CrossAttnUpBlock2D\"):\n upsample_block.forward = up_forward(upsample_block)" }, { "identifier": "register_free_upblock2d", "path": "utils/free_lunch_utils.py", "snippet": "def register_free_upblock2d(model, b1=1.2, b2=1.4, s1=0.9, s2=0.2,source_mask=None):\n def up_forward(self):\n def forward(hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, scale=None):\n for resnet in self.resnets:\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n #print(f\"in free upblock2d, hidden states shape: {hidden_states.shape}\")\n \n if self.source_mask is not None:\n spatial_mask_source = F.interpolate(self.source_mask, (hidden_states.shape[2], hidden_states.shape[3]))\n spatial_mask_source_b1 = spatial_mask_source * self.b1 + (1 - spatial_mask_source)\n spatial_mask_source_b2 = spatial_mask_source * self.b2 + (1 - spatial_mask_source)\n # --------------- FreeU code -----------------------\n # Only operate on the first two stages\n if hidden_states.shape[1] == 1280:\n if self.source_mask is not None:\n #where in mask = 0, set hidden states unchanged\n hidden_states[:,:640] = hidden_states[:,:640] * spatial_mask_source_b1\n \n else:\n hidden_states[:,:640] = hidden_states[:,:640] * self.b1\n res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s1)\n if hidden_states.shape[1] == 640:\n\n if self.source_mask is not None:\n hidden_states[:,:320] = hidden_states[:,:320] * spatial_mask_source_b2\n else:\n hidden_states[:,:320] = hidden_states[:,:320] * self.b2\n res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s2)\n # ---------------------------------------------------------\n\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n if is_torch_version(\">=\", \"1.11.0\"):\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet), hidden_states, temb, use_reentrant=False\n )\n else:\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet), 
hidden_states, temb\n )\n else:\n hidden_states = resnet(hidden_states, temb)\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n \n return forward\n \n for i, upsample_block in enumerate(model.unet.up_blocks):\n if isinstance_str(upsample_block, \"UpBlock2D\"):\n upsample_block.forward = up_forward(upsample_block)\n setattr(upsample_block, 'b1', b1)\n setattr(upsample_block, 'b2', b2)\n setattr(upsample_block, 's1', s1)\n setattr(upsample_block, 's2', s2)\n setattr(upsample_block, 'source_mask', source_mask)" }, { "identifier": "register_free_crossattn_upblock2d", "path": "utils/free_lunch_utils.py", "snippet": "def register_free_crossattn_upblock2d(model, b1=1.2, b2=1.4, s1=0.9, s2=0.2,source_mask=None):\n def up_forward(self):\n def forward(\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n \n if self.source_mask is not None:\n \n spatial_mask_source = F.interpolate(self.source_mask, (hidden_states.shape[2], hidden_states.shape[3]))\n spatial_mask_source_b1 = spatial_mask_source * self.b1 + (1 - spatial_mask_source)\n spatial_mask_source_b2 = spatial_mask_source * self.b2 + (1 - spatial_mask_source)\n # print(f\"source mask is not none, {spatial_mask_source_b1.shape} with min {spatial_mask_source_b1.min()}\", )\n \n for resnet, attn in zip(self.resnets, self.attentions):\n # pop res hidden states\n #print(f\"in free crossatten upblock2d, hidden states shape: {hidden_states.shape}\")\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n \n # --------------- FreeU code -----------------------\n # Only operate on the first two stages\n if hidden_states.shape[1] == 1280:\n if self.source_mask is not None:\n #where in mask = 0, set hidden states unchanged\n hidden_states[:,:640] = hidden_states[:,:640] * spatial_mask_source_b1\n \n else:\n hidden_states[:,:640] = hidden_states[:,:640] * self.b1\n res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s1)\n if hidden_states.shape[1] == 640:\n if self.source_mask is not None:\n hidden_states[:,:320] = hidden_states[:,:320] * spatial_mask_source_b2\n else:\n hidden_states[:,:320] = hidden_states[:,:320] * self.b2\n res_hidden_states = Fourier_filter(res_hidden_states, threshold=1, scale=self.s2)\n # ---------------------------------------------------------\n\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(resnet),\n hidden_states,\n temb,\n **ckpt_kwargs,\n )\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n None, # timestep\n 
None, # class_labels\n cross_attention_kwargs,\n attention_mask,\n encoder_attention_mask,\n **ckpt_kwargs,\n )[0]\n else:\n hidden_states = resnet(hidden_states, temb)\n # hidden_states = attn(\n # hidden_states,\n # encoder_hidden_states=encoder_hidden_states,\n # cross_attention_kwargs=cross_attention_kwargs,\n # encoder_attention_mask=encoder_attention_mask,\n # return_dict=False,\n # )[0]\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n )[0]\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n \n return forward\n \n for i, upsample_block in enumerate(model.unet.up_blocks):\n if isinstance_str(upsample_block, \"CrossAttnUpBlock2D\"):\n upsample_block.forward = up_forward(upsample_block)\n setattr(upsample_block, 'b1', b1)\n setattr(upsample_block, 'b2', b2)\n setattr(upsample_block, 's1', s1)\n setattr(upsample_block, 's2', s2)\n setattr(upsample_block, 'source_mask', source_mask)" }, { "identifier": "MaskPromptedStyleAttentionControl", "path": "utils/style_attn_control.py", "snippet": "class MaskPromptedStyleAttentionControl(AttentionBase):\n def __init__(self, start_step=4, start_layer=10, style_attn_step=35, layer_idx=None, step_idx=None, total_steps=50, style_guidance=0.1, \n only_masked_region=False, guidance=0.0, \n style_mask=None, source_mask=None, de_bug=False):\n \"\"\"\n MaskPromptedSAC\n Args:\n start_step: the step to start mutual self-attention control\n start_layer: the layer to start mutual self-attention control\n layer_idx: list of the layers to apply mutual self-attention control\n step_idx: list the steps to apply mutual self-attention control\n total_steps: the total number of steps\n thres: the thereshold for mask thresholding\n ref_token_idx: the token index list for cross-attention map aggregation\n cur_token_idx: the token index list for cross-attention map aggregation\n mask_save_dir: the path to save the mask image\n \"\"\"\n\n super().__init__()\n self.total_steps = total_steps\n self.total_layers = 16\n self.start_step = start_step\n self.start_layer = start_layer\n self.layer_idx = layer_idx if layer_idx is not None else list(range(start_layer, self.total_layers))\n self.step_idx = step_idx if step_idx is not None else list(range(start_step, total_steps))\n print(\"using MaskPromptStyleAttentionControl\")\n print(\"MaskedSAC at denoising steps: \", self.step_idx)\n print(\"MaskedSAC at U-Net layers: \", self.layer_idx)\n \n self.de_bug = de_bug\n self.style_guidance = style_guidance\n self.only_masked_region = only_masked_region\n self.style_attn_step = style_attn_step\n self.self_attns = []\n self.cross_attns = []\n self.guidance = guidance\n self.style_mask = style_mask\n self.source_mask = source_mask\n\n\n def after_step(self):\n self.self_attns = []\n self.cross_attns = []\n\n def attn_batch(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, q_mask,k_mask, **kwargs):\n B = q.shape[0] // num_heads\n H = W = int(np.sqrt(q.shape[1]))\n q = rearrange(q, \"(b h) n d -> h (b n) d\", h=num_heads)\n k = rearrange(k, \"(b h) n d -> h (b n) d\", h=num_heads)\n v = rearrange(v, \"(b h) n d -> h (b n) d\", h=num_heads)\n\n sim = torch.einsum(\"h i d, h j d -> h i j\", q, k) * kwargs.get(\"scale\")\n \n if q_mask is not None:\n sim = sim.masked_fill(q_mask.unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n \n if k_mask is not None:\n sim = 
sim.masked_fill(k_mask.permute(1,0).unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n \n attn = sim.softmax(-1) if attn is None else attn\n\n if len(attn) == 2 * len(v):\n v = torch.cat([v] * 2)\n out = torch.einsum(\"h i j, h j d -> h i d\", attn, v)\n out = rearrange(out, \"(h1 h) (b n) d -> (h1 b) n (h d)\", b=B, h=num_heads)\n return out\n \n def attn_batch_fg_bg(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, q_mask,k_mask, **kwargs):\n B = q.shape[0] // num_heads\n H = W = int(np.sqrt(q.shape[1]))\n q = rearrange(q, \"(b h) n d -> h (b n) d\", h=num_heads)\n k = rearrange(k, \"(b h) n d -> h (b n) d\", h=num_heads)\n v = rearrange(v, \"(b h) n d -> h (b n) d\", h=num_heads)\n sim = torch.einsum(\"h i d, h j d -> h i j\", q, k) * kwargs.get(\"scale\")\n if q_mask is not None:\n sim_fg = sim.masked_fill(q_mask.unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n sim_bg = sim.masked_fill(q_mask.unsqueeze(0)==1, -torch.finfo(sim.dtype).max)\n if k_mask is not None:\n sim_fg = sim.masked_fill(k_mask.permute(1,0).unsqueeze(0)==0, -torch.finfo(sim.dtype).max)\n sim_bg = sim.masked_fill(k_mask.permute(1,0).unsqueeze(0)==1, -torch.finfo(sim.dtype).max)\n sim = torch.cat([sim_fg, sim_bg])\n attn = sim.softmax(-1)\n\n if len(attn) == 2 * len(v):\n v = torch.cat([v] * 2)\n out = torch.einsum(\"h i j, h j d -> h i d\", attn, v)\n out = rearrange(out, \"(h1 h) (b n) d -> (h1 b) n (h d)\", b=B, h=num_heads)\n return out\n \n def forward(self, q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs):\n\n \"\"\"\n Attention forward function\n \"\"\"\n \n if is_cross or self.cur_step not in self.step_idx or self.cur_att_layer // 2 not in self.layer_idx:\n return super().forward(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, **kwargs)\n\n B = q.shape[0] // num_heads // 2\n H = W = int(np.sqrt(q.shape[1]))\n \n if self.style_mask is not None and self.source_mask is not None:\n #mask = self.aggregate_cross_attn_map(idx=self.cur_token_idx) # (4, H, W)\n heigh, width = self.style_mask.shape[-2:]\n mask_style = self.style_mask# (H, W)\n mask_source = self.source_mask# (H, W)\n scale = int(np.sqrt(heigh * width / q.shape[1]))\n # res = int(np.sqrt(q.shape[1]))\n spatial_mask_source = F.interpolate(mask_source, (heigh//scale, width//scale)).reshape(-1, 1)\n spatial_mask_style = F.interpolate(mask_style, (heigh//scale, width//scale)).reshape(-1, 1)\n \n else:\n spatial_mask_source=None\n spatial_mask_style=None\n\n if spatial_mask_style is None or spatial_mask_source is None:\n \n out_s,out_c,out_t = self.style_attn_ctrl(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, spatial_mask_source,spatial_mask_style,**kwargs)\n \n else:\n if self.only_masked_region:\n out_s,out_c,out_t = self.mask_prompted_style_attn_ctrl(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, spatial_mask_source,spatial_mask_style,**kwargs)\n else:\n out_s,out_c,out_t = self.separate_mask_prompted_style_attn_ctrl(q, k, v, sim, attn, is_cross, place_in_unet, num_heads, spatial_mask_source,spatial_mask_style,**kwargs)\n\n out = torch.cat([out_s,out_c,out_t],dim=0) \n return out\n \n\n def style_attn_ctrl(self,q,k,v,sim,attn,is_cross,place_in_unet,num_heads,spatial_mask_source,spatial_mask_style,**kwargs):\n if self.de_bug:\n import pdb; pdb.set_trace()\n \n qs, qc, qt = q.chunk(3)\n\n out_s = self.attn_batch(qs, k[:num_heads], v[:num_heads], sim[:num_heads], attn[:num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n out_c = self.attn_batch(qc, k[:num_heads], v[:num_heads], 
sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n\n if self.cur_step < self.style_attn_step:\n out_t = self.attn_batch(qc, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n else:\n out_t = self.attn_batch(qt, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n if self.style_guidance>=0:\n out_t = out_c + (out_t - out_c) * self.style_guidance\n return out_s,out_c,out_t\n\n def mask_prompted_style_attn_ctrl(self,q,k,v,sim,attn,is_cross,place_in_unet,num_heads,spatial_mask_source,spatial_mask_style,**kwargs):\n qs, qc, qt = q.chunk(3)\n \n out_s = self.attn_batch(qs, k[:num_heads], v[:num_heads], sim[:num_heads], attn[:num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n out_c = self.attn_batch(qc, k[num_heads: 2*num_heads], v[num_heads:2*num_heads], sim[num_heads: 2*num_heads], attn[num_heads: 2*num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None, **kwargs)\n out_c_new = self.attn_batch(qc, k[num_heads: 2*num_heads], v[num_heads:2*num_heads], sim[num_heads: 2*num_heads], None, is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None, **kwargs)\n \n if self.de_bug:\n import pdb; pdb.set_trace()\n\n if self.cur_step < self.style_attn_step:\n out_t = out_c #self.attn_batch(qc, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n else:\n out_t_fg = self.attn_batch(qt, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_c_fg = self.attn_batch(qc, k[:num_heads], v[:num_heads], sim[:num_heads], None, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n if self.style_guidance>=0:\n out_t = out_c_fg + (out_t_fg - out_c_fg) * self.style_guidance \n \n out_t = out_t * spatial_mask_source + out_c * (1 - spatial_mask_source)\n\n if self.de_bug:\n import pdb; pdb.set_trace()\n \n # print(torch.sum(out_t* (1 - spatial_mask_source) - out_c * (1 - spatial_mask_source)))\n return out_s,out_c,out_t\n\n def separate_mask_prompted_style_attn_ctrl(self,q,k,v,sim,attn,is_cross,place_in_unet,num_heads,spatial_mask_source,spatial_mask_style,**kwargs):\n \n if self.de_bug:\n import pdb; pdb.set_trace()\n # To prevent query confusion, render fg and bg according to mask.\n qs, qc, qt = q.chunk(3)\n out_s = self.attn_batch(qs, k[:num_heads], v[:num_heads], sim[:num_heads], attn[:num_heads], is_cross, place_in_unet, num_heads, q_mask=None,k_mask=None,**kwargs)\n if self.cur_step < self.style_attn_step: \n \n out_c = self.attn_batch_fg_bg(qc, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_c_fg,out_c_bg = out_c.chunk(2)\n out_t = out_c_fg * spatial_mask_source + out_c_bg * (1 - spatial_mask_source)\n\n else:\n out_t = self.attn_batch_fg_bg(qt, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_c = self.attn_batch_fg_bg(qc, k[:num_heads], v[:num_heads], sim[:num_heads], attn, is_cross, place_in_unet, num_heads, q_mask=spatial_mask_source,k_mask=spatial_mask_style,**kwargs)\n out_t_fg,out_t_bg = out_t.chunk(2)\n 
out_c_fg,out_c_bg = out_c.chunk(2)\n if self.style_guidance>=0:\n out_t_fg = out_c_fg + (out_t_fg - out_c_fg) * self.style_guidance \n out_t_bg = out_c_bg + (out_t_bg - out_c_bg) * self.style_guidance \n out_t = out_t_fg * spatial_mask_source + out_t_bg * (1 - spatial_mask_source)\n \n return out_s,out_t,out_t" }, { "identifier": "MasaCtrlPipeline", "path": "utils/pipeline.py", "snippet": "class MasaCtrlPipeline(StableDiffusionPipeline):\n\n def next_step(\n self,\n model_output: torch.FloatTensor,\n timestep: int,\n x: torch.FloatTensor,\n eta=0.,\n verbose=False\n ):\n \"\"\"\n Inverse sampling for DDIM Inversion\n \"\"\"\n if verbose:\n print(\"timestep: \", timestep)\n next_step = timestep\n timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999)\n alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod\n alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step]\n beta_prod_t = 1 - alpha_prod_t\n pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5\n pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output\n x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir\n return x_next, pred_x0\n\n def step(\n self,\n model_output: torch.FloatTensor,\n timestep: int,\n x: torch.FloatTensor,\n eta: float=0.0,\n verbose=False,\n ):\n \"\"\"\n predict the sampe the next step in the denoise process.\n \"\"\"\n prev_timestep = timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps\n alpha_prod_t = self.scheduler.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep > 0 else self.scheduler.final_alpha_cumprod\n beta_prod_t = 1 - alpha_prod_t\n pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5\n pred_dir = (1 - alpha_prod_t_prev)**0.5 * model_output\n x_prev = alpha_prod_t_prev**0.5 * pred_x0 + pred_dir\n return x_prev, pred_x0\n\n @torch.no_grad()\n def image2latent(self, image):\n DEVICE = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n if type(image) is Image:\n image = np.array(image)\n image = torch.from_numpy(image).float() / 127.5 - 1\n image = image.permute(2, 0, 1).unsqueeze(0).to(DEVICE)\n # input image density range [-1, 1]\n latents = self.vae.encode(image)['latent_dist'].mean\n latents = latents * 0.18215\n return latents\n\n @torch.no_grad()\n def latent2image(self, latents, return_type='np'):\n latents = 1 / 0.18215 * latents.detach()\n image = self.vae.decode(latents)['sample']\n if return_type == 'np':\n image = (image / 2 + 0.5).clamp(0, 1)\n image = image.cpu().permute(0, 2, 3, 1).numpy()[0]\n image = (image * 255).astype(np.uint8)\n elif return_type == \"pt\":\n image = (image / 2 + 0.5).clamp(0, 1)\n\n return image\n\n def latent2image_grad(self, latents):\n latents = 1 / 0.18215 * latents\n image = self.vae.decode(latents)['sample']\n\n return image # range [-1, 1]\n\n @torch.no_grad()\n def __call__(\n self,\n prompt,\n batch_size=1,\n height=512,\n width=512,\n num_inference_steps=50,\n guidance_scale=7.5,\n eta=0.0,\n latents=None,\n unconditioning=None,\n neg_prompt=None,\n ref_intermediate_latents=None,\n return_intermediates=False,\n lcm_lora=False,\n de_bug=False,\n **kwds):\n DEVICE = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n if isinstance(prompt, list):\n batch_size = len(prompt)\n elif isinstance(prompt, str):\n if batch_size > 1:\n prompt = [prompt] * batch_size\n\n 
# text embeddings\n text_input = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n\n text_embeddings = self.text_encoder(text_input.input_ids.to(DEVICE))[0]\n print(\"input text embeddings :\", text_embeddings.shape)\n # if kwds.get(\"dir\"):\n # dir = text_embeddings[-2] - text_embeddings[-1]\n # u, s, v = torch.pca_lowrank(dir.transpose(-1, -2), q=1, center=True)\n # text_embeddings[-1] = text_embeddings[-1] + kwds.get(\"dir\") * v\n # print(u.shape)\n # print(v.shape)\n\n # define initial latents\n latents_shape = (batch_size, self.unet.config.in_channels, height//8, width//8)\n if latents is None:\n latents = torch.randn(latents_shape, device=DEVICE)\n else:\n assert latents.shape == latents_shape, f\"The shape of input latent tensor {latents.shape} should equal to predefined one.\"\n\n # unconditional embedding for classifier free guidance\n if guidance_scale > 1.:\n max_length = text_input.input_ids.shape[-1]\n if neg_prompt:\n uc_text = neg_prompt\n else:\n uc_text = \"\"\n # uc_text = \"ugly, tiling, poorly drawn hands, poorly drawn feet, body out of frame, cut off, low contrast, underexposed, distorted face\"\n unconditional_input = self.tokenizer(\n [uc_text] * batch_size,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n # unconditional_input.input_ids = unconditional_input.input_ids[:, 1:]\n unconditional_embeddings = self.text_encoder(unconditional_input.input_ids.to(DEVICE))[0]\n text_embeddings = torch.cat([unconditional_embeddings, text_embeddings], dim=0)\n\n print(\"latents shape: \", latents.shape)\n # iterative sampling\n self.scheduler.set_timesteps(num_inference_steps)\n # print(\"Valid timesteps: \", reversed(self.scheduler.timesteps))\n latents_list = [latents]\n pred_x0_list = [latents]\n if de_bug:\n import pdb;pdb.set_trace()\n for i, t in enumerate(tqdm(self.scheduler.timesteps, desc=\"DDIM Sampler\")):\n if ref_intermediate_latents is not None:\n # note that the batch_size >= 2\n latents_ref = ref_intermediate_latents[-1 - i]\n _, latents_cur = latents.chunk(2)\n latents = torch.cat([latents_ref, latents_cur])\n\n if guidance_scale > 1.:\n model_inputs = torch.cat([latents] * 2)\n else:\n model_inputs = latents\n if unconditioning is not None and isinstance(unconditioning, list):\n _, text_embeddings = text_embeddings.chunk(2)\n text_embeddings = torch.cat([unconditioning[i].expand(*text_embeddings.shape), text_embeddings]) \n # predict tghe noise\n noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample\n if guidance_scale > 1.:\n noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, dim=0)\n noise_pred = noise_pred_uncon + guidance_scale * (noise_pred_con - noise_pred_uncon)\n # compute the previous noise sample x_t -> x_t-1\n if lcm_lora:\n latents, pred_x0 = self.scheduler.step(noise_pred, t, latents, return_dict=False)\n else:\n latents, pred_x0 = self.step(noise_pred, t, latents)\n latents_list.append(latents)\n pred_x0_list.append(pred_x0)\n\n image = self.latent2image(latents, return_type=\"pt\")\n if return_intermediates:\n pred_x0_list = [self.latent2image(img, return_type=\"pt\") for img in pred_x0_list]\n latents_list = [self.latent2image(img, return_type=\"pt\") for img in latents_list]\n return image, pred_x0_list, latents_list\n return image\n\n @torch.no_grad()\n def invert(\n self,\n image: torch.Tensor,\n prompt,\n num_inference_steps=50,\n guidance_scale=7.5,\n eta=0.0,\n return_intermediates=False,\n **kwds):\n \"\"\"\n invert a real image 
into noise map with determinisc DDIM inversion\n \"\"\"\n DEVICE = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n batch_size = image.shape[0]\n if isinstance(prompt, list):\n if batch_size == 1:\n image = image.expand(len(prompt), -1, -1, -1)\n elif isinstance(prompt, str):\n if batch_size > 1:\n prompt = [prompt] * batch_size\n\n # text embeddings\n text_input = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n text_embeddings = self.text_encoder(text_input.input_ids.to(DEVICE))[0]\n print(\"input text embeddings :\", text_embeddings.shape)\n # define initial latents\n latents = self.image2latent(image)\n start_latents = latents\n # print(latents)\n # exit()\n # unconditional embedding for classifier free guidance\n if guidance_scale > 1.:\n max_length = text_input.input_ids.shape[-1]\n unconditional_input = self.tokenizer(\n [\"\"] * batch_size,\n padding=\"max_length\",\n max_length=77,\n return_tensors=\"pt\"\n )\n unconditional_embeddings = self.text_encoder(unconditional_input.input_ids.to(DEVICE))[0]\n text_embeddings = torch.cat([unconditional_embeddings, text_embeddings], dim=0)\n\n print(\"latents shape: \", latents.shape)\n # interative sampling\n self.scheduler.set_timesteps(num_inference_steps)\n print(\"Valid timesteps: \", reversed(self.scheduler.timesteps))\n # print(\"attributes: \", self.scheduler.__dict__)\n latents_list = [latents]\n pred_x0_list = [latents]\n for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc=\"DDIM Inversion\")):\n if guidance_scale > 1.:\n model_inputs = torch.cat([latents] * 2)\n else:\n model_inputs = latents\n\n # predict the noise\n noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample\n if guidance_scale > 1.:\n noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, dim=0)\n noise_pred = noise_pred_uncon + guidance_scale * (noise_pred_con - noise_pred_uncon)\n # compute the previous noise sample x_t-1 -> x_t\n latents, pred_x0 = self.next_step(noise_pred, t, latents)\n latents_list.append(latents)\n pred_x0_list.append(pred_x0)\n\n if return_intermediates:\n # return the intermediate laters during inversion\n # pred_x0_list = [self.latent2image(img, return_type=\"pt\") for img in pred_x0_list]\n return latents, latents_list\n return latents, start_latents" } ]
import os
import torch
import random
import numpy as np
import gradio as gr
import torch.nn.functional as F
from glob import glob
from datetime import datetime
from diffusers import StableDiffusionPipeline
from diffusers import DDIMScheduler, LCMScheduler
from PIL import Image,ImageDraw
from utils.masactrl_utils import (AttentionBase, regiter_attention_editor_diffusers)
from utils.free_lunch_utils import register_upblock2d,register_crossattn_upblock2d,register_free_upblock2d, register_free_crossattn_upblock2d
from utils.style_attn_control import MaskPromptedStyleAttentionControl
from utils.pipeline import MasaCtrlPipeline
from torchvision.utils import save_image
from segment_anything import sam_model_registry, SamPredictor
12,178
return None else: base_model = self.personalized_model_list[base_model_dropdown] mid_model = StableDiffusionPipeline.from_single_file(base_model) self.pipeline.vae = mid_model.vae self.pipeline.unet = mid_model.unet self.pipeline.text_encoder = mid_model.text_encoder self.pipeline.to(self.device) self.personal_model_loaded = base_model_dropdown.split('.')[0] print(f'load {base_model_dropdown} model success!') return gr.Dropdown() def update_lora_model(self, lora_model_dropdown,lora_alpha_slider): if self.pipeline is None: gr.Info(f"Please select a pretrained model path.") return None else: if lora_model_dropdown == "none": self.pipeline.unfuse_lora() self.pipeline.unload_lora_weights() self.lora_loaded = None print("Restore lora.") else: lora_model_path = self.lora_model_list[lora_model_dropdown] self.pipeline.load_lora_weights(lora_model_path) self.pipeline.fuse_lora(lora_alpha_slider) self.lora_loaded = lora_model_dropdown.split('.')[0] print(f'load {lora_model_dropdown} LoRA Model Success!') return gr.Dropdown() def load_lcm_lora(self, lora_alpha_slider=1.0): # set scheduler self.pipeline = MasaCtrlPipeline.from_pretrained(self.stable_diffusion_list[0]).to(self.device) self.pipeline.scheduler = LCMScheduler.from_config(self.pipeline.scheduler.config) # load LCM-LoRA self.pipeline.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") self.pipeline.fuse_lora(lora_alpha_slider) self.lcm_lora_loaded = True print(f'load LCM-LoRA model success!') def generate(self, source, style, source_mask, style_mask, start_step, start_layer, Style_attn_step, Method, Style_Guidance, ddim_steps, scale, seed, de_bug, target_prompt, negative_prompt_textbox, inter_latents, freeu, b1, b2, s1, s2, width_slider,height_slider, ): os.makedirs(self.savedir, exist_ok=True) os.makedirs(self.savedir_sample, exist_ok=True) os.makedirs(self.savedir_mask, exist_ok=True) model = self.pipeline if seed != -1 and seed != "": torch.manual_seed(int(seed)) else: torch.seed() seed = torch.initial_seed() sample_count = len(os.listdir(self.savedir_sample)) os.makedirs(os.path.join(self.savedir_mask, f"results_{sample_count}"), exist_ok=True) # ref_prompt = [source_prompt, target_prompt] # prompts = ref_prompt+[''] ref_prompt = [target_prompt, target_prompt] prompts = ref_prompt+[target_prompt] source_image,style_image,source_mask,style_mask = load_mask_images(source,style,source_mask,style_mask,self.device,width_slider,height_slider,out_dir=os.path.join(self.savedir_mask, f"results_{sample_count}")) # global START_CODE, LATENTS_LIST with torch.no_grad(): #import pdb;pdb.set_trace() #prev_source if self.start_code is None and self.latents_list is None: content_style = torch.cat([style_image, source_image], dim=0) editor = AttentionBase() regiter_attention_editor_diffusers(model, editor) st_code, latents_list = model.invert(content_style, ref_prompt, guidance_scale=scale, num_inference_steps=ddim_steps, return_intermediates=True) start_code = torch.cat([st_code, st_code[1:]], dim=0) self.start_code = start_code self.latents_list = latents_list else: start_code = self.start_code latents_list = self.latents_list print('------------------------------------------ Use previous latents ------------------------------------------ ') #["Without mask", "Only masked region", "Seperate Background Foreground"] if Method == "Without mask": style_mask = None source_mask = None only_masked_region = False elif Method == "Only masked region": assert style_mask is not None and source_mask is not None only_masked_region = True else: assert style_mask 
is not None and source_mask is not None only_masked_region = False controller = MaskPromptedStyleAttentionControl(start_step, start_layer, style_attn_step=Style_attn_step, style_guidance=Style_Guidance, style_mask=style_mask, source_mask=source_mask, only_masked_region=only_masked_region, guidance=scale, de_bug=de_bug, ) if freeu: # model.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) print(f'++++++++++++++++++ Run with FreeU {b1}_{b2}_{s1}_{s2} ++++++++++++++++') if Method != "Without mask": register_free_upblock2d(model, b1=b1, b2=b2, s1=s1, s2=s1,source_mask=source_mask)
css = """ .toolbutton { margin-buttom: 0em 0em 0em 0em; max-width: 2.5em; min-width: 2.5em !important; height: 2.5em; } """ class GlobalText: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.personalized_model_dir = './models/Stable-diffusion' self.lora_model_dir = './models/Lora' self.savedir = os.path.join(self.basedir, "samples", datetime.now().strftime("Gradio-%Y-%m-%dT%H-%M-%S")) self.savedir_sample = os.path.join(self.savedir, "sample") self.savedir_mask = os.path.join(self.savedir, "mask") self.stable_diffusion_list = ["runwayml/stable-diffusion-v1-5", "latent-consistency/lcm-lora-sdv1-5"] self.personalized_model_list = [] self.lora_model_list = [] # config models self.tokenizer = None self.text_encoder = None self.vae = None self.unet = None self.pipeline = None self.lora_loaded = None self.lcm_lora_loaded = False self.personal_model_loaded = None self.sam_predictor = None self.lora_model_state_dict = {} self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") # self.refresh_stable_diffusion() self.refresh_personalized_model() self.reset_start_code() def load_base_pipeline(self, model_path): print(f'loading {model_path} model') scheduler = DDIMScheduler.from_pretrained(model_path,subfolder="scheduler") self.pipeline = MasaCtrlPipeline.from_pretrained(model_path, scheduler=scheduler).to(self.device) def refresh_stable_diffusion(self): self.load_base_pipeline(self.stable_diffusion_list[0]) self.lora_loaded = None self.personal_model_loaded = None self.lcm_lora_loaded = False return self.stable_diffusion_list[0] def refresh_personalized_model(self): personalized_model_list = glob(os.path.join(self.personalized_model_dir, "**/*.safetensors"), recursive=True) self.personalized_model_list = {os.path.basename(file): file for file in personalized_model_list} lora_model_list = glob(os.path.join(self.lora_model_dir, "**/*.safetensors"), recursive=True) self.lora_model_list = {os.path.basename(file): file for file in lora_model_list} def update_stable_diffusion(self, stable_diffusion_dropdown): if stable_diffusion_dropdown == 'latent-consistency/lcm-lora-sdv1-5': self.load_lcm_lora() else: self.load_base_pipeline(stable_diffusion_dropdown) self.lora_loaded = None self.personal_model_loaded = None return gr.Dropdown() def update_base_model(self, base_model_dropdown): if self.pipeline is None: gr.Info(f"Please select a pretrained model path.") return None else: base_model = self.personalized_model_list[base_model_dropdown] mid_model = StableDiffusionPipeline.from_single_file(base_model) self.pipeline.vae = mid_model.vae self.pipeline.unet = mid_model.unet self.pipeline.text_encoder = mid_model.text_encoder self.pipeline.to(self.device) self.personal_model_loaded = base_model_dropdown.split('.')[0] print(f'load {base_model_dropdown} model success!') return gr.Dropdown() def update_lora_model(self, lora_model_dropdown,lora_alpha_slider): if self.pipeline is None: gr.Info(f"Please select a pretrained model path.") return None else: if lora_model_dropdown == "none": self.pipeline.unfuse_lora() self.pipeline.unload_lora_weights() self.lora_loaded = None print("Restore lora.") else: lora_model_path = self.lora_model_list[lora_model_dropdown] self.pipeline.load_lora_weights(lora_model_path) self.pipeline.fuse_lora(lora_alpha_slider) self.lora_loaded = lora_model_dropdown.split('.')[0] print(f'load {lora_model_dropdown} LoRA Model Success!') return gr.Dropdown() def 
load_lcm_lora(self, lora_alpha_slider=1.0): # set scheduler self.pipeline = MasaCtrlPipeline.from_pretrained(self.stable_diffusion_list[0]).to(self.device) self.pipeline.scheduler = LCMScheduler.from_config(self.pipeline.scheduler.config) # load LCM-LoRA self.pipeline.load_lora_weights("latent-consistency/lcm-lora-sdv1-5") self.pipeline.fuse_lora(lora_alpha_slider) self.lcm_lora_loaded = True print(f'load LCM-LoRA model success!') def generate(self, source, style, source_mask, style_mask, start_step, start_layer, Style_attn_step, Method, Style_Guidance, ddim_steps, scale, seed, de_bug, target_prompt, negative_prompt_textbox, inter_latents, freeu, b1, b2, s1, s2, width_slider,height_slider, ): os.makedirs(self.savedir, exist_ok=True) os.makedirs(self.savedir_sample, exist_ok=True) os.makedirs(self.savedir_mask, exist_ok=True) model = self.pipeline if seed != -1 and seed != "": torch.manual_seed(int(seed)) else: torch.seed() seed = torch.initial_seed() sample_count = len(os.listdir(self.savedir_sample)) os.makedirs(os.path.join(self.savedir_mask, f"results_{sample_count}"), exist_ok=True) # ref_prompt = [source_prompt, target_prompt] # prompts = ref_prompt+[''] ref_prompt = [target_prompt, target_prompt] prompts = ref_prompt+[target_prompt] source_image,style_image,source_mask,style_mask = load_mask_images(source,style,source_mask,style_mask,self.device,width_slider,height_slider,out_dir=os.path.join(self.savedir_mask, f"results_{sample_count}")) # global START_CODE, LATENTS_LIST with torch.no_grad(): #import pdb;pdb.set_trace() #prev_source if self.start_code is None and self.latents_list is None: content_style = torch.cat([style_image, source_image], dim=0) editor = AttentionBase() regiter_attention_editor_diffusers(model, editor) st_code, latents_list = model.invert(content_style, ref_prompt, guidance_scale=scale, num_inference_steps=ddim_steps, return_intermediates=True) start_code = torch.cat([st_code, st_code[1:]], dim=0) self.start_code = start_code self.latents_list = latents_list else: start_code = self.start_code latents_list = self.latents_list print('------------------------------------------ Use previous latents ------------------------------------------ ') #["Without mask", "Only masked region", "Seperate Background Foreground"] if Method == "Without mask": style_mask = None source_mask = None only_masked_region = False elif Method == "Only masked region": assert style_mask is not None and source_mask is not None only_masked_region = True else: assert style_mask is not None and source_mask is not None only_masked_region = False controller = MaskPromptedStyleAttentionControl(start_step, start_layer, style_attn_step=Style_attn_step, style_guidance=Style_Guidance, style_mask=style_mask, source_mask=source_mask, only_masked_region=only_masked_region, guidance=scale, de_bug=de_bug, ) if freeu: # model.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) print(f'++++++++++++++++++ Run with FreeU {b1}_{b2}_{s1}_{s2} ++++++++++++++++') if Method != "Without mask": register_free_upblock2d(model, b1=b1, b2=b2, s1=s1, s2=s1,source_mask=source_mask)
register_free_crossattn_upblock2d(model, b1=b1, b2=b2, s1=s1, s2=s1,source_mask=source_mask)
5
2023-12-06 01:18:39+00:00
16k
AsuradaYuci/TF-CLIP
datasets/make_dataloader_clipreid.py
[ { "identifier": "VideoDataset", "path": "datasets/video_loader_xh.py", "snippet": "class VideoDataset(Dataset):\n \"\"\"Video Person ReID Dataset.\n Note batch data has shape (batch, seq_len, channel, height, width).\n \"\"\"\n sample_methods = ['evenly', 'random', 'dense']\n\n def __init__(self, dataset, seq_len=15, sample='evenly', transform=None):\n self.dataset = dataset\n self.seq_len = seq_len\n self.sample = sample\n self.transform = transform\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, indices):\n if isinstance(indices, (tuple, list)):\n return [self.__get_single_item__(index) for index in indices]\n return self.__get_single_item__(indices)\n\n def __get_single_item__(self, index):\n S = self.seq_len # 4\n img_paths, pid, camid, trackid = self.dataset[index]\n num = len(img_paths) # 27\n \"\"\"rss 操作\"\"\"\n sample_clip = []\n frame_indices = list(range(num))\n if num < S: # 8 = chunk的数目,每个tracklet分成8段,每段随机选一帧\n strip = list(range(num)) + [frame_indices[-1]] * (S - num)\n for s in range(S):\n pool = strip[s * 1:(s + 1) * 1]\n sample_clip.append(list(pool))\n else:\n inter_val = math.ceil(num / S)\n strip = list(range(num)) + [frame_indices[-1]] * (inter_val * S - num)\n for s in range(S):\n pool = strip[inter_val * s:inter_val * (s + 1)]\n sample_clip.append(list(pool))\n\n sample_clip = np.array(sample_clip)\n\n if self.sample == 'random':\n \"\"\"\n Randomly sample seq_len consecutive frames from num frames,\n if num is smaller than seq_len, then replicate items.\n This sampling strategy is used in training phase.\n \"\"\"\n frame_indices = list(range(num))\n rand_end = max(0, len(frame_indices) - self.seq_len - 1)\n begin_index = random.randint(0, rand_end)\n end_index = min(begin_index + self.seq_len, len(frame_indices))\n\n indices = frame_indices[begin_index:end_index]\n\n for index in indices:\n if len(indices) >= self.seq_len:\n break\n indices.append(index)\n indices = np.array(indices)\n imgseq = []\n for index in indices:\n index = int(index)\n img_path = img_paths[index]\n img = Image.open(img_path).convert('RGB') # 3x224x112\n imgseq.append(img)\n\n seq = [imgseq]\n if self.transform is not None:\n seq = self.transform(seq)\n\n img_tensor = torch.stack(seq[0], dim=0) # seq_len 4x3x224x112\n flow_tensor = None\n\n return img_tensor, pid, camid\n\n elif self.sample == 'dense':\n \"\"\"\n Sample all frames in a video into a list of clips, each clip contains seq_len frames, batch_size needs to be set to 1.\n This sampling strategy is used in test phase.\n \"\"\"\n cur_index = 0\n frame_indices = list(range(num)) # 27\n indices_list = []\n while num-cur_index > self.seq_len:\n indices_list.append(frame_indices[cur_index:cur_index+self.seq_len])\n cur_index += self.seq_len\n\n last_seq = frame_indices[cur_index:]\n\n for index in last_seq:\n if len(last_seq) >= self.seq_len:\n break\n last_seq.append(index)\n\n indices_list.append(last_seq) # <class 'list'>: [[0, 1, 2, 3, 4, 5, 6, 7], [8, 9, 10, 11, 12, 13, 14, 15], [16, 17, 18, 19, 20, 21, 22, 23], [24, 25, 24, 25, 24, 25, 24, 25]]\n imgs_list = []\n for indices in indices_list: # <class 'list'>: [0, 1, 2, 3, 4, 5, 6, 7]\n imgs = []\n for index in indices:\n index = int(index)\n img_path = img_paths[index]\n img = Image.open(img_path).convert('RGB')\n # img = img.unsqueeze(0)\n imgs.append(img)\n\n imgs = [imgs]\n if self.transform is not None:\n imgs = self.transform(imgs)\n imgs = torch.stack(imgs[0], 0) # torch.Size([8, 3, 224, 112])\n imgs_list.append(imgs)\n imgs_tensor = 
torch.stack(imgs_list) # torch.Size([13, 8, 3, 224, 112])\n # flow_tensor = None\n return imgs_tensor, pid, camid, trackid, \"\"\n\n elif self.sample == 'rrs_train':\n idx = np.random.choice(sample_clip.shape[1], sample_clip.shape[0])\n number = sample_clip[np.arange(len(sample_clip)), idx]\n # imgseq = []\n img_paths = np.array(list(img_paths)) # img_paths原始为tuple,转换成数组\n # flow_paths = np.array([img_path.replace('Mars', 'Mars_optical') for img_path in img_paths])\n imgseq = [Image.open(img_path).convert('RGB') for img_path in img_paths[number]]\n # flowseq = [Image.open(flow_path).convert('RGB') for flow_path in flow_paths[number]]\n\n seq = [imgseq]\n # seq = [imgseq, flowseq]\n if self.transform is not None:\n seq = self.transform(seq)\n\n img_tensor = torch.stack(seq[0], dim=0) # seq_len 4x3x224x112\n # flow_tensor = torch.stack(seq[1], dim=0) # seq_len 4x3x224x112\n\n return img_tensor, pid, camid, trackid, \"\"\n\n elif self.sample == 'rrs_test':\n number = sample_clip[:, 0]\n img_paths = np.array(list(img_paths)) # img_paths原始为tuple,转换成数组\n # flow_paths = np.array([img_path.replace('Mars', 'Mars_optical') for img_path in img_paths])\n imgseq = [Image.open(img_path).convert('RGB') for img_path in img_paths[number]]\n # flowseq = [Image.open(flow_path).convert('RGB') for flow_path in flow_paths[number]]\n\n seq = [imgseq]\n # seq = [imgseq, flowseq]\n if self.transform is not None:\n seq = self.transform(seq)\n img_tensor = torch.stack(seq[0], dim=0) # torch.Size([8, 3, 256, 128])\n # flow_tensor = torch.stack(seq[1], dim=0)\n return img_tensor, pid, camid, trackid, \"\"\n else:\n raise KeyError(\"Unknown sample method: {}. Expected one of {}\".format(self.sample, self.sample_methods))" }, { "identifier": "RandomIdentitySampler", "path": "datasets/samplers.py", "snippet": "class RandomIdentitySampler(Sampler):\n \"\"\"\n Randomly sample N identities, then for each identity,\n randomly sample K instances, therefore batch size is N*K.\n\n Args:\n - data_source (Dataset): dataset to sample from.\n - num_instances (int): number of instances per identity.\n \"\"\"\n def __init__(self, data_source, batch_size, num_instances=4):\n self.data_source = data_source\n self.batch_size = batch_size # 16\n self.num_instances = num_instances # 4\n self.num_pids_per_batch = self.batch_size // self.num_instances # 4\n self.index_dic = defaultdict(list)\n for index, (_, pid, _, _) in enumerate(data_source):\n self.index_dic[pid].append(index)\n self.pids = list(self.index_dic.keys())\n self.num_identities = len(self.pids)\n\n # compute number of examples in an epoch\n self.length = 0\n for pid in self.pids:\n idxs = self.index_dic[pid]\n num = len(idxs)\n if num < self.num_instances:\n num = self.num_instances\n self.length += num - num % self.num_instances # 7532\n\n def __iter__(self):\n\n batch_idxs_dict = defaultdict(list)\n\n for pid in self.pids: # 每个Pid选择4个序列\n idxs = copy.deepcopy(self.index_dic[pid])\n if len(idxs) < self.num_instances:\n idxs = np.random.choice(idxs, size=self.num_instances, replace=True)\n random.shuffle(idxs)\n batch_idxs = []\n for idx in idxs:\n batch_idxs.append(idx)\n if len(batch_idxs) == self.num_instances:\n batch_idxs_dict[pid].append(batch_idxs)\n batch_idxs = []\n\n avai_pids = copy.deepcopy(self.pids)\n final_idxs = []\n\n while len(avai_pids) >= self.num_pids_per_batch: # 选择P个ID\n selected_pids = random.sample(avai_pids, self.num_pids_per_batch)\n for pid in selected_pids:\n batch_idxs = batch_idxs_dict[pid].pop(0)\n final_idxs.extend(batch_idxs)\n if 
len(batch_idxs_dict[pid]) == 0:\n avai_pids.remove(pid)\n\n return iter(final_idxs)\n\n def __len__(self):\n return self.length" }, { "identifier": "RandomIdentitySamplerForSeq", "path": "datasets/samplers.py", "snippet": "class RandomIdentitySamplerForSeq(Sampler):\n \"\"\"\n Randomly sample N identities, then for each identity,\n randomly sample K instances, therefore batch size is N*K.\n\n Args:\n - data_source (Dataset): dataset to sample from.\n - num_instances (int): number of instances per identity.\n \"\"\"\n def __init__(self, data_source, batch_size, num_instances=4):\n self.data_source = data_source\n self.batch_size = batch_size # 256\n self.num_instances = num_instances # K=4\n self.num_pids_per_batch = self.batch_size // self.num_instances\n self.index_dic = defaultdict(list)\n for index, (_, pid, _, _) in enumerate(data_source):\n self.index_dic[pid].append(index)\n self.pids = list(self.index_dic.keys())\n self.num_identities = len(self.pids) # 625\n\n # compute number of examples in an epoch\n self.length = 0\n for pid in self.pids:\n idxs = self.index_dic[pid]\n num = len(idxs)\n if num < self.num_instances:\n num = self.num_instances\n self.length += num - num % self.num_instances\n\n def __iter__(self):\n\n batch_idxs_dict = defaultdict(list)\n\n for pid in self.pids:\n idxs = copy.deepcopy(self.index_dic[pid])\n if len(idxs) < self.num_instances:\n idxs = np.random.choice(idxs, size=self.num_instances, replace=True)\n random.shuffle(idxs)\n batch_idxs = []\n for idx in idxs:\n batch_idxs.append(idx)\n if len(batch_idxs) == self.num_instances:\n batch_idxs_dict[pid].append(batch_idxs)\n batch_idxs = []\n\n avai_pids = copy.deepcopy(self.pids)\n final_idxs = []\n\n while len(avai_pids) >= self.num_pids_per_batch:\n selected_pids = random.sample(avai_pids, self.num_pids_per_batch)\n for pid in selected_pids:\n batch_idxs = batch_idxs_dict[pid].pop(0)\n final_idxs.extend(batch_idxs)\n if len(batch_idxs_dict[pid]) == 0:\n avai_pids.remove(pid)\n\n return iter(final_idxs)\n\n def __len__(self):\n return self.length" }, { "identifier": "RandomIdentitySamplerWYQ", "path": "datasets/samplers.py", "snippet": "class RandomIdentitySamplerWYQ(Sampler):\n \"\"\"\n Randomly sample N identities, then for each identity,\n randomly sample K instances, therefore batch size is N*K.\n\n Code imported from https://github.com/Cysu/open-reid/blob/master/reid/utils/data/sampler.py.\n\n Args:\n data_source (Dataset): dataset to sample from.\n num_instances (int): number of instances per identity.\n \"\"\"\n def __init__(self, data_source, num_instances=4):\n super(RandomIdentitySampler).__init__()\n self.data_source = data_source\n self.num_instances = num_instances\n self.index_dic = defaultdict(list)\n for index, (_, pid, _, _) in enumerate(data_source):\n self.index_dic[pid].append(index)\n self.pids = list(self.index_dic.keys())\n self.num_identities = len(self.pids)\n\n def __iter__(self):\n indices = torch.randperm(self.num_identities)\n ret = []\n for i in indices:\n pid = self.pids[i]\n t = self.index_dic[pid]\n replace = False if len(t) >= self.num_instances else True\n t = np.random.choice(t, size=self.num_instances, replace=replace)\n ret.extend(t)\n # print(ret)\n return iter(ret)\n\n def __len__(self):\n return self.num_identities * self.num_instances" }, { "identifier": "SeqTrainPreprocessor", "path": "datasets/seqpreprocessor.py", "snippet": "class SeqTrainPreprocessor(object):\n def __init__(self, seqset, dataset, seq_len, transform=None):\n super(SeqTrainPreprocessor, 
self).__init__()\n self.seqset = seqset\n self.identities = dataset.identities\n self.transform = transform\n self.seq_len = seq_len\n self.root = [dataset.images_dir]\n self.root.append(dataset.other_dir)\n\n def __len__(self):\n return len(self.seqset)\n\n def __getitem__(self, indices):\n if isinstance(indices, (tuple, list)):\n return [self._get_single_item(index) for index in indices]\n return self._get_single_item(indices)\n\n def _get_single_item(self, index):\n\n start_ind, end_ind, pid, label, camid = self.seqset[index]\n\n imgseq = []\n flowseq = []\n for ind in range(start_ind, end_ind):\n fname = self.identities[pid][camid][ind]\n fpath_img = osp.join(self.root[0], fname)\n imgrgb = Image.open(fpath_img).convert('RGB')\n fpath_flow = osp.join(self.root[1], fname)\n flowrgb = Image.open(fpath_flow).convert('RGB')\n imgseq.append(imgrgb)\n flowseq.append(flowrgb)\n\n while len(imgseq) < self.seq_len:\n imgseq.append(imgrgb)\n flowseq.append(flowrgb)\n\n seq = [imgseq, flowseq]\n\n if self.transform is not None:\n seq = self.transform(seq)\n\n img_tensor = torch.stack(seq[0], 0)\n\n flow_tensor = torch.stack(seq[1], 0)\n\n return img_tensor, flow_tensor, label, camid" }, { "identifier": "SeqTestPreprocessor", "path": "datasets/seqpreprocessor.py", "snippet": "class SeqTestPreprocessor(object):\n\n def __init__(self, seqset, dataset, seq_len, transform=None):\n super(SeqTestPreprocessor, self).__init__()\n self.seqset = seqset\n self.identities = dataset.identities\n self.transform = transform\n self.seq_len = seq_len\n self.root = [dataset.images_dir]\n self.root.append(dataset.other_dir)\n\n def __len__(self):\n return len(self.seqset)\n\n def __getitem__(self, indices):\n if isinstance(indices, (tuple, list)):\n return [self._get_single_item(index) for index in indices]\n return self._get_single_item(indices)\n\n def _get_single_item(self, index):\n\n start_ind, end_ind, pid, label, camid = self.seqset[index]\n\n imgseq = []\n flowseq = []\n for ind in range(start_ind, end_ind):\n fname = self.identities[pid][camid][ind]\n fpath_img = osp.join(self.root[0], fname)\n imgrgb = Image.open(fpath_img).convert('RGB')\n fpath_flow = osp.join(self.root[1], fname)\n flowrgb = Image.open(fpath_flow).convert('RGB')\n imgseq.append(imgrgb)\n flowseq.append(flowrgb)\n\n while len(imgseq) < self.seq_len:\n imgseq.append(imgrgb)\n flowseq.append(flowrgb)\n\n seq = [imgseq, flowseq]\n\n if self.transform is not None:\n seq = self.transform(seq)\n\n img_tensor = torch.stack(seq[0], 0)\n\n if len(self.root) == 2:\n flow_tensor = torch.stack(seq[1], 0)\n else:\n flow_tensor = None\n\n return img_tensor, flow_tensor, pid, camid" }, { "identifier": "Mars", "path": "datasets/set/mars.py", "snippet": "class Mars(object):\n root = '/18640539002/dataset_cc/Mars/'\n train_name_path = osp.join(root, 'info/train_name.txt')\n test_name_path = osp.join(root, 'info/test_name.txt')\n track_train_info_path = osp.join(root, 'info/tracks_train_info.mat')\n track_test_info_path = osp.join(root, 'info/tracks_test_info.mat')\n query_IDX_path = osp.join(root, 'info/query_IDX.mat')\n split_train_json_path = osp.join(root, 'split_train.json')\n split_query_json_path = osp.join(root, 'split_query.json')\n split_gallery_json_path = osp.join(root, 'split_gallery.json')\n \n def __init__(self, root= '../data/Mars/', min_seq_len=0):\n self._check_before_run()\n\n train_names = self._get_names(self.train_name_path) # <class 'list'>: <Len: 509914> '0001C1T0001F001.jpg'\n test_names = self._get_names(self.test_name_path) # 
<class 'list'>: <Len: 681089> '00-1C1T0001F001.jpg'\n track_train = loadmat(self.track_train_info_path)[\n 'track_train_info'] # numpy.ndarray (8298, 4) [[1 16 1 1],[17 95 1 1] ...]\n track_test = loadmat(self.track_test_info_path)[\n 'track_test_info'] # numpy.ndarray (12180, 4) [[1 24 -1 1][25 34 -1 1]]\n \n query_IDX = loadmat(self.query_IDX_path)['query_IDX'].squeeze() # numpy.ndarray (1980,) [4130, 4138...]\n query_IDX -= 1 # index from 0 [4129,4137....]\n track_query = track_test[query_IDX, :] # 对应行的小段视频信息,[[171610 171649 2 1],[172214 172313 2 2]...]\n \n gallery_IDX = [i for i in range(track_test.shape[0]) if i not in query_IDX] # gallery = 10200\n track_gallery = track_test[gallery_IDX, :] # <class 'tuple'>: (12180, 4) [[1 24 -1 1][25 34 -1 1]...]\n\n train, num_train_tracklets, num_train_pids, num_train_imgs, num_train_cams, num_train_vids = \\\n self._process_data(train_names, track_train, home_dir='bbox_train', relabel=True,\n min_seq_len=min_seq_len, json_path=self.split_train_json_path)\n\n query, num_query_tracklets, num_query_pids, num_query_imgs, query_pid, query_camid = \\\n self._process_gallery_data(test_names, track_query, home_dir='bbox_test', relabel=False,\n min_seq_len=min_seq_len, json_path=self.split_query_json_path,)\n\n gallery, num_gallery_tracklets, num_gallery_pids, num_gallery_imgs, gallery_pid, gallery_camid = \\\n self._process_gallery_data(test_names, track_gallery, home_dir='bbox_test', relabel=False,\n min_seq_len=min_seq_len, json_path=self.split_gallery_json_path)\n\n num_imgs_per_tracklet = num_train_imgs + num_query_imgs + num_gallery_imgs\n min_num = np.min(num_imgs_per_tracklet)\n max_num = np.max(num_imgs_per_tracklet)\n avg_num = np.mean(num_imgs_per_tracklet)\n\n num_total_pids = num_train_pids + num_query_pids\n num_total_tracklets = num_train_tracklets + num_query_tracklets + num_gallery_tracklets\n\n print(\"=> MARS loaded\")\n print(\"Dataset statistics:\")\n print(\" ------------------------------\")\n print(\" subset | # ids | # tracklets\")\n print(\" ------------------------------\")\n print(\" train | {:5d} | {:8d}\".format(num_train_pids, num_train_tracklets))\n print(\" query | {:5d} | {:8d}\".format(num_query_pids, num_query_tracklets))\n print(\" gallery | {:5d} | {:8d}\".format(num_gallery_pids, num_gallery_tracklets))\n print(\" ------------------------------\")\n print(\" total | {:5d} | {:8d}\".format(num_total_pids, num_total_tracklets))\n print(\" number of images per tracklet: {} ~ {}, average {:.1f}\".format(min_num, max_num, avg_num))\n print(\" ------------------------------\")\n\n self.train = train\n self.query = query\n self.gallery = gallery\n\n self.num_train_pids = num_train_pids\n self.num_query_pids = num_query_pids\n self.num_gallery_pids = num_gallery_pids\n\n self.queryinfo = infostruct()\n self.queryinfo.pid = query_pid\n self.queryinfo.camid = query_camid\n self.queryinfo.tranum = num_query_imgs\n\n self.galleryinfo = infostruct()\n self.galleryinfo.pid = gallery_pid\n self.galleryinfo.camid = gallery_camid\n self.galleryinfo.tranum = num_gallery_imgs\n\n self.num_train_cams = num_train_cams\n self.num_train_vids = num_train_vids\n \n def _check_before_run(self):\n \"\"\"Check if all files are available before going deeper\"\"\"\n if not osp.exists(self.root):\n raise RuntimeError(\"'{}' is not available\".format(self.root))\n if not osp.exists(self.train_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.train_name_path))\n if not osp.exists(self.test_name_path):\n raise 
RuntimeError(\"'{}' is not available\".format(self.test_name_path))\n if not osp.exists(self.track_train_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_train_info_path))\n if not osp.exists(self.track_test_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_test_info_path))\n if not osp.exists(self.query_IDX_path):\n raise RuntimeError(\"'{}' is not available\".format(self.query_IDX_path))\n \n def _get_names(self, fpath):\n names = []\n with open(fpath, 'r') as f:\n for line in f:\n new_line = line.rstrip()\n names.append(new_line)\n return names\n \n def _process_data(self, names, meta_data, home_dir=None, relabel=False, min_seq_len=0, json_path=''):\n if osp.exists(json_path):\n print(\"=> {} generated before, awesome!\".format(json_path))\n split = read_json(json_path)\n return split['tracklets'], split['num_tracklets'], split['num_pids'], split['num_imgs_per_tracklet'], split['num_cams'], split['num_tracks']\n print(\"=> Automatically generating split (might take a while for the first time, have a coffe)\")\n assert home_dir in ['bbox_train', 'bbox_test']\n num_tracklets = meta_data.shape[0] # 8298 TODO: 要不要增加?\n pid_list = list(set(meta_data[:, 2].tolist())) # pid = 625 => [1 3 5 7 9...]\n num_pids = len(pid_list)\n\n if relabel:\n pid2label = {pid: label for label, pid in enumerate(pid_list)} # {1:0,3:1,5:2,...}\n tracklets = []\n num_imgs_per_tracklet = []\n cams = []\n \n for tracklet_idx in range(num_tracklets):\n data = meta_data[tracklet_idx, ...] # [1 16 1 1]\n start_index, end_index, pid, camid = data\n \n cams += [int(camid)]\n \n if pid == -1:\n continue # junk images are just ignored\n assert 1 <= camid <= 6\n if relabel:\n pid = pid2label[pid] # pid = 0\n camid -= 1\n # index starts from 0\n img_names = names[start_index - 1:end_index]\n # <class 'list'>:['0001C1T0001F001.jpg'.. 
'0001C1T0001F016.jpg']\n\n # make sure image names correspond to the same person\n pnames = [img_name[:4] for img_name in img_names] # pnames = ['0001','0001'...]\n assert len(set(pnames)) == 1, \"Error: a single tracklet contains different person images\"\n\n # make sure all images are captured under the same camera\n camnames = [img_name[5] for img_name in img_names] # camnames = ['1','1'...]\n assert len(set(camnames)) == 1, \"Error: images are captured under different cameras!\"\n\n # append image names with directory information\n # '/media/ying/0BDD17830BDD1783/ReIdDataset/Mars/bbox_train/0001/0001C1T0001F001.jpg'\n img_paths = [osp.join(self.root, home_dir, img_name[:4], img_name) for img_name in img_names] # list<16>\n # print(img_paths)\n \n if len(img_paths) >= min_seq_len:\n img_paths = tuple(img_paths)\n tracklets.append((img_paths, int(pid), int(camid), 1)) # (('.jpg','.jpg','每张图片的路径'), 0'行人id', 0'camid' trackid)\n num_imgs_per_tracklet.append(len(img_paths)) # [16,79,15...'每个小段视频包含的图片帧数目']\n\n num_tracklets = len(tracklets) # 8298\n\n cams = set(cams)\n num_cams = len(cams)\n\n print(\"Saving split to {}\".format(json_path))\n split_dict = {\n 'tracklets': tracklets,\n 'num_tracklets': num_tracklets,\n 'num_pids': num_pids,\n 'num_imgs_per_tracklet': num_imgs_per_tracklet,\n 'num_cams' : num_cams,\n 'num_tracks' : 1\n }\n write_json(split_dict, json_path)\n\n return tracklets, num_tracklets, num_pids, num_imgs_per_tracklet, num_cams, 1\n \n def _process_gallery_data(self, names, meta_data, home_dir=None, relabel=False, min_seq_len=0, json_path=''):\n if osp.exists(json_path):\n print(\"=> {} generated before, awesome!\".format(json_path))\n split = read_json(json_path)\n return split['tracklets'], split['num_tracklets'], split['num_pids'], split['num_imgs_per_tracklet'], split['pids'], split['camid']\n\n assert home_dir in ['bbox_train', 'bbox_test']\n num_tracklets = meta_data.shape[0] # 8298 TODO: 要不要增加?\n pid_list = list(set(meta_data[:, 2].tolist())) # pid = 625 => [1 3 5 7 9...]\n num_pids = len(pid_list) # 626 622\n\n if relabel:\n pid2label = {pid: label for label, pid in enumerate(pid_list)} # {1:0,3:1,5:2,...}\n tracklets = []\n num_imgs_per_tracklet = []\n gallery_pid = []\n gallery_camid = []\n\n for tracklet_idx in range(num_tracklets):\n data = meta_data[tracklet_idx, ...] # [1 16 1 1]\n start_index, end_index, pid, camid = data\n\n if pid == -1:\n continue # junk images are just ignored\n assert 1 <= camid <= 6\n if relabel:\n pid = pid2label[pid] # pid = 0\n camid -= 1\n # index starts from 0\n img_names = names[start_index - 1:end_index]\n # <class 'list'>:['0001C1T0001F001.jpg'.. 
'0001C1T0001F016.jpg']\n\n # make sure image names correspond to the same person\n pnames = [img_name[:4] for img_name in img_names] # pnames = ['0001','0001'...]\n assert len(set(pnames)) == 1, \"Error: a single tracklet contains different person images\"\n\n # make sure all images are captured under the same camera\n camnames = [img_name[5] for img_name in img_names] # camnames = ['1','1'...]\n assert len(set(camnames)) == 1, \"Error: images are captured under different cameras!\"\n\n # append image names with directory information\n # '/media/ying/0BDD17830BDD1783/ReIdDataset/Mars/bbox_train/0001/0001C1T0001F001.jpg'\n img_paths = [osp.join(self.root, home_dir, img_name[:4], img_name) for img_name in img_names] # list<16>\n if len(img_paths) >= min_seq_len:\n img_paths = tuple(img_paths)\n tracklets.append((img_paths, int(pid), int(camid), 1)) # (('.jpg','.jpg','每张图片的路径'), 0'行人id', 0'camid' )\n num_imgs_per_tracklet.append(len(img_paths)) # [16,79,15...'每个小段视频包含的图片帧数目']\n gallery_pid.append(int(pid))\n gallery_camid.append(int(camid))\n num_tracklets = len(tracklets) # 8298\n print(\"Saving split to {}\".format(json_path))\n split_dict = {\n 'tracklets': tracklets,\n 'num_tracklets': num_tracklets,\n 'num_pids': num_pids,\n 'num_imgs_per_tracklet': num_imgs_per_tracklet,\n 'pids': gallery_pid,\n 'camid': gallery_camid,\n }\n write_json(split_dict, json_path)\n return tracklets, num_tracklets, num_pids, num_imgs_per_tracklet, gallery_pid, gallery_camid" }, { "identifier": "iLIDSVIDSEQUENCE", "path": "datasets/set/ilidsvidsequence.py", "snippet": "class iLIDSVIDSEQUENCE(Datasequence):\n\n def __init__(self, root, split_id=0, seq_len=12, seq_srd=6, num_val=1, download=False):\n super(iLIDSVIDSEQUENCE, self).__init__(root, split_id=split_id)\n\n if download:\n self.download()\n\n if not self._check_integrity():\n self.imgextract()\n # --> load完后就有了train,val,和trainval,实际上最开始只有trainval,我们按照num_val\n self.load(seq_len, seq_srd, num_val)\n self.num_train_cams = 2\n self.num_train_vids = 1\n\n self.query, query_pid, query_camid, query_num = self._pluckseq_cam(self.identities, self.split['query'],\n seq_len, seq_srd, 0)\n self.queryinfo = infostruct()\n self.queryinfo.pid = query_pid\n self.queryinfo.camid = query_camid\n self.queryinfo.tranum = query_num\n\n self.gallery, gallery_pid, gallery_camid, gallery_num = self._pluckseq_cam(self.identities,\n self.split['gallery'],\n seq_len, seq_srd, 1)\n self.galleryinfo = infostruct()\n self.galleryinfo.pid = gallery_pid\n self.galleryinfo.camid = gallery_camid\n self.galleryinfo.tranum = gallery_num\n\n @property\n def other_dir(self):\n return osp.join(self.root, 'others')\n\n def download(self):\n\n if self._check_integrity():\n print(\"Files already downloaded and verified\")\n return\n\n raw_dir = osp.join(self.root, 'raw')\n mkdir_if_missing(raw_dir)\n\n fpath1 = osp.join(raw_dir, datasetname + '.tar')\n fpath2 = osp.join(raw_dir, flowname + '.tar')\n\n if osp.isfile(fpath1) and osp.isfile(fpath2):\n print(\"Using the download file:\" + fpath1 + \" \" + fpath2)\n else:\n print(\"Please firstly download the files\")\n raise RuntimeError(\"Downloaded file missing!\")\n\n def imgextract(self):\n\n raw_dir = osp.join(self.root, 'raw')\n exdir1 = osp.join(raw_dir, datasetname)\n exdir2 = osp.join(raw_dir, flowname)\n fpath1 = osp.join(raw_dir, datasetname + '.tar')\n fpath2 = osp.join(raw_dir, flowname + '.tar')\n\n if not osp.isdir(exdir1):\n print(\"Extracting tar file\")\n cwd = os.getcwd()\n tar = tarfile.open(fpath1)\n mkdir_if_missing(exdir1)\n 
os.chdir(exdir1)\n tar.extractall()\n tar.close()\n os.chdir(cwd)\n\n if not osp.isdir(exdir2):\n print(\"Extracting tar file\")\n cwd = os.getcwd()\n tar = tarfile.open(fpath2)\n mkdir_if_missing(exdir2)\n os.chdir(exdir2)\n tar.extractall()\n tar.close()\n os.chdir(cwd)\n\n # reorganzing the dataset\n # Format\n\n temp_images_dir = osp.join(self.root, 'temp_images')\n mkdir_if_missing(temp_images_dir)\n\n temp_others_dir = osp.join(self.root, 'temp_others')\n mkdir_if_missing(temp_others_dir)\n\n images_dir = osp.join(self.root, 'images')\n mkdir_if_missing(images_dir)\n\n others_dir = osp.join(self.root, 'others')\n mkdir_if_missing(others_dir)\n\n fpaths1 = sorted(glob(osp.join(exdir1, 'i-LIDS-VID/sequences', '*/*/*.png')))\n fpaths2 = sorted(glob(osp.join(exdir2, flowname, '*/*/*.png')))\n\n identities_imgraw = [[[] for _ in range(2)] for _ in range(319)]\n identities_otherraw = [[[] for _ in range(2)] for _ in range(319)]\n\n # image information\n for fpath in fpaths1:\n fname = osp.basename(fpath)\n fname_list = fname.split('_')\n cam_name = fname_list[0]\n pid_name = fname_list[1]\n cam = int(cam_name[-1])\n pid = int(pid_name[-3:])\n temp_fname = ('{:08d}_{:02d}_{:04d}.png'\n .format(pid, cam, len(identities_imgraw[pid - 1][cam - 1])))\n identities_imgraw[pid - 1][cam - 1].append(temp_fname)\n shutil.copy(fpath, osp.join(temp_images_dir, temp_fname))\n\n identities_temp = [x for x in identities_imgraw if x != [[], []]]\n identities_images = identities_temp\n\n for pid in range(len(identities_temp)):\n for cam in range(2):\n for img in range(len(identities_images[pid][cam])):\n temp_fname = identities_temp[pid][cam][img]\n fname = ('{:08d}_{:02d}_{:04d}.png'\n .format(pid, cam, img))\n identities_images[pid][cam][img] = fname\n shutil.copy(osp.join(temp_images_dir, temp_fname), osp.join(images_dir, fname))\n\n shutil.rmtree(temp_images_dir)\n\n # flow information\n\n for fpath in fpaths2:\n fname = osp.basename(fpath)\n fname_list = fname.split('_')\n cam_name = fname_list[0]\n pid_name = fname_list[1]\n cam = int(cam_name[-1])\n pid = int(pid_name[-3:])\n temp_fname = ('{:08d}_{:02d}_{:04d}.png'\n .format(pid, cam, len(identities_otherraw[pid - 1][cam - 1])))\n identities_otherraw[pid - 1][cam - 1].append(temp_fname)\n shutil.copy(fpath, osp.join(temp_others_dir, temp_fname))\n\n identities_temp = [x for x in identities_otherraw if x != [[], []]]\n identities_others = identities_temp\n\n for pid in range(len(identities_temp)):\n for cam in range(2):\n for img in range(len(identities_others[pid][cam])):\n temp_fname = identities_temp[pid][cam][img]\n fname = ('{:08d}_{:02d}_{:04d}.png'\n .format(pid, cam, img))\n identities_others[pid][cam][img] = fname\n shutil.copy(osp.join(temp_others_dir, temp_fname), osp.join(others_dir, fname))\n\n shutil.rmtree(temp_others_dir)\n\n meta = {'name': 'iLIDS-sequence', 'shot': 'sequence', 'num_cameras': 2,\n 'identities': identities_images}\n\n write_json(meta, osp.join(self.root, 'meta.json'))\n\n # Consider fixed training and testing split\n splitmat_name = osp.join(exdir1, 'train-test people splits', 'train_test_splits_ilidsvid.mat')\n data = sio.loadmat(splitmat_name)\n person_list = data['ls_set']\n num = len(identities_images)\n splits = []\n\n for i in range(10):\n pids = (person_list[i] - 1).tolist()\n trainval_pids = sorted(pids[:num // 2])\n test_pids = sorted(pids[num // 2:])\n split = {'trainval': trainval_pids,\n 'query': test_pids,\n 'gallery': test_pids}\n splits.append(split)\n write_json(splits, osp.join(self.root, 
'splits.json'))\n\n def _pluckseq_cam(self, identities, indices, seq_len, seq_str, camid):\n # --> query和gallery与 trainval不同的是\n # --> trainval是用来训练的,所以怎么处理都行\n # --> query和gallery是来模拟实际场景的,所以不能用那种重复采样的方法扩充两个数据集\n # --> 另外要求是不同镜头下的,所以加一个camid\n\n ret = []\n per_id = []\n cam_id = []\n tra_num = []\n\n for index, pid in enumerate(indices):\n pid_images = identities[pid]\n cam_images = pid_images[camid]\n seqall = len(cam_images)\n seq_inds = [(start_ind, start_ind + seq_len) for start_ind in range(0, seqall - seq_len, seq_str)]\n if not seq_inds:\n seq_inds = [(0, seqall)]\n for seq_ind in seq_inds:\n ret.append((seq_ind[0], seq_ind[1], pid, index, camid))\n per_id.append(pid)\n cam_id.append(camid)\n tra_num.append(len(seq_inds))\n return ret, per_id, cam_id, tra_num" }, { "identifier": "LSVID", "path": "datasets/set/lsvid.py", "snippet": "class LSVID(object):\n\n def __init__(self, root=None, sampling_step=48, *args, **kwargs):\n self._root = root\n self.train_name_path = osp.join(self._root, 'info/list_sequence/list_seq_train.txt')\n self.test_name_path = osp.join(self._root, 'info/list_sequence/list_seq_test.txt')\n self.query_IDX_path = osp.join(self._root, 'info/data/info_test.mat')\n\n self._check_before_run()\n\n # prepare meta data\n track_train = self._get_names(self.train_name_path)\n track_test = self._get_names(self.test_name_path)\n\n track_train = np.array(track_train)\n track_test = np.array(track_test)\n\n query_IDX = h5py.File(self.query_IDX_path, mode='r')['query'][0,:] # numpy.ndarray (1980,)\n query_IDX = np.array(query_IDX, dtype=int)\n\n query_IDX -= 1 # index from 0\n track_query = track_test[query_IDX, :]\n\n gallery_IDX = [i for i in range(track_test.shape[0]) if i not in query_IDX]\n track_gallery = track_test[gallery_IDX, :]\n\n self.split_train_dense_json_path = osp.join(self._root,'split_train_dense_{}.json'.format(sampling_step))\n self.split_train_json_path = osp.join(self._root, 'split_train.json')\n self.split_query_json_path = osp.join(self._root, 'split_query.json')\n self.split_gallery_json_path = osp.join(self._root, 'split_gallery.json')\n\n train, num_train_tracklets, num_train_pids, num_train_imgs, num_train_cams, num_train_vids = \\\n self._process_data(track_train, json_path=self.split_train_json_path, relabel=True)\n\n train_dense, num_train_tracklets_dense, num_train_pids_dense, num_train_imgs_dense, _, _ = \\\n self._process_data(track_train, json_path=self.split_train_dense_json_path, relabel=True, sampling_step=sampling_step)\n\n query, num_query_tracklets, num_query_pids, num_query_imgs, _, _ = \\\n self._process_data(track_query, json_path=self.split_query_json_path, relabel=False)\n\n gallery, num_gallery_tracklets, num_gallery_pids, num_gallery_imgs, _, _ = \\\n self._process_data(track_gallery, json_path=self.split_gallery_json_path, relabel=False)\n\n num_imgs_per_tracklet = num_train_imgs + num_gallery_imgs + num_query_imgs\n min_num = np.min(num_imgs_per_tracklet)\n max_num = np.max(num_imgs_per_tracklet)\n avg_num = np.mean(num_imgs_per_tracklet)\n\n num_total_pids = num_train_pids + num_gallery_pids\n num_total_tracklets = num_train_tracklets + num_gallery_tracklets + num_query_tracklets\n\n print(\"=> LS-VID loaded\")\n print(\"Dataset statistics:\")\n print(\" ------------------------------\")\n print(\" subset | # ids | # tracklets\")\n print(\" ------------------------------\")\n print(\" train | {:5d} | {:8d}\".format(num_train_pids, num_train_tracklets))\n if sampling_step != 0:\n print(\" train_d | {:5d} | 
{:8d}\".format(num_train_pids_dense, num_train_tracklets_dense))\n print(\" query | {:5d} | {:8d}\".format(num_query_pids, num_query_tracklets))\n print(\" gallery | {:5d} | {:8d}\".format(num_gallery_pids, num_gallery_tracklets))\n print(\" ------------------------------\")\n print(\" total | {:5d} | {:8d}\".format(num_total_pids, num_total_tracklets))\n print(\" number of images per tracklet: {} ~ {}, average {:.1f}\".format(min_num, max_num, avg_num))\n print(\" ------------------------------\")\n\n if sampling_step != 0:\n self.train = train_dense\n else:\n self.train = train\n self.query = query\n self.gallery = gallery\n\n self.num_train_pids = num_train_pids\n self.num_query_pids = num_query_pids\n self.num_gallery_pids = num_gallery_pids\n\n self.num_train_cams = num_train_cams\n self.num_train_vids = num_train_vids\n\n def _check_before_run(self):\n \"\"\"Check if all files are available before going deeper\"\"\"\n if not osp.exists(self._root):\n raise RuntimeError(\"'{}' is not available\".format(self._root))\n if not osp.exists(self.train_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.train_name_path))\n if not osp.exists(self.test_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.test_name_path))\n if not osp.exists(self.query_IDX_path):\n raise RuntimeError(\"'{}' is not available\".format(self.query_IDX_path))\n\n def _get_names(self, fpath):\n names = []\n with open(fpath, 'r') as f:\n for line in f:\n new_line = line.rstrip()\n basepath, pid = new_line.split(' ')\n names.append([basepath, int(pid)])\n return names\n\n def _process_data(self,\n meta_data,\n relabel=False,\n json_path=None,\n sampling_step=0):\n if osp.exists(json_path):\n split = read_json(json_path)\n return split['tracklets'], split['num_tracklets'], split['num_pids'], split['num_imgs_per_tracklet'], split['num_cams'], split['num_tracks']\n\n num_tracklets = meta_data.shape[0]\n pid_list = list(set(meta_data[:, 1].tolist()))\n num_pids = len(pid_list)\n\n if relabel: pid2label = {int(pid): label for label, pid in enumerate(pid_list)}\n tracklets = []\n num_imgs_per_tracklet = []\n cams = []\n\n for tracklet_idx in range(num_tracklets):\n tracklet_path = osp.join(self._root, meta_data[tracklet_idx, 0]) + '*'\n img_paths = glob.glob(tracklet_path) # avoid .DS_Store\n img_paths.sort()\n pid = int(meta_data[tracklet_idx, 1])\n _, _, camid, _ = osp.basename(img_paths[0]).split('_')[:4]\n cams += [int(camid)]\n camid = int(camid)\n\n if pid == -1: continue # junk images are just ignored\n assert 1 <= camid <= 15\n if relabel: pid = pid2label[pid]\n camid -= 1 # index starts from 0\n \n if sampling_step == 0:\n tracklets.append((img_paths, pid, camid, 1))\n else:\n num_sampling = len(img_paths) // sampling_step\n for idx in range(num_sampling):\n if idx == num_sampling - 1:\n tracklets.append((img_paths[idx * sampling_step:], pid, camid, 1))\n else:\n tracklets.append((img_paths[idx * sampling_step: (idx + 1) * sampling_step], pid, camid, 1))\n num_imgs_per_tracklet.append(len(img_paths))\n\n num_tracklets = len(tracklets)\n cams = set(cams)\n num_cams = len(cams)\n\n print(\"Saving split to {}\".format(json_path))\n split_dict = {'tracklets': tracklets, 'num_tracklets': num_tracklets, 'num_pids': num_pids,\n 'num_imgs_per_tracklet': num_imgs_per_tracklet, 'num_cams' : num_cams, 'num_tracks' : 1}\n write_json(split_dict, json_path)\n\n return tracklets, num_tracklets, num_pids, num_imgs_per_tracklet, num_cams, 1" } ]
import torch
import utils.spatial_transforms as ST
import utils.temporal_transforms as TT
import utils.transforms as T
import utils.seqtransforms as SeqT
from torch.utils.data import DataLoader
from datasets.video_loader_xh import VideoDataset
from datasets.samplers import RandomIdentitySampler, RandomIdentitySamplerForSeq, RandomIdentitySamplerWYQ
from datasets.seqpreprocessor import SeqTrainPreprocessor, SeqTestPreprocessor
from datasets.set.mars import Mars
from datasets.set.ilidsvidsequence import iLIDSVIDSEQUENCE
from datasets.set.lsvid import LSVID
11,830
# from torchvision.transforms import InterpolationMode
# import torchvision.transforms as T

__factory = {
    'mars': Mars,
    'ilidsvidsequence': iLIDSVIDSEQUENCE,
# from torchvision.transforms import InterpolationMode
# import torchvision.transforms as T

__factory = {
    'mars': Mars,
    'ilidsvidsequence': iLIDSVIDSEQUENCE,
'lsvid': LSVID
8
2023-12-11 04:03:46+00:00
16k
MarilynKeller/aitviewer-skel
aitviewer/renderables/rigid_bodies.py
[ { "identifier": "Arrows", "path": "aitviewer/renderables/arrows.py", "snippet": "class Arrows(Node):\n \"\"\"\n Draw an arrow.\n \"\"\"\n\n def __init__(\n self,\n origins,\n tips,\n r_base=0.01,\n r_head=0.02,\n p=0.1,\n color=(0.0, 0.0, 0.5, 1.0),\n **kwargs,\n ):\n \"\"\"\n Initializer.\n :param origins: Set of 3D coordinates of the base of the arrows as a np array of shape (N, B, 3).\n :param tips: Set of 3D coordinates denoting the tip of the arrow (N, T, 3).\n :param r_base: Radius of the base cylinder.\n :param r_head: Radius of the tip cylinder.\n :param p: Percentage of arrow head on the entire length.\n :param color: Color of the line (4-tuple).\n \"\"\"\n assert origins.shape == tips.shape\n if len(origins.shape) == 2:\n origins = origins[np.newaxis]\n tips = tips[np.newaxis]\n else:\n assert len(origins.shape) == 3\n super(Arrows, self).__init__(n_frames=len(origins), **kwargs)\n\n self._origins = origins\n self._tips = tips\n\n # Percentage of arrow head on entire length\n self.p = p\n\n # Nodes\n self.material.color = color\n self.bases_r = Lines(\n lines=self.get_line_coords(self.origins, self.mid_points),\n mode=\"lines\",\n r_base=r_base,\n color=color,\n cast_shadow=False,\n is_selectable=False,\n )\n self.arrows_r = Lines(\n lines=self.get_line_coords(self.mid_points, self.tips),\n mode=\"lines\",\n r_base=r_head,\n r_tip=0.0,\n color=color,\n cast_shadow=False,\n is_selectable=False,\n )\n\n self._add_nodes(self.bases_r, self.arrows_r, show_in_hierarchy=False)\n\n @property\n def bounds(self):\n return self.arrows_r.bounds\n\n @property\n def current_bounds(self):\n return self.arrows_r.current_bounds\n\n @property\n def current_origins(self):\n idx = self.current_frame_id if self.origins.shape[0] > 1 else 0\n return self.origins[idx]\n\n @current_origins.setter\n def current_origins(self, origins):\n idx = self.current_frame_id if self.origins.shape[0] > 1 else 0\n self.origins[idx] = origins\n\n @property\n def current_tips(self):\n idx = self.current_frame_id if self.tips.shape[0] > 1 else 0\n return self.tips[idx]\n\n @current_tips.setter\n def current_tips(self, tips):\n idx = self.current_frame_id if self.tips.shape[0] > 1 else 0\n self.tips[idx] = tips\n\n @property\n def origins(self):\n return self._origins\n\n @origins.setter\n def origins(self, origins):\n self._origins = origins if len(origins.shape) == 3 else origins[np.newaxis]\n self.n_frames = self._origins.shape[0]\n\n @property\n def tips(self):\n return self._tips\n\n @tips.setter\n def tips(self, tips):\n self._tips = tips if len(tips.shape) == 3 else tips[np.newaxis]\n self.n_frames = self._tips.shape[0]\n\n @property\n def mid_points(self):\n return self.origins + (self.tips - self.origins) * (1 - self.p)\n\n def get_line_coords(self, starts, ends):\n c = np.zeros((len(self), (starts.shape[1] + ends.shape[1]), 3), dtype=starts.dtype)\n c[:, 0::2] = starts\n c[:, 1::2] = ends\n return c\n\n def redraw(self, **kwargs):\n self.bases_r.lines = self.get_line_coords(self.origins, self.mid_points)\n self.arrows_r.lines = self.get_line_coords(self.mid_points, self.tips)\n\n @Node.color.setter\n def color(self, color):\n self.material.color = color\n self.bases_r.color = color\n self.arrows_r.color = color\n\n def update_frames(self, origins, tips, frames):\n self.origins[frames] = origins\n self.tips[frames] = tips\n self.n_frames = self.origins.shape[0]\n self.redraw()\n\n def add_frames(self, origins, tips):\n if len(origins.shape) == 2:\n origins = origins[np.newaxis]\n self.origins = 
np.append(self.origins, origins, axis=0)\n\n if len(tips.shape) == 2:\n tips = tips[np.newaxis]\n self.tips = np.append(self.tips, tips, axis=0)\n\n self.n_frames = self.origins.shape[0]\n self.redraw()\n\n def remove_frames(self, frames):\n self.origins = np.delete(self.origins, frames, axis=0)\n self.tips = np.delete(self.tips, frames, axis=0)\n\n self.n_frames = self.origins.shape[0]\n self.redraw()" }, { "identifier": "Spheres", "path": "aitviewer/renderables/spheres.py", "snippet": "class Spheres(Node):\n \"\"\"Render some simple spheres.\"\"\"\n\n def __init__(\n self,\n positions,\n radius=0.01,\n color=(0.0, 0.0, 1.0, 1.0),\n rings=16,\n sectors=32,\n icon=\"\\u008d\",\n cast_shadow=False,\n **kwargs,\n ):\n \"\"\"\n Initializer.\n :param positions: A numpy array of shape (F, N, 3) or (N, 3) containing N sphere positions for F time steps.\n :param radius: Radius of the spheres.\n :param color: Color of the spheres.\n :param rings: Longitudinal resolution.\n :param sectors: Latitudinal resolution.\n \"\"\"\n if len(positions.shape) == 2:\n positions = positions[np.newaxis]\n assert len(positions.shape) == 3\n\n # Define a default material in case there is None.\n if isinstance(color, tuple) or len(color.shape) == 1:\n kwargs[\"material\"] = kwargs.get(\"material\", Material(color=color, ambient=0.2))\n self.sphere_colors = kwargs[\"material\"].color\n else:\n assert color.shape[1] == 4 and positions.shape[1] == color.shape[0]\n self.sphere_colors = color\n\n if \"n_frames\" not in kwargs:\n kwargs[\"n_frames\"] = positions.shape[0]\n super().__init__(icon=icon, **kwargs)\n\n self._sphere_positions = positions\n self.radius = radius\n\n self.vertices, self.faces = _create_sphere(radius=1.0, rings=rings, sectors=sectors)\n self.n_vertices = self.vertices.shape[0]\n self.n_spheres = self.sphere_positions.shape[1]\n\n self.draw_edges = False\n self._need_upload = True\n\n # Render passes.\n self.outline = True\n self.fragmap = True\n self.depth_prepass = True\n self.cast_shadow = cast_shadow\n\n @property\n def bounds(self):\n bounds = self.get_bounds(self.sphere_positions)\n bounds[:, 0] -= self.radius\n bounds[:, 1] += self.radius\n return bounds\n\n @property\n def current_bounds(self):\n bounds = self.get_bounds(self.current_sphere_positions)\n bounds[:, 0] -= self.radius\n bounds[:, 1] += self.radius\n return bounds\n\n @property\n def vertex_colors(self):\n if len(self._sphere_colors.shape) == 1:\n return np.full((self.n_spheres * self.n_vertices, 4), self._sphere_colors)\n else:\n return np.tile(self._sphere_colors, (self.n_vertices, 1))\n\n def color_one(self, index, color):\n new_colors = np.tile(np.array(self.material.color), (self.n_spheres, 1))\n new_colors[index] = color\n self.sphere_colors = new_colors\n\n @Node.color.setter\n def color(self, color):\n self.material.color = color\n self.sphere_colors = color\n self.redraw()\n\n @property\n def sphere_colors(self):\n if len(self._sphere_colors.shape) == 1:\n t = np.tile(np.array(self._sphere_colors), (self.n_spheres, 1))\n return t\n else:\n return self._sphere_colors\n\n @sphere_colors.setter\n def sphere_colors(self, color):\n if isinstance(color, tuple):\n color = np.array(color)\n self._sphere_colors = color\n self.redraw()\n\n @property\n def current_sphere_positions(self):\n idx = self.current_frame_id if self.sphere_positions.shape[0] > 1 else 0\n return self.sphere_positions[idx]\n\n @current_sphere_positions.setter\n def current_sphere_positions(self, positions):\n assert len(positions.shape) == 2\n idx = 
self.current_frame_id if self.sphere_positions.shape[0] > 1 else 0\n self.sphere_positions[idx] = positions\n self.redraw()\n\n @property\n def sphere_positions(self):\n return self._sphere_positions\n\n @sphere_positions.setter\n def sphere_positions(self, pos):\n if len(pos.shape) == 2:\n pos = pos[np.newaxis]\n self._sphere_positions = pos\n self.n_frames = len(self._sphere_positions)\n self.redraw()\n\n def on_frame_update(self):\n self.redraw()\n\n def redraw(self, **kwargs):\n self._need_upload = True\n\n @Node.once\n def make_renderable(self, ctx: moderngl.Context):\n self.prog = get_sphere_instanced_program()\n\n vs_path = \"sphere_instanced_positions.vs.glsl\"\n self.outline_program = get_outline_program(vs_path)\n self.depth_only_program = get_depth_only_program(vs_path)\n self.fragmap_program = get_fragmap_program(vs_path)\n\n self.vbo_vertices = ctx.buffer(self.vertices.astype(\"f4\").tobytes())\n self.vbo_indices = ctx.buffer(self.faces.astype(\"i4\").tobytes())\n\n self.vbo_instance_position = ctx.buffer(reserve=self.n_spheres * 12)\n self.vbo_instance_color = ctx.buffer(reserve=self.n_spheres * 16)\n\n self.vao = VAO()\n self.vao.buffer(self.vbo_vertices, \"3f4\", \"in_position\")\n self.vao.buffer(self.vbo_instance_position, \"3f4/i\", \"instance_position\")\n self.vao.buffer(self.vbo_instance_color, \"4f4/i\", \"instance_color\")\n self.vao.index_buffer(self.vbo_indices)\n\n def _upload_buffers(self):\n if not self.is_renderable or not self._need_upload:\n return\n self._need_upload = False\n self.vbo_instance_position.write(self.current_sphere_positions.astype(\"f4\").tobytes())\n if len(self._sphere_colors.shape) > 1:\n self.vbo_instance_color.write(self._sphere_colors.astype(\"f4\").tobytes())\n\n def render(self, camera, **kwargs):\n self._upload_buffers()\n\n prog = self.prog\n prog[\"radius\"] = self.radius\n if len(self._sphere_colors.shape) == 1:\n prog[\"use_uniform_color\"] = True\n prog[\"uniform_color\"] = tuple(self._sphere_colors)\n else:\n prog[\"use_uniform_color\"] = False\n prog[\"draw_edges\"].value = 1.0 if self.draw_edges else 0.0\n prog[\"win_size\"].value = kwargs[\"window_size\"]\n prog[\"clip_control\"].value = (0, 0, 0)\n\n self.set_camera_matrices(prog, camera, **kwargs)\n set_lights_in_program(\n prog,\n kwargs[\"lights\"],\n kwargs[\"shadows_enabled\"],\n kwargs[\"ambient_strength\"],\n )\n set_material_properties(prog, self.material)\n self.receive_shadow(prog, **kwargs)\n self.vao.render(prog, moderngl.TRIANGLES, instances=self.n_spheres)\n\n def render_positions(self, prog):\n if self.is_renderable:\n self._upload_buffers()\n prog[\"radius\"] = self.radius\n self.vao.render(prog, moderngl.TRIANGLES, instances=self.n_spheres)\n\n def gui(self, imgui):\n _, self.radius = imgui.drag_float(\"Radius\", self.radius, 0.01, min_value=0.001, max_value=10.0, format=\"%.3f\")\n super().gui(imgui)\n\n @hooked\n def release(self):\n if self.is_renderable:\n self.vao.release()\n\n def update_frames(self, positions, frames):\n self.sphere_positions[frames] = positions\n self.redraw()\n\n def add_frames(self, positions):\n if len(positions.shape) == 2:\n positions = positions[np.newaxis]\n self.sphere_positions = np.append(self.sphere_positions, positions, axis=0)\n\n def remove_frames(self, frames):\n self.sphere_positions = np.delete(self.sphere_positions, frames, axis=0)\n self.redraw()\n\n def export_usd(self, stage, usd_path: str, directory: str = None, verbose=False):\n name = f\"{self.name}_{self.uid:03}\".replace(\" \", \"_\")\n usd_path = 
f\"{usd_path}/{name}\"\n\n V = self.vertices.shape[0]\n N = self.sphere_positions.shape[0]\n M = self.n_spheres\n\n vertices = np.empty((N, V * M, 3), np.float32)\n for i in range(N):\n vs = self.vertices[np.newaxis].repeat(M, 0)\n vertices[i] = (vs * self.radius + self.sphere_positions[i].reshape(M, 1, 3)).reshape((-1, 3))\n\n fs = self.faces[np.newaxis].repeat(M, 0).reshape((M, -1))\n offsets = (np.arange(M) * V).reshape((M, 1))\n faces = (fs + offsets).reshape((-1, 3))\n\n mesh = usd.add_mesh(stage, usd_path, self.name, vertices, faces, self.get_local_transform())\n usd.add_color(stage, mesh, usd_path, self.color[:3])\n\n self._export_usd_recursively(stage, usd_path, directory, verbose)" }, { "identifier": "Node", "path": "aitviewer/scene/node.py", "snippet": "class Node(object):\n \"\"\"Interface for nodes.\"\"\"\n\n def __init__(\n self,\n name=None,\n icon=None,\n position=None,\n rotation=None,\n scale=1.0,\n color=(0.5, 0.5, 0.5, 1.0),\n material=None,\n is_selectable=True,\n gui_affine=True,\n gui_material=True,\n enabled_frames=None,\n n_frames=1,\n ):\n \"\"\"\n :param name: Name of the node\n :param icon: Custom Node Icon using custom Icon font\n :param position: Starting position in the format (X,Y,Z) or np array of positions with shape (F, 3)\n :param rotation: Starting rotation in rotation matrix representation (3,3) or np array of rotations with shape (F, 3, 3)\n :param scale: Starting scale (scalar) or np array of scale values with shape (F)\n :param color: (R,G,B,A) 0-1 formatted color value.\n :param material: Object material properties. The color specified in the material will override node color\n :param is_selectable: If True the node is selectable when clicked on, otherwise the parent node will be selected.\n :param gui_affine: If True the node will have transform controls (position, rotation, scale) in the GUI.\n :param gui_material: If True the node will have material controls in the GUI.\n :param enabled_frames: Numpy array of boolean values, the object will be enabled only in frames where the value is True,\n the number of ones in the mask must match the number of frames of the object.\n :param n_frames: How many frames this renderable has.\n \"\"\"\n # Transform & Animation\n position = np.zeros(3, dtype=np.float32) if position is None else np.array(position, dtype=np.float32)\n rotation = np.eye(3, dtype=np.float32) if rotation is None else np.array(rotation, dtype=np.float32)\n\n self._positions = position if len(position.shape) != 1 else position[np.newaxis]\n self._rotations = rotation if len(rotation.shape) != 2 else rotation[np.newaxis]\n self._scales = (scale if isinstance(scale, np.ndarray) else np.array([scale])).astype(np.float32)\n\n n_positions = self._positions.shape[0]\n n_rotations = self._rotations.shape[0]\n n_scales = self._scales.shape[0]\n\n if n_frames > 1:\n assert n_positions == 1 or n_frames == n_positions, (\n f\"Number of position frames\" f\" ({n_positions}) must be 1 or match number of Node frames {n_frames}\"\n )\n assert n_rotations == 1 or n_frames == n_rotations, (\n f\"Number of rotations frames\" f\" ({n_rotations}) must be 1 or match number of Node frames {n_frames}\"\n )\n assert n_scales == 1 or n_frames == n_scales, (\n f\"Number of scales frames\" f\" ({n_scales}) must be 1 or match number of Node frames {n_frames}\"\n )\n else:\n n_frames = max(n_positions, n_rotations, n_scales)\n assert (\n (n_positions == 1 or n_positions == n_frames)\n and (n_rotations == 1 or n_rotations == n_frames)\n and (n_scales == 1 or n_scales 
== n_frames)\n ), (\n f\"Number of position\"\n f\"({n_positions}), rotation ({n_rotations}) and scale ({n_scales})\"\n \"frames must be 1 or match.\"\n )\n\n # Frames\n self._n_frames = n_frames\n self._current_frame_id = 0\n self.model_matrix = self.get_local_transform()\n self._enabled_frames = enabled_frames\n if self._enabled_frames is not None:\n assert np.count_nonzero(self._enabled_frames) == n_frames, (\n f\"Number of non-zero elements in enabled_frames\"\n f\" ({np.count_nonzero(self._enabled_frames)}) must match number of frames in sequence ({n_frames})\"\n )\n # Create an array that maps from the true frame id (counting also disabled frames) to the index of the\n # first existing frame in the sequence.\n self._enabled_frame_id = np.cumsum(self._enabled_frames) - 1\n\n # Stores the true frame id (counting also disabled frames) we use this to allow going\n # through both enabled and disabled frames from the GUI.\n self._internal_frame_id = 0\n\n # Material\n self.material = Material(color=color) if material is None else material\n\n # Renderable Attributes\n self.is_renderable = False\n self.backface_culling = True\n self.backface_fragmap = False\n self.draw_outline = False\n\n # Flags to enable rendering passes\n self.cast_shadow = False\n self.depth_prepass = False\n self.fragmap = False\n self.outline = False\n\n # Programs for render passes. Subclasses are responsible for setting these.\n self.depth_only_program = None # Required for depth_prepass and cast_shadow passes\n self.fragmap_program = None # Required for fragmap pass\n self.outline_program = None # Required for outline pass\n\n # GUI\n self.name = name if name is not None else type(self).__name__\n self.uid = C.next_gui_id()\n self.unique_name = self.name + \"{}\".format(self.uid)\n self.icon = icon if icon is not None else \"\\u0082\"\n self._enabled = True\n self._expanded = False\n self.gui_controls = {\n \"affine\": {\n \"fn\": self.gui_affine,\n \"icon\": \"\\u009b\",\n \"is_visible\": gui_affine,\n },\n \"material\": {\n \"fn\": self.gui_material,\n \"icon\": \"\\u0088\",\n \"is_visible\": gui_material,\n },\n \"animation\": {\n \"fn\": self.gui_animation,\n \"icon\": \"\\u0098\",\n \"is_visible\": (lambda: self._n_frames > 1)(),\n },\n \"io\": {\n \"fn\": self.gui_io,\n \"icon\": \"\\u009a\",\n \"is_visible\": (lambda: self.gui_io.__func__ is not Node.gui_io)(),\n },\n }\n self.gui_modes = {\"view\": {\"title\": \" View\", \"fn\": self.gui_mode_view, \"icon\": \"\\u0099\"}}\n self._selected_mode = \"view\"\n self._show_in_hierarchy = True\n self.is_selectable = is_selectable\n self.export_usd_enabled = True\n self.export_usd_expanded = True\n\n self.nodes: List[Node] = []\n self.parent: Node = None\n\n # Selected Mode\n @property\n def selected_mode(self):\n return self._selected_mode\n\n @selected_mode.setter\n def selected_mode(self, selected_mode):\n self._selected_mode = selected_mode\n\n # Transform\n @property\n def position(self):\n idx = self.current_frame_id if self._positions.shape[0] > 1 else 0\n return self._positions[idx]\n\n @position.setter\n def position(self, position):\n idx = self.current_frame_id if self._positions.shape[0] > 1 else 0\n self._positions[idx] = np.array(position, dtype=np.float32).copy()\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def positions(self):\n return self._positions\n\n @positions.setter\n def positions(self, positions):\n self._positions = positions\n self.update_transform(None if self.parent is None else 
self.parent.model_matrix)\n\n @property\n def rotation(self):\n idx = self.current_frame_id if self._rotations.shape[0] > 1 else 0\n return self._rotations[idx]\n\n @rotation.setter\n def rotation(self, rotation):\n idx = self.current_frame_id if self._rotations.shape[0] > 1 else 0\n self._rotations[idx] = rotation\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def rotations(self):\n return self._rotations\n\n @rotations.setter\n def rotations(self, rotations):\n self._rotations = rotations\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def scale(self):\n idx = self.current_frame_id if self._scales.shape[0] > 1 else 0\n return self._scales[idx]\n\n @scale.setter\n def scale(self, scale):\n idx = self.current_frame_id if self._scales.shape[0] > 1 else 0\n self._scales[idx] = scale\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def scales(self):\n return self._scales\n\n @scales.setter\n def scales(self, scales):\n self._scales = scales\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @staticmethod\n @lru_cache()\n def _compute_transform(pos, rot, scale):\n rotation = np.eye(4)\n rotation[:3, :3] = np.array(rot)\n\n trans = np.eye(4)\n trans[:3, 3] = np.array(pos)\n\n scale = np.diag([scale, scale, scale, 1])\n\n return (trans @ rotation @ scale).astype(\"f4\")\n\n def get_local_transform(self):\n \"\"\"Construct local transform as a 4x4 matrix from this node's position, orientation and scale.\"\"\"\n return self._compute_transform(tuple(self.position), tuple(map(tuple, self.rotation)), self.scale)\n\n def update_transform(self, parent_transform=None):\n \"\"\"Update the model matrix of this node and all of its descendants.\"\"\"\n if parent_transform is None:\n self.model_matrix = self.get_local_transform()\n else:\n self.model_matrix = parent_transform.astype(\"f4\") @ self.get_local_transform()\n\n for n in self.nodes:\n n.update_transform(self.model_matrix)\n\n @property\n def color(self):\n return self.material.color\n\n @color.setter\n def color(self, color):\n self.material.color = color\n\n @property\n def bounds(self):\n \"\"\"The bounds in the format ((x_min, x_max), (y_min, y_max), (z_min, z_max))\"\"\"\n return np.array([[0, 0], [0, 0], [0, 0]])\n\n @property\n def current_bounds(self):\n return np.array([[0, 0], [0, 0], [0, 0]])\n\n @property\n def current_center(self):\n return self.current_bounds.mean(-1)\n\n @property\n def center(self):\n return self.bounds.mean(-1)\n\n def get_local_bounds(self, points):\n if len(points.shape) == 2 and points.shape[-1] == 3:\n points = points[np.newaxis]\n assert len(points.shape) == 3\n\n # Compute min and max coordinates of the bounding box ignoring NaNs.\n val = np.array(\n [\n [np.nanmin(points[:, :, 0]), np.nanmax(points[:, :, 0])],\n [np.nanmin(points[:, :, 1]), np.nanmax(points[:, :, 1])],\n [np.nanmin(points[:, :, 2]), np.nanmax(points[:, :, 2])],\n ]\n )\n\n # If any of the elements is NaN return an empty bounding box.\n if np.isnan(val).any():\n return np.array([[0, 0], [0, 0], [0, 0]])\n else:\n return val\n\n def get_bounds(self, points):\n val = self.get_local_bounds(points)\n\n # Transform bounding box with the model matrix.\n val = (self.model_matrix @ np.vstack((val, np.array([1.0, 1.0]))))[:3]\n\n # If any of the elements is NaN return an empty bounding box.\n if np.isnan(val).any():\n return np.array([[0, 0], [0, 0], [0, 0]])\n 
else:\n return val\n\n @property\n def n_frames(self):\n return self._n_frames\n\n @n_frames.setter\n def n_frames(self, n_frames):\n self._n_frames = n_frames\n\n def __len__(self):\n return self.n_frames\n\n @property\n def current_frame_id(self):\n return self._current_frame_id\n\n @current_frame_id.setter\n def current_frame_id(self, frame_id):\n # Check if the frame changed.\n last_frame_id = self._current_frame_id if self._enabled_frames is None else self._internal_frame_id\n if self.n_frames == 1 or frame_id == last_frame_id:\n return\n\n self.on_before_frame_update()\n if self._enabled_frames is None:\n if frame_id < 0:\n self._current_frame_id = 0\n elif frame_id >= len(self):\n self._current_frame_id = len(self) - 1\n else:\n self._current_frame_id = frame_id\n else:\n # If an enabled_frames is present use it to get the current frame.\n if frame_id < 0:\n self._internal_frame_id = 0\n elif frame_id >= self._enabled_frames.shape[0]:\n self._internal_frame_id = self._enabled_frames.shape[0] - 1\n else:\n self._internal_frame_id = frame_id\n self._current_frame_id = self._enabled_frame_id[self._internal_frame_id]\n # Update enabled using the mask.\n self.enabled = self._enabled_frames[self._internal_frame_id]\n\n # Update frame id of all children nodes.\n for n in self.nodes:\n n.current_frame_id = self._current_frame_id\n\n self.on_frame_update()\n if self.parent and (self._positions.shape[0] > 1 or self._rotations.shape[0] > 1 or self._scales.shape[0] > 1):\n self.update_transform(self.parent.model_matrix)\n\n def next_frame(self):\n self.current_frame_id = self.current_frame_id + 1 if self.current_frame_id < len(self) - 1 else 0\n\n def previous_frame(self):\n self.current_frame_id = self.current_frame_id - 1 if self.current_frame_id > 0 else len(self) - 1\n\n def on_before_frame_update(self):\n \"\"\"Called when the current frame is about to change, 'self.current_frame_id' still has the id of the\n previous frame.\"\"\"\n pass\n\n def on_frame_update(self):\n \"\"\"Called when the current frame is changed.\"\"\"\n pass\n\n def add(self, *nodes, **kwargs):\n self._add_nodes(*nodes, **kwargs)\n\n def _add_node(self, n: \"Node\", show_in_hierarchy=True, expanded=False, enabled=True):\n \"\"\"\n Add a single node\n :param show_in_hierarchy: Whether to show the node in the scene hierarchy.\n :param expanded: Whether the node is initially expanded in the GUI.\n \"\"\"\n if n is None:\n return\n n._show_in_hierarchy = show_in_hierarchy\n n._expanded = expanded\n n._enabled = enabled if n._enabled_frames is None else n._enabled_frames[n.current_frame_id]\n self.nodes.append(n)\n n.parent = self\n n.update_transform(self.model_matrix)\n\n def _add_nodes(self, *nodes, **kwargs):\n \"\"\"Add multiple nodes\"\"\"\n for n in nodes:\n self._add_node(n, **kwargs)\n\n def remove(self, *nodes):\n for n in nodes:\n n.release()\n try:\n self.nodes.remove(n)\n except:\n pass\n\n @property\n def show_in_hierarchy(self):\n return self._show_in_hierarchy\n\n @property\n def enabled(self):\n return self._enabled\n\n @enabled.setter\n def enabled(self, enabled):\n self._enabled = enabled\n\n @property\n def expanded(self):\n return self._expanded\n\n @expanded.setter\n def expanded(self, expanded):\n self._expanded = expanded\n\n def is_transparent(self):\n \"\"\"\n Returns true if the object is transparent and should thus be sorted when rendering.\n Subclassess that use a different color should implement this method to be rendered correctly when transparent.\n \"\"\"\n return self.material.color[3] < 
1.0\n\n def gui(self, imgui):\n \"\"\"\n Render GUI for custom node properties and controls. Implementation optional.\n Elements rendered here will show up in the scene hierarchy\n :param imgui: imgui context.\n See https://pyimgui.readthedocs.io/en/latest/reference/imgui.core.html for available elements to render\n \"\"\"\n pass\n\n def gui_modes(self, imgui):\n \"\"\"Render GUI with toolbar (tools) for this particular node\"\"\"\n\n def gui_animation(self, imgui):\n \"\"\"Render GUI for animation related settings\"\"\"\n\n if self._enabled_frames is None:\n if self.n_frames > 1:\n u, fid = imgui.slider_int(\n \"Frame##r_{}\".format(self.unique_name),\n self.current_frame_id,\n min_value=0,\n max_value=self.n_frames - 1,\n )\n if u:\n self.current_frame_id = fid\n else:\n u, fid = imgui.slider_int(\n \"Frame##r_{}\".format(self.unique_name),\n self._internal_frame_id,\n min_value=0,\n max_value=self._enabled_frames.shape[0] - 1,\n )\n if u:\n self.current_frame_id = fid\n\n def gui_affine(self, imgui):\n \"\"\"Render GUI for affine transformations\"\"\"\n # Position controls\n up, pos = imgui.drag_float3(\n \"Position##pos{}\".format(self.unique_name),\n *self.position,\n 1e-2,\n format=\"%.2f\",\n )\n if up:\n self.position = pos\n\n # Rotation controls\n euler_angles = rot2euler_numpy(self.rotation[np.newaxis], degrees=True)[0]\n ur, euler_angles = imgui.drag_float3(\n \"Rotation##pos{}\".format(self.unique_name),\n *euler_angles,\n 1e-2,\n format=\"%.2f\",\n )\n if ur:\n self.rotation = euler2rot_numpy(np.array(euler_angles)[np.newaxis], degrees=True)[0]\n\n # Scale controls\n us, scale = imgui.drag_float(\n \"Scale##scale{}\".format(self.unique_name),\n self.scale,\n 1e-2,\n min_value=0.001,\n max_value=100.0,\n format=\"%.3f\",\n )\n if us:\n self.scale = scale\n\n def gui_material(self, imgui):\n \"\"\"Render GUI with material properties\"\"\"\n\n # Color Control\n uc, color = imgui.color_edit4(\"Color##color{}'\".format(self.unique_name), *self.material.color)\n if uc:\n self.color = color\n\n # Diffuse\n ud, diffuse = imgui.slider_float(\n \"Diffuse##diffuse{}\".format(self.unique_name),\n self.material.diffuse,\n 0.0,\n 1.0,\n \"%.2f\",\n )\n if ud:\n self.material.diffuse = diffuse\n\n # Ambient\n ua, ambient = imgui.slider_float(\n \"Ambient##ambient{}\".format(self.unique_name),\n self.material.ambient,\n 0.0,\n 1.0,\n \"%.2f\",\n )\n if ua:\n self.material.ambient = ambient\n\n def gui_io(self, imgui):\n \"\"\"Render GUI for import/export\"\"\"\n pass\n\n def gui_mode_view(self, imgui):\n \"\"\"Render custom GUI for view mode\"\"\"\n pass\n\n def gui_context_menu(self, imgui, x: int, y: int):\n _, self.enabled = imgui.checkbox(\"Enabled\", self.enabled)\n if any([n._show_in_hierarchy for n in self.nodes]):\n imgui.spacing()\n imgui.separator()\n imgui.spacing()\n for n in self.nodes:\n if not n._show_in_hierarchy:\n continue\n if imgui.begin_menu(f\"{n.name}##{n.uid}\"):\n n.gui_context_menu(imgui, x, y)\n imgui.end_menu()\n\n # Renderable\n @staticmethod\n def once(func):\n def _decorator(self, *args, **kwargs):\n if self.is_renderable:\n return\n else:\n func(self, *args, **kwargs)\n self.is_renderable = True\n\n return _decorator\n\n def make_renderable(self, ctx):\n \"\"\"\n Prepares this object for rendering. 
This function must be called before `render` is used.\n :param ctx: The moderngl context.\n \"\"\"\n pass\n\n def render(self, camera, position=None, rotation=None, **kwargs):\n \"\"\"Render the current frame in this sequence.\"\"\"\n pass\n\n def render_positions(self, prog):\n \"\"\"\n Render with a VAO with only positions bound, used for shadow mapping, fragmap and depth prepass.\n \"\"\"\n pass\n\n def redraw(self, **kwargs):\n \"\"\"Perform update and redraw operations. Push to the GPU when finished. Recursively redraw child nodes\"\"\"\n for n in self.nodes:\n n.redraw(**kwargs)\n\n def set_camera_matrices(self, prog, camera, **kwargs):\n \"\"\"Set the model view projection matrix in the given program.\"\"\"\n # Transpose because np is row-major but OpenGL expects column-major.\n prog[\"model_matrix\"].write(self.model_matrix.T.astype(\"f4\").tobytes())\n prog[\"view_projection_matrix\"].write(camera.get_view_projection_matrix().T.astype(\"f4\").tobytes())\n\n def receive_shadow(self, program, **kwargs):\n \"\"\"\n Call this function if the renderable is to receive shadows.\n :param program: The shader program that can shade with shadows.\n :param kwargs: The render kwargs.\n \"\"\"\n if kwargs.get(\"shadows_enabled\", False):\n lights = kwargs[\"lights\"]\n\n for i, light in enumerate(lights):\n if light.shadow_enabled and light.shadow_map:\n light_matrix = light.mvp() @ self.model_matrix\n program[f\"dirLights[{i}].matrix\"].write(light_matrix.T.tobytes())\n\n # Bind shadowmap to slot i + 1, we reserve slot 0 for the mesh texture\n # and use slots 1 to (#lights + 1) for shadow maps\n light.shadow_map.use(location=i + 1)\n\n # Set sampler uniforms\n uniform = program[f\"shadow_maps\"]\n uniform.value = 1 if uniform.array_length == 1 else [*range(1, len(lights) + 1)]\n\n def render_shadowmap(self, light_matrix):\n if not self.cast_shadow or self.depth_only_program is None or self.color[3] == 0.0:\n return\n\n prog = self.depth_only_program\n prog[\"model_matrix\"].write(self.model_matrix.T.tobytes())\n prog[\"view_projection_matrix\"].write(light_matrix.T.tobytes())\n\n self.render_positions(prog)\n\n def render_fragmap(self, ctx, camera, uid=None):\n if not self.fragmap or self.fragmap_program is None:\n return\n\n # Transpose because np is row-major but OpenGL expects column-major.\n prog = self.fragmap_program\n self.set_camera_matrices(prog, camera)\n\n # Render with the specified object uid, if None use the node uid instead.\n prog[\"obj_id\"] = uid or self.uid\n\n if self.backface_culling or self.backface_fragmap:\n ctx.enable(moderngl.CULL_FACE)\n else:\n ctx.disable(moderngl.CULL_FACE)\n\n # If backface_fragmap is enabled for this node only render backfaces\n if self.backface_fragmap:\n ctx.cull_face = \"front\"\n\n self.render_positions(prog)\n\n # Restore cull face to back\n if self.backface_fragmap:\n ctx.cull_face = \"back\"\n\n def render_depth_prepass(self, camera, **kwargs):\n if not self.depth_prepass or self.depth_only_program is None:\n return\n\n prog = self.depth_only_program\n self.set_camera_matrices(prog, camera)\n self.render_positions(prog)\n\n def render_outline(self, ctx, camera):\n if self.outline and self.outline_program is not None:\n prog = self.outline_program\n self.set_camera_matrices(prog, camera)\n\n if self.backface_culling:\n ctx.enable(moderngl.CULL_FACE)\n else:\n ctx.disable(moderngl.CULL_FACE)\n self.render_positions(prog)\n\n # Render children node recursively.\n for n in self.nodes:\n n.render_outline(ctx, camera)\n\n def release(self):\n 
\"\"\"\n Release all OpenGL resources used by this node and any of its children. Subclasses that instantiate OpenGL\n objects should implement this method with '@hooked' to avoid leaking resources.\n \"\"\"\n for n in self.nodes:\n n.release()\n\n def on_selection(self, node, instance_id, tri_id):\n \"\"\"\n Called when the node is selected\n\n :param node: the node which was clicked (can be None if the selection wasn't a mouse event)\n :param instance_id: the id of the instance that was clicked, 0 if the object is not instanced\n (can be None if the selection wasn't a mouse event)\n :param tri_id: the id of the triangle that was clicked from the 'node' mesh\n (can be None if the selection wasn't a mouse event)\n \"\"\"\n pass\n\n def key_event(self, key, wnd_keys):\n \"\"\"\n Handle shortcut key presses (if you are the selected object)\n \"\"\"\n pass\n\n def update_frames(self, *args, **kwargs):\n pass\n\n def add_frames(self, *args, **kwargs):\n pass\n\n def remove_frames(self, *args, **kwargs):\n pass\n\n def _export_usd_recursively(self, stage, usd_path, directory, verbose):\n if verbose:\n print(usd_path)\n for n in self.nodes:\n if n.export_usd_enabled:\n n.export_usd(stage, usd_path, directory, verbose)\n\n def export_usd(self, stage, usd_path: str, directory: str = None, verbose=False):\n \"\"\"\n Export the node into an USD file. Nodes that implement this method should use\n recursively call this for every children that should also be exported.\n\n :param stage: an object of type Usd.Stage into which to export the node\n :param usd_path: the path of the parent object in the USD file scene hierarchy.\n \"\"\"\n from pxr import Gf, UsdGeom\n\n usd_path = f\"{usd_path}/{self.name.replace(' ', '_')}_{self.uid:03}\"\n\n # Transform.\n xform = UsdGeom.Xform.Define(stage, usd_path)\n a_xform = xform.AddTransformOp()\n a_xform.Set(Gf.Matrix4d(self.get_local_transform().astype(np.float64).T))\n\n self._export_usd_recursively(stage, usd_path, directory, verbose)" }, { "identifier": "compute_union_of_bounds", "path": "aitviewer/utils/utils.py", "snippet": "def compute_union_of_bounds(nodes):\r\n if len(nodes) == 0:\r\n return np.zeros((3, 2))\r\n\r\n bounds = np.array([[np.inf, np.NINF], [np.inf, np.NINF], [np.inf, np.NINF]])\r\n for n in nodes:\r\n child = n.bounds\r\n bounds[:, 0] = np.minimum(bounds[:, 0], child[:, 0])\r\n bounds[:, 1] = np.maximum(bounds[:, 1], child[:, 1])\r\n return bounds\r" }, { "identifier": "compute_union_of_current_bounds", "path": "aitviewer/utils/utils.py", "snippet": "def compute_union_of_current_bounds(nodes):\r\n if len(nodes) == 0:\r\n return np.zeros((3, 2))\r\n\r\n bounds = np.array([[np.inf, np.NINF], [np.inf, np.NINF], [np.inf, np.NINF]])\r\n for n in nodes:\r\n child = n.current_bounds\r\n bounds[:, 0] = np.minimum(bounds[:, 0], child[:, 0])\r\n bounds[:, 1] = np.maximum(bounds[:, 1], child[:, 1])\r\n return bounds\r" } ]
import numpy as np

from aitviewer.renderables.arrows import Arrows
from aitviewer.renderables.spheres import Spheres
from aitviewer.scene.node import Node
from aitviewer.utils.utils import (
    compute_union_of_bounds,
    compute_union_of_current_bounds,
)
11,651
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos
class RigidBodies(Node):
    """
    A sequence of N 3D positions and orientations in space.
    """

    def __init__(
        self,
        rb_pos,
        rb_ori,
        radius=0.02,
        length=0.2,
        radius_cylinder=None,
        color=(0.0, 1.0, 0.5, 1.0),
        icon="\u0086",
        **kwargs,
    ):
        """
        Initializer.
        :param rb_pos: A np array of shape (F, N, 3) containing N rigid-body centers over F time steps.
        :param rb_ori: A np array of shape (F, N, 3, 3) containing N rigid-body orientations over F time steps.
        :param radius: Radius of the sphere at the origin of the rigid body.
        :param length: Length of arrows representing the orientation of the rigid body.
        :param radius_cylinder: Radius of the cylinder representing the orientation, default is length / 50
        :param color: Color of the rigid body centers (4-tuple).
        """
        self._rb_pos = rb_pos[np.newaxis] if rb_pos.ndim == 2 else rb_pos
        self._rb_ori = rb_ori[np.newaxis] if rb_ori.ndim == 3 else rb_ori

        super(RigidBodies, self).__init__(n_frames=self.rb_pos.shape[0], color=color, icon=icon, **kwargs)

        self.radius = radius
        self.length = length

        self.spheres = Spheres(rb_pos, radius=radius, color=color, is_selectable=False)
        self._add_node(self.spheres, show_in_hierarchy=False)

        self.coords = []
        r_base = radius_cylinder or length / 50
        r_head = r_base * 2
        c = [0.0, 0.0, 0.0, 1.0]
        for i in range(3):
            line = self.rb_ori[..., :, i]
            line = line / np.linalg.norm(line, axis=-1, keepdims=True) * length
            color = c.copy()
            color[i] = 1.0
            axs = Arrows(
                self.rb_pos,
                self.rb_pos + line,
                r_base=r_base,
                r_head=r_head,
                color=tuple(color),
                is_selectable=False,
            )
            self._add_node(axs, show_in_hierarchy=False)
            self.coords.append(axs)

    @Node.color.setter
    def color(self, color):
        self.material.color = color
        self.spheres.color = color

    @property
    def current_rb_pos(self):
        idx = self.current_frame_id if self.rb_pos.shape[0] > 1 else 0
        return self.rb_pos[idx]

    @current_rb_pos.setter
    def current_rb_pos(self, pos):
        idx = self.current_frame_id if self.rb_pos.shape[0] > 1 else 0
        self.rb_pos[idx] = pos

    @property
    def current_rb_ori(self):
        idx = self.current_frame_id if self.rb_ori.shape[0] > 1 else 0
        return self.rb_ori[idx]

    @current_rb_ori.setter
    def current_rb_ori(self, ori):
        idx = self.current_frame_id if self.rb_ori.shape[0] > 1 else 0
        self.rb_ori[idx] = ori

    @property
    def rb_pos(self):
        return self._rb_pos

    @rb_pos.setter
    def rb_pos(self, rb_pos):
        self._rb_pos = rb_pos if len(rb_pos.shape) == 3 else rb_pos[np.newaxis]
        self.n_frames = self._rb_pos.shape[0]

    @property
    def rb_ori(self):
        return self._rb_ori

    @rb_ori.setter
    def rb_ori(self, rb_ori):
        self._rb_ori = rb_ori if len(rb_ori.shape) == 4 else rb_ori[np.newaxis]
        self.n_frames = self._rb_ori.shape[0]

    @property
    def bounds(self):
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos
class RigidBodies(Node):
    """
    A sequence of N 3D positions and orientations in space.
    """

    def __init__(
        self,
        rb_pos,
        rb_ori,
        radius=0.02,
        length=0.2,
        radius_cylinder=None,
        color=(0.0, 1.0, 0.5, 1.0),
        icon="\u0086",
        **kwargs,
    ):
        """
        Initializer.
        :param rb_pos: A np array of shape (F, N, 3) containing N rigid-body centers over F time steps.
        :param rb_ori: A np array of shape (F, N, 3, 3) containing N rigid-body orientations over F time steps.
        :param radius: Radius of the sphere at the origin of the rigid body.
        :param length: Length of arrows representing the orientation of the rigid body.
        :param radius_cylinder: Radius of the cylinder representing the orientation, default is length / 50
        :param color: Color of the rigid body centers (4-tuple).
        """
        self._rb_pos = rb_pos[np.newaxis] if rb_pos.ndim == 2 else rb_pos
        self._rb_ori = rb_ori[np.newaxis] if rb_ori.ndim == 3 else rb_ori

        super(RigidBodies, self).__init__(n_frames=self.rb_pos.shape[0], color=color, icon=icon, **kwargs)

        self.radius = radius
        self.length = length

        self.spheres = Spheres(rb_pos, radius=radius, color=color, is_selectable=False)
        self._add_node(self.spheres, show_in_hierarchy=False)

        self.coords = []
        r_base = radius_cylinder or length / 50
        r_head = r_base * 2
        c = [0.0, 0.0, 0.0, 1.0]
        for i in range(3):
            line = self.rb_ori[..., :, i]
            line = line / np.linalg.norm(line, axis=-1, keepdims=True) * length
            color = c.copy()
            color[i] = 1.0
            axs = Arrows(
                self.rb_pos,
                self.rb_pos + line,
                r_base=r_base,
                r_head=r_head,
                color=tuple(color),
                is_selectable=False,
            )
            self._add_node(axs, show_in_hierarchy=False)
            self.coords.append(axs)

    @Node.color.setter
    def color(self, color):
        self.material.color = color
        self.spheres.color = color

    @property
    def current_rb_pos(self):
        idx = self.current_frame_id if self.rb_pos.shape[0] > 1 else 0
        return self.rb_pos[idx]

    @current_rb_pos.setter
    def current_rb_pos(self, pos):
        idx = self.current_frame_id if self.rb_pos.shape[0] > 1 else 0
        self.rb_pos[idx] = pos

    @property
    def current_rb_ori(self):
        idx = self.current_frame_id if self.rb_ori.shape[0] > 1 else 0
        return self.rb_ori[idx]

    @current_rb_ori.setter
    def current_rb_ori(self, ori):
        idx = self.current_frame_id if self.rb_ori.shape[0] > 1 else 0
        self.rb_ori[idx] = ori

    @property
    def rb_pos(self):
        return self._rb_pos

    @rb_pos.setter
    def rb_pos(self, rb_pos):
        self._rb_pos = rb_pos if len(rb_pos.shape) == 3 else rb_pos[np.newaxis]
        self.n_frames = self._rb_pos.shape[0]

    @property
    def rb_ori(self):
        return self._rb_ori

    @rb_ori.setter
    def rb_ori(self, rb_ori):
        self._rb_ori = rb_ori if len(rb_ori.shape) == 4 else rb_ori[np.newaxis]
        self.n_frames = self._rb_ori.shape[0]

    @property
    def bounds(self):
return compute_union_of_bounds(self.coords)
3
2023-12-07 16:13:50+00:00
16k
nexB/dejacode
dejacode/urls.py
[ { "identifier": "ComponentViewSet", "path": "component_catalog/api.py", "snippet": "class ComponentViewSet(CreateRetrieveUpdateListViewSet):\n queryset = Component.objects.all()\n serializer_class = ComponentSerializer\n filterset_class = ComponentFilterSet\n lookup_field = \"uuid\"\n search_fields = (\n \"name\",\n \"version\",\n \"copyright\",\n \"homepage_url\",\n \"project\",\n )\n search_fields_autocomplete = (\n \"name\",\n \"version\",\n )\n ordering_fields = (\n \"name\",\n \"version\",\n \"copyright\",\n \"license_expression\",\n \"primary_language\",\n \"project\",\n \"codescan_identifier\",\n \"type\",\n \"configuration_status\",\n \"usage_policy\",\n \"curation_level\",\n \"completion_level\",\n \"created_date\",\n \"last_modified_date\",\n )\n email_notification_on = ComponentAdmin.email_notification_on\n allow_reference_access = True\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .select_related(\n \"type\",\n \"owner__dataspace\",\n \"configuration_status\",\n )\n .prefetch_related(\n \"licenses__category\",\n \"packages\",\n external_references_prefetch,\n )\n )" }, { "identifier": "PackageViewSet", "path": "component_catalog/api.py", "snippet": "class PackageViewSet(SendAboutFilesMixin, CreateRetrieveUpdateListViewSet):\n queryset = Package.objects.all()\n serializer_class = PackageSerializer\n filterset_class = PackageAPIFilterSet\n lookup_field = \"uuid\"\n search_fields = (\n \"filename\",\n \"project\",\n )\n search_fields_autocomplete = (\n \"type\",\n \"namespace\",\n \"name\",\n \"version\",\n \"filename\",\n )\n ordering_fields = (\n \"download_url\",\n \"filename\",\n \"size\",\n \"release_date\",\n \"primary_language\",\n \"project\",\n \"copyright\",\n \"license_expression\",\n \"usage_policy\",\n \"created_date\",\n \"last_modified_date\",\n )\n email_notification_on = PackageAdmin.email_notification_on\n allow_reference_access = True\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .prefetch_related(\n \"component_set__owner\",\n \"licenses__category\",\n external_references_prefetch,\n )\n )\n\n @action(detail=True)\n def about(self, request, uuid):\n package = self.get_object()\n return Response({\"about_data\": package.as_about_yaml()})\n\n @action(detail=True)\n def about_files(self, request, uuid):\n package = self.get_object()\n about_files = package.get_about_files()\n filename = self.get_filename(package)\n return self.get_zipped_response(about_files, filename)\n\n download_url_description = (\n \"A single, or list of, Download URL(s).<br><br>\"\n '<b>cURL style</b>: <code>-d \"download_url=url1&download_url=url2\"</code><br><br>'\n '<b>Python</b>: <code>data = {\"download_url\": [\"url1\", \"url2\"]}</code>'\n )\n\n add_action_schema = AutoSchema(\n manual_fields=[\n coreapi.Field(\n \"download_url\",\n required=True,\n location=\"body\",\n schema=coreschema.String(description=download_url_description),\n ),\n ]\n )\n\n @action(detail=False, methods=[\"post\"], name=\"Package Add\", schema=add_action_schema)\n def add(self, request):\n \"\"\"\n Alternative way to add a package providing only its `download_url`.\n\n Multiple URLs can be submitted through a single request.\n\n Note that this feature is intended only for publicly available open\n source packages, not your private code.\n\n DejaCode will automatically collect the `filename`, `sha1`, `md5`, and\n `size` and apply them to the package definition.\n The `package_url` will also be generated when possible.\n\n If package scanning is enabled in your 
dataspace, DejaCode will also\n submit the package to ScanCode.io and the results will be returned to\n the \"Scan\" detail tab of the package when that scan is complete.\n \"\"\"\n download_urls = request.POST.getlist(\"download_url\")\n if not download_urls:\n error = {\"download_url\": \"This field is required.\"}\n return Response(error, status=400)\n\n results = defaultdict(list)\n for url in download_urls:\n url = url.strip()\n package = collect_create_scan(url, request.user)\n if package:\n results[\"added\"].append(url)\n else:\n results[\"failed\"].append(url)\n\n return Response(results)" }, { "identifier": "SubcomponentViewSet", "path": "component_catalog/api.py", "snippet": "class SubcomponentViewSet(CreateRetrieveUpdateListViewSet):\n queryset = Subcomponent.objects.all()\n serializer_class = SubcomponentSerializer\n filterset_class = SubcomponentFilterSet\n lookup_field = \"uuid\"\n search_fields = (\"notes\",)\n ordering_fields = (\n \"license_expression\",\n \"created_date\",\n \"last_modified_date\",\n )\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .select_related(\n \"parent\",\n \"child\",\n )\n )" }, { "identifier": "send_scan_notification", "path": "component_catalog/views.py", "snippet": "@require_POST\n@csrf_exempt\ndef send_scan_notification(request, key):\n try:\n json_data = json.loads(request.body.decode(\"utf-8\"))\n except json.JSONDecodeError:\n raise Http404\n\n user_uuid = signing.loads(key)\n if not is_uuid4(user_uuid):\n raise Http404(\"Provided key is not a valid UUID.\")\n\n user = get_object_or_404(DejacodeUser, uuid=user_uuid)\n dataspace = user.dataspace\n\n project = json_data.get(\"project\")\n input_sources = project.get(\"input_sources\")\n if not input_sources:\n raise Http404(\"Missing `input_sources` entry in provided data.\")\n download_url = input_sources[0].get(\"download_url\")\n\n package = get_object_or_404(Package, download_url=download_url, dataspace=user.dataspace)\n description = package.download_url\n\n run = json_data.get(\"run\")\n scan_status = run.get(\"status\")\n\n update_package_from_scan = all(\n [\n dataspace.enable_package_scanning,\n dataspace.update_packages_from_scan,\n scan_status.lower() == \"success\",\n ]\n )\n\n # Triggers the Package data automatic update from Scan results, if enabled.\n if update_package_from_scan:\n scancodeio = ScanCodeIO(user)\n updated_fields = scancodeio.update_from_scan(package, user)\n if updated_fields:\n description = (\n f'Automatically updated {\", \".join(updated_fields)} from scan results\\n'\n + description\n )\n\n notify.send(\n sender=user,\n verb=f\"Scan {scan_status}\",\n action_object=package,\n recipient=user,\n description=description,\n )\n\n return JsonResponse({\"message\": \"Notification created\"})" }, { "identifier": "two_factor", "path": "dje/two_factor.py", "snippet": "TWOFA_USER_SESSION_KEY = \"_2fa_user_id\"\nclass TwoFactorEnableForm(OTPAuthenticationFormMixin, forms.Form):\nclass TwoFactorDisableForm(OTPTokenForm):\nclass TwoFactorVerifyForm(OTPTokenForm):\nclass EnableView(\n LoginRequiredMixin,\n FormView,\n):\nclass LoginView(DefaultLoginView):\nclass VerifyView(DefaultLoginView):\nclass DisableView(\n LoginRequiredMixin,\n FormView,\n):\n def __init__(self, user, key, *args, **kwargs):\n def bin_key(self):\n def clean_token(self):\n def save(self):\n def helper(self):\n def helper(self):\n def helper(self):\n def dispatch(self, request, *args, **kwargs):\n def get(self, request, *args, **kwargs):\n def get_key(self):\n def 
get_form_kwargs(self):\n def get_qr_code(self):\n def get_context_data(self, **kwargs):\n def form_valid(self, form):\n def form_invalid(self, form):\n def form_valid(self, form):\n def get_form_kwargs(self):\n def form_valid(self, form):\n def dispatch(self, request, *args, **kwargs):\n def get(self, request, *args, **kwargs):\n def get_form_kwargs(self):\n def form_valid(self, form):" }, { "identifier": "dejacode_site", "path": "dje/admin.py", "snippet": "EXTERNAL_SOURCE_LOOKUP = \"external_references__external_source_id\"\nADDITION = History.ADDITION\nCHANGE = History.CHANGE\nDELETION = History.DELETION\n HIDDEN_VALUE = \"*******\"\nclass DejaCodeAdminSite(AdminSite):\nclass ReferenceOnlyPermissions:\nclass DataspacedFKMixin:\nclass ProtectedFieldsMixin:\nclass ChangelistPopupPermissionMixin:\nclass ProhibitDataspaceLookupMixin:\nclass AdvancedSearchAdminMixin:\nclass HistoryAdminMixin:\nclass ColoredIconAdminMixin:\n class Media:\nclass DataspacedChangeList(ChangeList):\nclass DataspacedAdmin(\n DataspacedFKMixin,\n ProtectedFieldsMixin,\n AdvancedSearchAdminMixin,\n HistoryAdminMixin,\n admin.ModelAdmin,\n):\nclass HiddenValueWidget(forms.TextInput):\nclass DataspaceConfigurationForm(forms.ModelForm):\nclass DataspaceConfigurationInline(DataspacedFKMixin, admin.StackedInline):\nclass DataspaceAdmin(\n ReferenceOnlyPermissions,\n HistoryAdminMixin,\n admin.ModelAdmin,\n):\nclass ChildRelationshipInline(DataspacedFKMixin, admin.TabularInline):\nclass ExternalReferenceInline(DataspacedFKMixin, GenericTabularInline):\nclass ExternalSourceAdmin(DataspacedAdmin):\nclass DejacodeUserChangeForm(forms.ModelForm):\n class Meta:\nclass DejacodeUserCreationForm(DejacodeUserChangeForm):\nclass DejacodeUserAdmin(\n DataspacedFKMixin,\n AdvancedSearchAdminMixin,\n HistoryAdminMixin,\n UserAdmin,\n):\nclass GroupAdmin(ReferenceOnlyPermissions, HistoryAdminMixin, GroupAdmin):\n class ReferenceAccessAttemptAdmin(ReferenceOnlyPermissions, AccessAttemptAdmin):\n def get_urls(self):\ndef get_hierarchy_link(obj):\ndef get_additional_information_fieldset(pre_fields=None):\n def has_add_permission(self, request):\n def has_change_permission(self, request, obj=None):\n def has_delete_permission(self, request, obj=None):\n def has_view_permission(self, request, obj=None):\n def formfield_for_foreignkey(self, db_field, request=None, **kwargs):\n def get_readonly_fields(self, request, obj=None):\n def has_change_permission(self, request, obj=None):\n def lookup_allowed(self, lookup, value):\n def check(self, **kwargs):\n def get_queryset(self, request):\n def get_search_results(self, request, queryset, search_term):\n def log_addition(self, request, object, change_message=None):\n def log_change(self, request, object, message):\n def log_deletion(self, request, object, object_repr):\n def history_view(self, request, object_id, extra_context=None):\n def colored_icon(self, obj):\n def get_results(self, request):\n def has_filters_activated(self):\n def get_filters_params(self, params=None):\n def set_reference_link(self, request):\n def __init__(self, model, admin_site):\n def check(self, **kwargs):\n def changeform_view_on_site(self, obj):\n def changelist_view_on_site(self, obj):\n def urn_link(self, instance):\n def get_queryset(self, request):\n def get_changelist(self, request, **kwargs):\n def get_list_filter(self, request):\n def get_readonly_fields(self, request, obj=None):\n def change_view(self, request, object_id, form_url=\"\", extra_context=None):\n def render_change_form(self, request, context, 
add=False, change=False, form_url=\"\", obj=None):\n def get_selected_ids_from_request(request, queryset):\n def base_action_with_redirect(self, request, queryset, viewname):\n def copy_to(self, request, queryset):\n def compare_with(self, request, queryset):\n def check_updates_in_reference(self, request, queryset):\n def check_newer_version_in_reference(self, request, queryset):\n def base_check_in_reference_action(request, model_class, orm_lookups):\n def get_changes_details(form):\n def save_model(self, request, obj, form, change):\n def save_formset(self, request, form, formset, change):\n def delete_model(self, request, obj):\n def delete_queryset(self, request, queryset):\n def get_urls(self):\n def get_form(self, request, obj=None, **kwargs):\n def get_fieldsets(self, request, obj=None):\n def get_inline_instances(self, request, obj=None):\n def get_actions(self, request):\n def copy_link(self, obj):\n def hide_display_links(request):\n def get_list_display(self, request):\n def get_list_display_links(self, request, list_display):\n def response_change(self, request, obj):\n def lookup_allowed(self, lookup, value):\n def _limited_permission(request, obj, has_perm):\n def has_add_permission(self, request):\n def has_change_permission(self, request, obj=None):\n def has_delete_permission(self, request, obj=None):\n def has_view_permission(self, request, obj=None):\n def has_importer(self):\n def has_activity_log(self):\n def render(self, name, value, attrs=None, renderer=None):\n def __init__(self, *args, **kwargs):\n def clean(self):\n def has_change_permission(self, request, obj=None):\n def get_readonly_fields(self, request, obj=None):\n def get_urls(self):\n def get_queryset(self, request):\n def get_actions(self, request):\n def changeform_view(self, request, object_id=None, form_url=\"\", extra_context=None):\n def references(self, obj):\ndef send_activation_email(user, request):\n def __init__(self, *args, **kwargs):\n def save(self, commit=True):\n def get_form(self, request, obj=None, **kwargs):\n def get_queryset(self, request):\n def formfield_for_foreignkey(self, db_field, request=None, **kwargs):\n def user_change_password(self, request, id, form_url=\"\"):\n def get_list_filter(self, request):\n def has_activity_log(self):\n def get_urls(self):\n def log_addition(self, request, object, change_message=None):\n def delete_model(self, request, obj):\n def get_actions(self, request):\n def set_inactive(self, request, queryset):\n def export_as_csv(self, request, queryset):\n def send_activation_email(self, request, object_id):\n def get_queryset(self, request):\n def get_permissions(self, obj):\n def formfield_for_manytomany(self, db_field, request=None, **kwargs):\n def get_urls(self):\n def get_permission_group_mapping():\n def permission_details_view(self, request):\n def permission_export_csv(self, request):\ndef register_axes_admin():" }, { "identifier": "ExternalReferenceViewSet", "path": "dje/api.py", "snippet": "class ExternalReferenceViewSet(ExtraPermissionsViewSetMixin, CreateRetrieveUpdateListViewSet):\n queryset = ExternalReference.objects.all()\n serializer_class = ExternalReferenceSerializer\n lookup_field = \"uuid\"\n filterset_class = ExternalReferenceFilterSet\n extra_permissions = (TabPermission,)\n search_fields = (\"external_id\",)\n ordering_fields = (\n \"external_source\",\n \"created_date\",\n \"last_modified_date\",\n )\n allow_reference_access = True\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n 
.scope(self.request.user.dataspace)\n .select_related(\"content_type\")\n .prefetch_related(\"content_object\")\n )" }, { "identifier": "DejaCodeAuthenticationForm", "path": "dje/forms.py", "snippet": "class DejaCodeAuthenticationForm(AuthenticationForm):\n \"\"\"Login form.\"\"\"\n\n use_required_attribute = False\n\n @property\n def helper(self):\n helper = FormHelper()\n helper.form_id = \"sign-in\"\n helper.form_action = \"login\"\n helper.form_method = \"post\"\n helper.form_tag = False\n\n fields = [\n Field(\"username\", css_class=\"input-block-level mb-3\", placeholder=_(\"Username\")),\n Field(\"password\", css_class=\"input-block-level mb-3\", placeholder=_(\"Password\")),\n Div(\n StrictSubmit(\"submit\", _(\"Sign in\"), css_class=\"btn-warning\"),\n css_class=\"d-grid\",\n ),\n ]\n\n helper.add_layout(Layout(Fieldset(\"\", *fields)))\n return helper\n\n def get_invalid_login_error(self):\n username = self.cleaned_data.get(\"username\")\n if \"@\" in username:\n return ValidationError(\n \"Be sure to enter your DejaCode username rather than your email \"\n \"address to sign in to DejaCode.\"\n )\n return super().get_invalid_login_error()" }, { "identifier": "DejaCodeActivationView", "path": "dje/registration.py", "snippet": "class DejaCodeActivationView(ActivationView):\n def get_success_url(self, user=None):\n \"\"\"Add support for 'Sign Up' registration and User creation in admin.\"\"\"\n if user.has_usable_password():\n # User created from registration process\n return self.success_url\n\n # User created in the admin addition view\n return self.get_password_reset_confirm_url(user)\n\n @staticmethod\n def get_password_reset_confirm_url(user):\n uid = urlsafe_base64_encode(force_bytes(user.pk))\n token_generator = PasswordResetTokenGenerator()\n token = token_generator.make_token(user)\n return reverse(\"password_reset_confirm\", args=(uid, token))\n\n def get_user(self, username):\n \"\"\"\n Remove the `already_activated` exception from original method.\n\n The activation link is valid and usable until the\n `ACCOUNT_ACTIVATION_DAYS` period is expired.\n\n This is required, for a user created by an admin user, to reach\n the \"set password\" form even if the activation URL was already\n requested (by an email service for example).\n \"\"\"\n User = get_user_model()\n try:\n user = User.objects.get(\n **{\n User.USERNAME_FIELD: username,\n }\n )\n return user\n except User.DoesNotExist:\n raise ActivationError(self.BAD_USERNAME_MESSAGE, code=\"bad_username\")" }, { "identifier": "DejaCodeRegistrationForm", "path": "dje/registration.py", "snippet": "class DejaCodeRegistrationForm(RegistrationFormUniqueEmail):\n \"\"\"Used in `registration.backends.hmac.views.RegistrationView`.\"\"\"\n\n use_required_attribute = False\n hcaptcha = hCaptchaField()\n\n class Meta(RegistrationFormUniqueEmail.Meta):\n model = User\n fields = [\n \"username\",\n \"email\",\n \"first_name\",\n \"last_name\",\n \"company\",\n \"password1\",\n \"hcaptcha\",\n \"updates_email_notification\",\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if \"password2\" in self.fields:\n del self.fields[\"password2\"]\n\n self.fields[\"username\"].validators.append(validators.MinLengthValidator(3))\n\n placeholders = {\n \"username\": _(\"Username\"),\n \"email\": _(\"Email address\"),\n \"first_name\": _(\"First name\"),\n \"last_name\": _(\"Last name\"),\n \"company\": _(\"Company\"),\n \"password1\": _(\"Choose a password\"),\n }\n for field_name, placeholder in 
placeholders.items():\n self.fields[field_name].widget.attrs[\"placeholder\"] = placeholder\n\n self.fields[\"first_name\"].required = True\n self.fields[\"last_name\"].required = True\n self.fields[\"company\"].required = True\n\n self.fields[\"hcaptcha\"].label = \"\"\n\n self.fields[\n \"updates_email_notification\"\n ].label = \"Receive updates on DejaCode features and news\"\n\n for field in self.fields.values():\n field.help_text = None\n\n @property\n def helper(self):\n helper = FormHelper()\n helper.form_id = \"registration\"\n helper.form_method = \"post\"\n helper.form_action = \"django_registration_register\"\n helper.attrs = {\"autocomplete\": \"off\"}\n\n eula = HTML(\n '<p class=\"eula\">By clicking on \"Create account\" below, you are agreeing '\n 'to our <a href=\"/eula/\">EULA</a>.</p>'\n )\n\n helper.layout = Layout(\n Fieldset(\n None,\n Field(\"username\", css_class=\"input-block-level\"),\n Field(\"email\", css_class=\"input-block-level\"),\n Div(\n Div(Field(\"first_name\"), css_class=\"col ps-0\"),\n Div(Field(\"last_name\"), css_class=\"col pe-0\"),\n css_class=\"row m-0\",\n ),\n Field(\"company\", css_class=\"input-block-level\"),\n Field(\n \"password1\",\n css_class=\"input-block-level\",\n autocomplete=\"new-password\",\n ),\n Div(\n Field(\"updates_email_notification\"),\n css_class=\"alert alert-primary px-2\",\n ),\n \"hcaptcha\",\n eula,\n Div(\n StrictSubmit(\n \"submit\",\n _(\"Create your account\"),\n css_class=\"btn btn-warning\",\n ),\n css_class=\"d-grid\",\n ),\n ),\n )\n\n return helper\n\n def clean_password1(self):\n password1 = self.cleaned_data.get(\"password1\")\n self.instance.username = self.cleaned_data.get(\"username\")\n password_validation.validate_password(password1, self.instance)\n return password1\n\n def save(self, commit=True):\n \"\"\"Add the default Dataspace on the user instance before saving.\"\"\"\n user = super().save(commit=False)\n\n user.dataspace, _ = Dataspace.objects.get_or_create(name=REGISTRATION_DEFAULT_DATASPACE)\n user.is_active = False\n if REGISTRATION_DEFAULT_IS_STAFF:\n user.is_staff = True\n user.save()\n\n for group_name in REGISTRATION_DEFAULT_GROUPS:\n with suppress(Group.DoesNotExist):\n user.groups.add(Group.objects.get(name=group_name))\n\n self.send_notification_email_to_admins(user)\n History.log_addition(user, user)\n return user\n\n @staticmethod\n def send_notification_email_to_admins(user):\n subject = \"[DejaCode] New User registration\"\n message = f\"New registration for user {user.username} {user.email}\"\n send_mail_to_admins_task.delay(subject, message)" }, { "identifier": "AccountProfileView", "path": "dje/views.py", "snippet": "class AccountProfileView(\n LoginRequiredMixin,\n FormView,\n):\n template_name = \"account/profile.html\"\n form_class = AccountProfileForm\n success_url = reverse_lazy(\"account_profile\")\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context.update(\n {\n \"user_has_device\": django_otp.user_has_device(self.request.user),\n }\n )\n\n return context\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"instance\"] = self.request.user\n return kwargs\n\n def form_valid(self, form):\n if getattr(form, \"changed_data\", None):\n form.save()\n messages.success(self.request, _(\"Profile updated.\"))\n return super().form_valid(form)\n\n def post(self, request, *args, **kwargs):\n \"\"\"Add the ability to regenerate the API key.\"\"\"\n if request.POST.get(\"regenerate-api-key\"):\n 
request.user.regenerate_api_key()\n messages.success(request, _(\"Your API key was regenerated.\"))\n return self.form_valid(None)\n\n return super().post(request, *args, **kwargs)" }, { "identifier": "AllNotificationsList", "path": "dje/views.py", "snippet": "class AllNotificationsList(\n NotificationsCountMixin,\n notifications_views.AllNotificationsList,\n):\n pass" }, { "identifier": "DataspaceAwareAutocompleteLookup", "path": "dje/views.py", "snippet": "class DataspaceAwareAutocompleteLookup(AutocompleteLookup):\n \"\"\"\n Extend Grappelli's ``AutocompleteLookup`` view class so that the data\n it Return is scoped to the current user's dataspace.\n\n The correct behavior is for the autocomplete results to be scoped to the\n dataspace of the object that the user is editing.\n Otherwise if the user is a member of the reference dataspace, the autocomplete\n results will contain objects from other dataspaces.\n In case the user is in the reference dataspace, we use the HTTP_REFERER from\n the request to determine the edited object and scope the result to its\n dataspace.\n The proper way to do this should be to patch Grappelli's JavaScript source\n code to pass parameters about the edited object in the ajax request.\n\n https://github.com/sehmaschine/django-grappelli/issues/362\n\n The security scoping is applied when the related manager is flagged as `is_secured`.\n \"\"\"\n\n def set_dataspace_scope(self, qs):\n \"\"\"\n Limit the queryset scope to the user dataspace.\n If the user is a reference dataspace user, he may been editing an object\n from another dataspace, in that case we are trying to limit the\n results to this dataspace.\n \"\"\"\n user_dataspace = self.request.user.dataspace\n\n # If the user is a reference dataspace user, we look into the the `HTTP_REFERER`\n # to determine if he's looking at another dataspace object.\n if user_dataspace.is_reference:\n instance = get_instance_from_referer(self.request)\n if instance:\n return qs.scope(instance.dataspace)\n\n return qs.scope(user_dataspace)\n\n def get_annotated_queryset(self, qs):\n \"\"\"\n Add some annotations to assist the search fields defined in\n GRAPPELLI_AUTOCOMPLETE_SEARCH_FIELDS.\n \"\"\"\n if self.model._meta.model_name in [\"product\", \"component\"]:\n qs = qs.annotate(name_version=Concat(\"name\", Value(\" \"), \"version\"))\n\n if self.model._meta.model_name == \"package\":\n qs = qs.annotate(\n type_name_version=Concat(\"type\", Value(\" \"), \"name\", Value(\" \"), \"version\"),\n )\n\n return qs\n\n def get_searched_queryset(self, qs):\n \"\"\"\n Add support for search `Package` directly from a Package URL input term\n such as: 'pkg:type/name@version'.\n \"\"\"\n term = self.GET.get(\"term\")\n if self.model._meta.model_name == \"package\" and term.startswith(\"pkg:\"):\n return qs.for_package_url(term)\n\n return super().get_searched_queryset(qs)\n\n def get_queryset(self):\n if is_secured(self.model._default_manager):\n perm = get_permission_codename(\"change\", self.model._meta)\n qs = self.model._default_manager.get_queryset(self.request.user, perm)\n else:\n qs = self.set_dataspace_scope(self.model._default_manager.all())\n\n qs = self.get_annotated_queryset(qs)\n qs = self.get_filtered_queryset(qs)\n qs = self.get_searched_queryset(qs)\n return qs.distinct()" }, { "identifier": "DataspaceAwareRelatedLookup", "path": "dje/views.py", "snippet": "class DataspaceAwareRelatedLookup(RelatedLookup):\n \"\"\"\n Rxtend Grappelli's ``RelatedLookup`` view class so that the data it\n Return is scoped to the 
current user's dataspace.\n\n The security scoping is applied when the related manager is flagged as `is_secured`.\n \"\"\"\n\n def get_queryset(self):\n if is_secured(self.model._default_manager):\n perm = get_permission_codename(\"change\", self.model._meta)\n qs = self.model._default_manager.get_queryset(self.request.user, perm)\n qs = self.get_filtered_queryset(qs)\n else:\n qs = super().get_queryset()\n\n user_dataspace = self.request.user.dataspace\n if not user_dataspace.is_reference:\n qs = qs.scope(user_dataspace)\n return qs" }, { "identifier": "GlobalSearchListView", "path": "dje/views.py", "snippet": "class GlobalSearchListView(AcceptAnonymousMixin, TemplateView):\n template_name = \"global_search.html\"\n SearchResult = namedtuple(\"SearchResult\", [\"object_list\", \"paginator_count\"])\n\n def get_list_view_results(self, view_class, dataspace):\n request = RequestFactory().get(\"\", self.request.GET)\n # Fake User.dataspace using deepcopy() to avoid any side-effects on the UI.\n request.user = copy.deepcopy(self.request.user)\n request.user.dataspace = dataspace\n request.session = {}\n response = view_class.as_view()(request)\n return self.SearchResult(\n object_list=response.context_data[\"object_list\"],\n paginator_count=response.context_data[\"paginator\"].count,\n )\n\n def get_context_data(self, **kwargs):\n # Avoid circular references\n from component_catalog.views import ComponentListView\n from component_catalog.views import PackageListView\n from license_library.views import LicenseListView\n from organization.views import OwnerListView\n from product_portfolio.views import ProductListView\n\n get_result = self.get_list_view_results\n context = super().get_context_data(**kwargs)\n search_query = self.request.GET.get(\"q\", \"\")\n if not search_query:\n return context\n\n user = self.request.user\n user_dataspace = user.dataspace\n reference_dataspace = Dataspace.objects.get_reference()\n\n context.update(\n {\n \"search_query\": search_query,\n \"component_results\": get_result(ComponentListView, user_dataspace),\n \"package_results\": get_result(PackageListView, user_dataspace),\n \"license_results\": get_result(LicenseListView, user_dataspace),\n \"owner_results\": get_result(OwnerListView, user_dataspace),\n }\n )\n\n include_products = all(\n [\n user.is_authenticated,\n user.has_perm(\"product_portfolio.view_product\"),\n ]\n )\n\n if include_products:\n context.update(\n {\n \"include_products\": True,\n \"product_results\": get_result(ProductListView, user_dataspace),\n }\n )\n\n insert_reference_data = all(\n [\n self.request.user.is_authenticated,\n reference_dataspace,\n user_dataspace != reference_dataspace,\n ]\n )\n\n if insert_reference_data:\n context.update(\n {\n \"reference_component_results\": get_result(\n ComponentListView, reference_dataspace\n ),\n \"reference_license_results\": get_result(LicenseListView, reference_dataspace),\n \"reference_package_results\": get_result(PackageListView, reference_dataspace),\n \"reference_owner_results\": get_result(OwnerListView, reference_dataspace),\n \"reference_dataspace\": reference_dataspace,\n }\n )\n\n context[\"include_purldb\"] = all(\n [user_dataspace.enable_purldb_access, PurlDB(user).is_available()]\n )\n\n return context" }, { "identifier": "IntegrationsStatusView", "path": "dje/views.py", "snippet": "class IntegrationsStatusView(\n LoginRequiredMixin,\n IsStaffMixin,\n TemplateView,\n):\n template_name = \"integrations_status.html\"\n # Make sure additional integration have a `module.label` 
set\n # along the `is_configured` and `is_available` functions.\n integrations = [\n ScanCodeIO,\n PurlDB,\n VulnerableCode,\n ]\n\n def get_integration_status(self, integration_class):\n \"\"\"\n Return the current status of the provided `integration_module`.\n Only check the availability if the integration is configured.\n \"\"\"\n is_configured = False\n is_available = False\n error_log = \"\"\n\n integration = integration_class(user=self.request.user)\n\n if integration.is_configured():\n is_configured = True\n try:\n is_available = integration.is_available(raise_exceptions=True)\n except Exception as exception:\n error_log = str(exception)\n\n status = {\n \"is_configured\": is_configured,\n \"is_available\": is_available,\n \"error_log\": error_log,\n }\n\n if self.request.user.is_superuser:\n status[\"service_url\"] = integration.service_url\n\n return status\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n integrations_status = {\n integration_class.label: self.get_integration_status(integration_class)\n for integration_class in self.integrations\n }\n\n context.update(\n {\n \"integrations_status\": integrations_status,\n }\n )\n return context" }, { "identifier": "UnreadNotificationsList", "path": "dje/views.py", "snippet": "class UnreadNotificationsList(\n NotificationsCountMixin,\n notifications_views.UnreadNotificationsList,\n):\n pass" }, { "identifier": "home_view", "path": "dje/views.py", "snippet": "@login_required\ndef home_view(request):\n \"\"\"Dataspace homepage.\"\"\"\n documentation_urls = {}\n rtd_url = \"https://dejacode.readthedocs.io/en/latest\"\n tutorials_label = format_html('Tutorials <span class=\"badge text-bg-success\">New</span>')\n\n documentation_urls = {\n tutorials_label: f\"{rtd_url}/tutorial-1.html\",\n \"How-To videos\": \"https://www.youtube.com/playlist?list=PLCq_LXeUqhkQj0u7M26fSHt1ebFhNCpCv\",\n \"API documentation\": reverse(\"api-docs:docs-index\"),\n }\n\n support_urls = {\n \"Report an issue\": \"https://github.com/nexB/dejacode/issues/new/\",\n }\n\n sections = {\n \"Documentation\": documentation_urls,\n \"Support\": support_urls,\n }\n\n user = request.user\n if user.is_staff:\n documentation_urls[\"Models documentation\"] = reverse(\"admin:docs_models\")\n\n request_qs = Request.objects.for_list_view(user).open().order_by(\"-last_modified_date\")\n\n cards = []\n homepage_layout = user.get_homepage_layout()\n if homepage_layout:\n cards = homepage_layout.cards_with_objects(user)\n\n context = {\n \"sections\": sections,\n \"request_assigned_to_me\": request_qs.assigned_to(user),\n \"request_followed_by_me\": request_qs.followed_by(user),\n \"cards\": cards,\n }\n return render(request, \"dataspace_home.html\", context)" }, { "identifier": "index_dispatch", "path": "dje/views.py", "snippet": "@accept_anonymous\ndef index_dispatch(request):\n \"\"\"Redirect to the LOGIN_REDIRECT_URL.\"\"\"\n return redirect(settings.LOGIN_REDIRECT_URL)" }, { "identifier": "urn_resolve_view", "path": "dje/views.py", "snippet": "@accept_anonymous\ndef urn_resolve_view(request, urn=None):\n \"\"\"\n Given a URN, this view Return the details page of the Object.\n The URN needs to be well formatted and to target an existing Object.\n If not, an error page is returned.\n See the URN module for details on supported models.\n \"\"\"\n # Supports value from the URL or submitted by the form in the urn_resolve.html template\n urn = urn or request.GET.get(\"urn\")\n if not urn:\n return render(request, 
\"urn_resolve.html\")\n\n try:\n # The resolve method will return the corresponding Object\n instance = urn_resolve(urn, request.user.dataspace)\n # Redirecting the user to the details page of the Object\n return redirect(instance.get_absolute_url())\n except URNValidationError as e:\n error_message = e\n except ObjectDoesNotExist:\n # URN format and model is correct, but the Object request do no exists\n error_message = \"The requested Object does not exist.\"\n except AttributeError:\n # The get_absolute_url() method is not implemented for this Model,\n # We do not have a details view for this Model.\n error_message = \"Unsupported URN model.\"\n\n return render(\n request,\n \"urn_resolve.html\",\n {\n \"error\": error_message,\n \"urn\": urn,\n },\n )" }, { "identifier": "LicenseAnnotationViewSet", "path": "license_library/api.py", "snippet": "class LicenseAnnotationViewSet(mixins.DestroyModelMixin, CreateRetrieveUpdateListViewSet):\n queryset = LicenseAnnotation.objects.all()\n serializer_class = LicenseAnnotationSerializer\n pagination_class = LicenseAnnotationPagination\n filterset_class = LicenseAnnotationFilterSet\n lookup_field = \"id\"\n renderer_classes = [renderers.JSONRenderer]\n permission_classes = [\n IsAuthenticatedOrAnonymous,\n permissions.DjangoModelPermissionsOrAnonReadOnly,\n ]\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .select_related(\n \"license\",\n \"assigned_tag\",\n \"assigned_tag__license_tag\",\n )\n .order_by(\"id\")\n )\n\n @staticmethod\n def log_action(request, obj, message):\n History.log_change(request.user, obj, message)\n\n @staticmethod\n def construct_change_message(annotation, action):\n \"\"\"\n Create a message suitable for the LogEntry model.\n Similar to messages from ModelAdmin.construct_change_message()\n \"\"\"\n if annotation.assigned_tag:\n tag_message = f'for tag: \"{annotation.assigned_tag}\"'\n else:\n tag_message = \"without tag\"\n\n return \"{action} a {name} {tag_message}.\".format(\n action=action,\n name=str(annotation._meta.verbose_name),\n tag_message=str(tag_message),\n )\n\n def perform_create(self, serializer):\n # WARNING: bypassing the direct super() on purpose\n super(CreateRetrieveUpdateListViewSet, self).perform_create(serializer)\n message = self.construct_change_message(serializer.instance, \"Added\")\n self.log_action(self.request, serializer.instance.license, message)\n\n def perform_update(self, serializer):\n # WARNING: bypassing the direct super() on purpose\n super(CreateRetrieveUpdateListViewSet, self).perform_create(serializer)\n message = self.construct_change_message(serializer.instance, \"Changed\")\n self.log_action(self.request, serializer.instance.license, message)\n\n def perform_destroy(self, instance):\n super().perform_destroy(instance)\n message = self.construct_change_message(instance, \"Deleted\")\n self.log_action(self.request, instance.license, message)" }, { "identifier": "LicenseViewSet", "path": "license_library/api.py", "snippet": "class LicenseViewSet(CreateRetrieveUpdateListViewSet):\n queryset = License.objects.all()\n serializer_class = LicenseSerializer\n filterset_class = LicenseFilterSet\n lookup_field = \"uuid\"\n search_fields = (\n \"key\",\n \"name\",\n \"short_name\",\n \"spdx_license_key\",\n )\n ordering_fields = (\n \"key\",\n \"name\",\n \"short_name\",\n \"publication_year\",\n \"category\",\n \"license_style\",\n \"license_profile\",\n \"usage_policy\",\n \"curation_level\",\n \"created_date\",\n \"last_modified_date\",\n )\n email_notification_on 
= LicenseAdmin.email_notification_on\n allow_reference_access = True\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .select_related(\n \"license_profile\",\n \"license_style\",\n \"category\",\n \"owner__dataspace\",\n \"license_status\",\n \"usage_policy\",\n )\n .prefetch_related(\n \"licenseassignedtag_set__license_tag\",\n external_references_prefetch,\n )\n )" }, { "identifier": "OwnerViewSet", "path": "organization/api.py", "snippet": "class OwnerViewSet(CreateRetrieveUpdateListViewSet):\n queryset = Owner.objects.all()\n serializer_class = OwnerSerializer\n lookup_field = \"uuid\"\n filterset_class = OwnerFilterSet\n search_fields = (\n \"name\",\n \"alias\",\n \"notes\",\n )\n search_fields_autocomplete = (\"name\",)\n ordering_fields = (\n \"name\",\n \"alias\",\n \"created_date\",\n \"last_modified_date\",\n )\n email_notification_on = OwnerAdmin.email_notification_on\n allow_reference_access = True\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .prefetch_related(\n \"license_set\",\n \"component_set\",\n external_references_prefetch,\n )\n )" }, { "identifier": "UsagePolicyViewSet", "path": "policy/api.py", "snippet": "class UsagePolicyViewSet(ExtraPermissionsViewSetMixin, CreateRetrieveUpdateListViewSet):\n queryset = UsagePolicy.objects.all()\n serializer_class = UsagePolicySerializer\n lookup_field = \"uuid\"\n extra_permissions = (TabPermission,)\n search_fields = (\n \"label\",\n \"guidelines\",\n )\n ordering_fields = (\"label\",)\n allow_reference_access = True\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .select_related(\n \"content_type\",\n \"associated_product_relation_status\",\n )\n )" }, { "identifier": "CodebaseResourceViewSet", "path": "product_portfolio/api.py", "snippet": "class CodebaseResourceViewSet(ProductRelatedViewSet):\n queryset = CodebaseResource.objects.none()\n serializer_class = CodebaseResourceSerializer\n filterset_class = CodebaseResourceFilterSet\n search_fields = (\"path\",)\n ordering_fields = (\n \"path\",\n \"is_deployment_path\",\n \"created_date\",\n \"last_modified_date\",\n )\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .select_related(\n \"product_component__component\",\n \"product_package__package\",\n )\n .prefetch_related(\n \"related_deployed_from__deployed_from\",\n # This one is different from the `default_select_prefetch` as its using the m2m\n \"deployed_to__deployed_to\",\n \"product\",\n )\n )" }, { "identifier": "ProductComponentViewSet", "path": "product_portfolio/api.py", "snippet": "class ProductComponentViewSet(ProductRelationViewSet):\n relation_fk_field = \"component\"\n queryset = ProductComponent.objects.none()\n serializer_class = ProductComponentSerializer\n filterset_class = ProductComponentFilterSet\n search_fields = (\"notes\",)\n ordering_fields = (\n \"component\",\n \"review_status\",\n \"license_expression\",\n \"created_date\",\n \"last_modified_date\",\n )" }, { "identifier": "ProductPackageViewSet", "path": "product_portfolio/api.py", "snippet": "class ProductPackageViewSet(ProductRelationViewSet):\n relation_fk_field = \"package\"\n queryset = ProductPackage.objects.none()\n serializer_class = ProductPackageSerializer\n filterset_class = ProductPackageFilterSet\n search_fields = (\"notes\",)\n ordering_fields = (\n \"package\",\n \"review_status\",\n \"license_expression\",\n \"created_date\",\n \"last_modified_date\",\n )" }, { "identifier": "ProductViewSet", "path": "product_portfolio/api.py", "snippet": "class 
ProductViewSet(CreateRetrieveUpdateListViewSet):\n queryset = Product.objects.none()\n serializer_class = ProductSerializer\n filterset_class = ProductFilterSet\n lookup_field = \"uuid\"\n # `IsAuthenticated` and `DjangoModelPermissions` are the default values\n # set in the `DEFAULT_PERMISSION_CLASSES` settings.\n # See http://www.django-rest-framework.org/api-guide/permissions/#djangoobjectpermissions\n extra_permissions = (permissions.DjangoObjectPermissions,)\n search_fields = (\n \"name\",\n \"version\",\n \"copyright\",\n \"homepage_url\",\n )\n search_fields_autocomplete = (\n \"name\",\n \"version\",\n )\n ordering_fields = (\n \"name\",\n \"version\",\n \"configuration_status\",\n \"license_expression\",\n \"release_date\",\n \"copyright\",\n \"created_date\",\n \"last_modified_date\",\n )\n\n def get_queryset(self):\n return (\n Product.objects.get_queryset(self.request.user)\n .select_related(\n \"owner\",\n \"configuration_status\",\n )\n .prefetch_related(\n \"components\",\n \"packages\",\n \"licenses\",\n )\n )\n\n def perform_create(self, serializer):\n \"\"\"Add view/change/delete Object permissions to the Product creator.\"\"\"\n super().perform_create(serializer)\n assign_all_object_permissions(self.request.user, serializer.instance)" }, { "identifier": "ReportViewSet", "path": "reporting/api.py", "snippet": "class ReportViewSet(ExtraPermissionsViewSetMixin, viewsets.ReadOnlyModelViewSet):\n queryset = Report.objects.user_availables()\n serializer_class = ReportSerializer\n lookup_field = \"uuid\"\n filterset_class = ReportFilterSet\n extra_permissions = (TabPermission,)\n search_fields = (\"name\",)\n ordering_fields = (\"name\",)\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .scope(self.request.user.dataspace)\n .select_related(\n \"query__content_type\",\n \"column_template\",\n )\n )" }, { "identifier": "RequestTemplateViewSet", "path": "workflow/api.py", "snippet": "class RequestTemplateViewSet(ExtraPermissionsViewSetMixin, ReadOnlyModelViewSet):\n queryset = RequestTemplate.objects.all()\n serializer_class = RequestTemplateSerializer\n lookup_field = \"uuid\"\n extra_permissions = (TabPermission,)\n filterset_class = RequestTemplateFilterSet\n search_fields = (\n \"name\",\n \"description\",\n )\n ordering_fields = (\n \"name\",\n \"content_type\",\n )\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .scope(self.request.user.dataspace)\n .select_related(\n \"default_assignee\",\n \"content_type\",\n )\n .prefetch_related(\n \"questions\",\n )\n )" }, { "identifier": "RequestViewSet", "path": "workflow/api.py", "snippet": "class RequestViewSet(ExtraPermissionsViewSetMixin, CreateRetrieveUpdateListViewSet):\n queryset = Request.objects.all()\n serializer_class = RequestSerializer\n lookup_field = \"uuid\"\n filterset_class = RequestFilterSet\n extra_permissions = (TabPermission,)\n search_fields = (\n \"title\",\n \"serialized_data\",\n )\n search_fields_autocomplete = (\"title\",)\n ordering_fields = (\n \"title\",\n \"request_template\",\n \"status\",\n \"priority\",\n \"assignee\",\n \"requester\",\n \"created_date\",\n \"last_modified_date\",\n )\n\n def get_queryset(self):\n user = self.request.user\n qs = (\n super()\n .get_queryset()\n .product_secured(user)\n .select_related(\n \"request_template\",\n \"requester\",\n \"assignee\",\n \"priority\",\n \"product_context\",\n \"content_type\",\n )\n .prefetch_related( # one extra query per content_type\n \"content_object\",\n )\n )\n if not user.is_staff:\n qs = 
qs.filter(is_private=False)\n return qs\n\n def perform_create(self, serializer):\n super().perform_create(serializer)\n send_request_notification(serializer.instance, created=True)\n\n def perform_update(self, serializer):\n super().perform_update(serializer)\n send_request_notification(serializer.instance, created=False)\n serializer.instance.events.create(\n user=self.request.user,\n text=\"Request edited.\",\n event_type=RequestEvent.EDIT,\n dataspace=self.request.user.dataspace,\n )" } ]
from django.conf import settings from django.conf.urls import include from django.contrib import admin from django.contrib.auth import views as auth_views from django.contrib.auth.decorators import login_required from django.template.loader import render_to_string from django.urls import path from django.views.defaults import page_not_found from django.views.generic import RedirectView from django.views.generic import TemplateView from notifications.views import mark_all_as_read from rest_framework.documentation import include_docs_urls from rest_framework.routers import DefaultRouter from component_catalog.api import ComponentViewSet from component_catalog.api import PackageViewSet from component_catalog.api import SubcomponentViewSet from component_catalog.views import send_scan_notification from dje import two_factor from dje.admin import dejacode_site from dje.api import ExternalReferenceViewSet from dje.forms import DejaCodeAuthenticationForm from dje.registration import DejaCodeActivationView from dje.registration import DejaCodeRegistrationForm from dje.views import AccountProfileView from dje.views import AllNotificationsList from dje.views import DataspaceAwareAutocompleteLookup from dje.views import DataspaceAwareRelatedLookup from dje.views import GlobalSearchListView from dje.views import IntegrationsStatusView from dje.views import UnreadNotificationsList from dje.views import home_view from dje.views import index_dispatch from dje.views import urn_resolve_view from license_library.api import LicenseAnnotationViewSet from license_library.api import LicenseViewSet from organization.api import OwnerViewSet from policy.api import UsagePolicyViewSet from product_portfolio.api import CodebaseResourceViewSet from product_portfolio.api import ProductComponentViewSet from product_portfolio.api import ProductPackageViewSet from product_portfolio.api import ProductViewSet from reporting.api import ReportViewSet from workflow.api import RequestTemplateViewSet from workflow.api import RequestViewSet from django_registration.backends.activation.views import RegistrationView
11,751
# # Copyright (c) nexB Inc. and others. All rights reserved. # DejaCode is a trademark of nexB Inc. # SPDX-License-Identifier: AGPL-3.0-only # See https://github.com/nexB/dejacode for support or download. # See https://aboutcode.org for more information about AboutCode FOSS projects. # # Replace the default admin site with the DejaCode one. admin.site = dejacode_site # Restframework API api_router = DefaultRouter() api_router.register("owners", OwnerViewSet) api_router.register("licenses", LicenseViewSet) api_router.register("license_annotations", LicenseAnnotationViewSet) api_router.register("components", ComponentViewSet) api_router.register("subcomponents", SubcomponentViewSet) api_router.register("packages", PackageViewSet) api_router.register("products", ProductViewSet) api_router.register("product_components", ProductComponentViewSet) api_router.register("product_packages", ProductPackageViewSet) api_router.register("codebase_resources", CodebaseResourceViewSet) api_router.register("request_templates", RequestTemplateViewSet) api_router.register("requests", RequestViewSet)
# # Copyright (c) nexB Inc. and others. All rights reserved. # DejaCode is a trademark of nexB Inc. # SPDX-License-Identifier: AGPL-3.0-only # See https://github.com/nexB/dejacode for support or download. # See https://aboutcode.org for more information about AboutCode FOSS projects. # # Replace the default admin site with the DejaCode one. admin.site = dejacode_site # Restframework API api_router = DefaultRouter() api_router.register("owners", OwnerViewSet) api_router.register("licenses", LicenseViewSet) api_router.register("license_annotations", LicenseAnnotationViewSet) api_router.register("components", ComponentViewSet) api_router.register("subcomponents", SubcomponentViewSet) api_router.register("packages", PackageViewSet) api_router.register("products", ProductViewSet) api_router.register("product_components", ProductComponentViewSet) api_router.register("product_packages", ProductPackageViewSet) api_router.register("codebase_resources", CodebaseResourceViewSet) api_router.register("request_templates", RequestTemplateViewSet) api_router.register("requests", RequestViewSet)
api_router.register("reports", ReportViewSet)
28
2023-12-07 16:57:42+00:00
16k
wusize/CLIM
src/open_clip/factory.py
[ { "identifier": "OPENAI_DATASET_MEAN", "path": "src/open_clip/constants.py", "snippet": "OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)" }, { "identifier": "OPENAI_DATASET_STD", "path": "src/open_clip/constants.py", "snippet": "OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)" }, { "identifier": "CLIP", "path": "src/open_clip/model.py", "snippet": "class CLIP(nn.Module):\n output_dict: torch.jit.Final[bool]\n\n def __init__(\n self,\n embed_dim: int,\n vision_cfg: CLIPVisionCfg,\n text_cfg: CLIPTextCfg,\n quick_gelu: bool = False,\n cast_dtype: Optional[torch.dtype] = None,\n output_dict: bool = False,\n freeze_text=True,\n ):\n assert freeze_text, 'For now we must freeze text'\n super().__init__()\n self.output_dict = output_dict\n self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)\n\n text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)\n if freeze_text:\n print(f'Freeze text encoder parameters', flush=True)\n for param in text.parameters():\n param.requires_grad = False\n text.eval()\n self.transformer = text.transformer\n self.vocab_size = text.vocab_size\n self.embed_dim = embed_dim\n self.token_embedding = text.token_embedding\n self.positional_embedding = text.positional_embedding\n self.ln_final = text.ln_final\n self.text_projection = text.text_projection\n self.register_buffer('attn_mask', text.attn_mask, persistent=False)\n\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n\n def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False, **kwargs):\n self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.visual.set_grad_checkpointing(enable)\n self.transformer.grad_checkpointing = enable\n\n def encode_image(self, image, normalize: bool = False):\n features = self.visual(image)\n return F.normalize(features, dim=-1) if normalize else features\n\n def encode_dense(self, image, normalize: bool = False, keep_shape=False):\n features = self.visual.encode_dense(image, keep_shape=keep_shape)\n if normalize:\n if keep_shape:\n features = F.normalize(features, dim=1)\n else:\n features = F.normalize(features, dim=-1)\n return features\n\n def encode_pseudo_boxes(self, image, normed_boxes, normalize: bool = False,\n extract_type='v1'):\n features = self.visual.extract_roi_features(image, normed_boxes,\n extract_type=extract_type)\n if normalize:\n features = F.normalize(features, dim=-1)\n return features\n\n def _pool_masks(self, image, masks, normalize, mask_attn=False):\n if mask_attn:\n mask_pooled = self.visual.mask_attn_pool(image, masks)\n else:\n mask_pooled = self.visual.mask_pool(image, masks)\n if normalize:\n mask_pooled = F.normalize(mask_pooled, dim=-1)\n return mask_pooled\n\n def _pool_masks_v3(self, image, masks, normalize):\n mask_pooled_v1, x_dense = self.visual.mask_attn_pool(image, masks, return_dense=True)\n x_dense = F.normalize(x_dense, dim=-1).flatten(1, 2) # bs, h*w, c\n x_dense = torch.repeat_interleave(\n x_dense, torch.tensor([len(m) for m in masks], device=x_dense.device), dim=0)\n masks = torch.cat(masks).float().flatten(-2, -1) # bs, h*w\n mask_pooled_v2 = (x_dense * masks.unsqueeze(-1)).sum(1) / masks.sum(1, keepdim=True)\n if normalize:\n mask_pooled_v1 = F.normalize(mask_pooled_v1, dim=-1)\n mask_pooled_v2 = F.normalize(mask_pooled_v2, dim=-1)\n return mask_pooled_v1, mask_pooled_v2\n\n def encode_masks(self, image, masks, normalize=True, 
mask_attn=False):\n return self._pool_masks(image, masks, normalize, mask_attn)\n\n def encode_text(self, text, normalize: bool = False):\n cast_dtype = self.transformer.get_cast_dtype()\n\n x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]\n\n x = x + self.positional_embedding.to(cast_dtype)\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x, attn_mask=self.attn_mask)\n x = x.permute(1, 0, 2) # LND -> NLD\n x = self.ln_final(x) # [batch_size, n_ctx, transformer.width]\n # take features from the eot embedding (eot_token is the highest number in each sequence)\n x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection\n return F.normalize(x, dim=-1) if normalize else x\n\n def forward(self, image, text=None):\n image_features = self.encode_image(image, normalize=True)\n if text is None:\n text_features = None\n else:\n text_features = self.encode_text(text, normalize=True)\n if self.output_dict:\n return {\n \"image_features\": image_features,\n \"text_features\": text_features,\n \"logit_scale\": self.logit_scale.exp()\n }\n return image_features, text_features, self.logit_scale.exp()\n\n def train(self, mode: bool = True):\n if not isinstance(mode, bool):\n raise ValueError(\"training mode is expected to be boolean\")\n self.training = mode\n for name, module in self.named_children():\n if name == 'visual':\n if mode:\n logging.info(f'========Set module {name} as train mode========')\n else:\n logging.info(f'========Set module {name} as eval mode========')\n module.train(mode)\n else:\n logging.info(f'========Set module {name} as eval mode========')\n module.train(mode=False)\n return self" }, { "identifier": "CustomTextCLIP", "path": "src/open_clip/model.py", "snippet": "class CustomTextCLIP(nn.Module):\n output_dict: torch.jit.Final[bool]\n\n def __init__(\n self,\n embed_dim: int,\n vision_cfg: CLIPVisionCfg,\n text_cfg: CLIPTextCfg,\n quick_gelu: bool = False,\n cast_dtype: Optional[torch.dtype] = None,\n output_dict: bool = False,\n ):\n super().__init__()\n self.output_dict = output_dict\n self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)\n self.text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n\n def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):\n # lock image tower as per LiT - https://arxiv.org/abs/2111.07991\n self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)\n\n def lock_text_tower(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True):\n self.text.lock(unlocked_layers, freeze_layer_norm)\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.visual.set_grad_checkpointing(enable)\n self.text.set_grad_checkpointing(enable)\n\n def encode_pseudo_boxes(self, image, normed_boxes, normalize: bool = False):\n features = self.visual.extract_roi_features(image, normed_boxes)\n return F.normalize(features, dim=-1) if normalize else features\n\n def encode_image(self, image, normalize: bool = False):\n features = self.visual(image)\n return F.normalize(features, dim=-1) if normalize else features\n\n def encode_text(self, text, normalize: bool = False):\n features = self.text(text)\n return F.normalize(features, dim=-1) if normalize else features\n\n def forward(self, image, text):\n image_features = self.encode_image(image, normalize=True)\n if text is None:\n text_features = None\n else:\n text_features = 
self.encode_text(text, normalize=True)\n if self.output_dict:\n return {\n \"image_features\": image_features,\n \"text_features\": text_features,\n \"logit_scale\": self.logit_scale.exp()\n }\n return image_features, text_features, self.logit_scale.exp()" }, { "identifier": "convert_weights_to_lp", "path": "src/open_clip/model.py", "snippet": "def convert_weights_to_lp(model: nn.Module, dtype=torch.float16):\n \"\"\"Convert applicable model parameters to low-precision (bf16 or fp16)\"\"\"\n\n def _convert_weights(l):\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):\n l.weight.data = l.weight.data.to(dtype)\n if l.bias is not None:\n l.bias.data = l.bias.data.to(dtype)\n\n if isinstance(l, (nn.MultiheadAttention, Attention)):\n for attr in [*[f\"{s}_proj_weight\" for s in [\"in\", \"q\", \"k\", \"v\"]], \"in_proj_bias\", \"bias_k\", \"bias_v\"]:\n tensor = getattr(l, attr)\n if tensor is not None:\n tensor.data = tensor.data.to(dtype)\n\n for name in [\"text_projection\", \"proj\"]:\n if hasattr(l, name):\n attr = getattr(l, name)\n if attr is not None:\n attr.data = attr.data.to(dtype)\n\n model.apply(_convert_weights)" }, { "identifier": "convert_to_custom_text_state_dict", "path": "src/open_clip/model.py", "snippet": "def convert_to_custom_text_state_dict(state_dict: dict):\n if 'text_projection' in state_dict:\n # old format state_dict, move text tower -> .text\n new_state_dict = {}\n for k, v in state_dict.items():\n if any(k.startswith(p) for p in (\n 'text_projection',\n 'positional_embedding',\n 'token_embedding',\n 'transformer',\n 'ln_final',\n )):\n k = 'text.' + k\n new_state_dict[k] = v\n return new_state_dict\n return state_dict" }, { "identifier": "resize_pos_embed", "path": "src/open_clip/model.py", "snippet": "def resize_pos_embed(state_dict, model, interpolation: str = 'bicubic', antialias: bool = True):\n # Rescale the grid of position embeddings when loading from state_dict\n old_pos_embed = state_dict.get('visual.positional_embedding', None)\n if old_pos_embed is None or not hasattr(model.visual, 'grid_size'):\n return\n grid_size = to_2tuple(model.visual.grid_size)\n extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more)\n new_seq_len = grid_size[0] * grid_size[1] + extra_tokens\n if new_seq_len == old_pos_embed.shape[0]:\n return\n\n if extra_tokens:\n pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]\n else:\n pos_emb_tok, pos_emb_img = None, old_pos_embed\n old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))\n\n logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)\n pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)\n pos_emb_img = F.interpolate(\n pos_emb_img,\n size=grid_size,\n mode=interpolation,\n antialias=antialias,\n align_corners=False,\n )\n pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]\n if pos_emb_tok is not None:\n new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)\n else:\n new_pos_embed = pos_emb_img\n state_dict['visual.positional_embedding'] = new_pos_embed" }, { "identifier": "get_cast_dtype", "path": "src/open_clip/model.py", "snippet": "def get_cast_dtype(precision: str):\n cast_dtype = None\n if precision == 'bf16':\n cast_dtype = torch.bfloat16\n elif precision == 'fp16':\n cast_dtype = torch.float16\n return cast_dtype" }, { "identifier": "CoCa", "path": "src/open_clip/coca_model.py", "snippet": "class CoCa(nn.Module):\n 
def __init__(\n self,\n embed_dim,\n multimodal_cfg: MultimodalCfg,\n text_cfg: CLIPTextCfg,\n vision_cfg: CLIPVisionCfg,\n quick_gelu: bool = False,\n cast_dtype: Optional[torch.dtype] = None,\n pad_id: int = 0,\n ):\n super().__init__()\n multimodal_cfg = MultimodalCfg(**multimodal_cfg) if isinstance(multimodal_cfg, dict) else multimodal_cfg\n text_cfg = CLIPTextCfg(**text_cfg) if isinstance(text_cfg, dict) else text_cfg\n vision_cfg = CLIPVisionCfg(**vision_cfg) if isinstance(vision_cfg, dict) else vision_cfg\n\n self.text = _build_text_tower(\n embed_dim=embed_dim,\n text_cfg=text_cfg,\n quick_gelu=quick_gelu,\n cast_dtype=cast_dtype,\n )\n\n vocab_size = (\n text_cfg.vocab_size # for hf models\n if hasattr(text_cfg, \"hf_model_name\") and text_cfg.hf_model_name is not None\n else text_cfg.vocab_size\n )\n\n self.visual = _build_vision_tower(\n embed_dim=embed_dim,\n vision_cfg=vision_cfg,\n quick_gelu=quick_gelu,\n cast_dtype=cast_dtype,\n )\n\n self.text_decoder = _build_text_decoder_tower(\n vocab_size,\n multimodal_cfg=multimodal_cfg,\n quick_gelu=quick_gelu,\n cast_dtype=cast_dtype,\n )\n\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n self.pad_id = pad_id\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.visual.set_grad_checkpointing(enable)\n self.text.set_grad_checkpointing(enable)\n self.text_decoder.set_grad_checkpointing(enable)\n\n def _encode_image(self, images, normalize=True):\n image_latent, tokens_embs = self.visual(images)\n image_latent = F.normalize(image_latent, dim=-1) if normalize else image_latent\n return image_latent, tokens_embs\n\n def _encode_text(self, text, normalize=True, embed_cls=True):\n text = text[:, :-1] if embed_cls else text # make space for CLS token\n text_latent, token_emb = self.text(text)\n text_latent = F.normalize(text_latent, dim=-1) if normalize else text_latent\n return text_latent, token_emb\n\n def encode_image(self, images, normalize=True):\n image_latent, _ = self._encode_image(images, normalize=normalize)\n return image_latent\n\n def encode_text(self, text, normalize=True, embed_cls=True):\n text_latent, _ = self._encode_text(text, normalize=normalize, embed_cls=embed_cls)\n return text_latent\n\n def forward(self, image, text, embed_cls=True, image_latent=None, image_embs=None):\n text_latent, token_embs = self._encode_text(text, embed_cls=embed_cls)\n if image_latent is None or image_embs is None:\n image_latent, image_embs = self._encode_image(image)\n\n # TODO: add assertion to avoid bugs?\n labels = text[:, -token_embs.shape[1]:]\n\n logits = self.text_decoder(image_embs, token_embs)\n return {\n \"image_features\": image_latent,\n \"text_features\": text_latent,\n \"logits\": logits,\n \"labels\": labels,\n \"logit_scale\": self.logit_scale.exp()\n }\n\n def generate(\n self,\n image,\n text=None,\n seq_len=30,\n max_seq_len=77,\n temperature=1.,\n generation_type=\"beam_search\",\n top_p=0.1, # keep tokens in the 1 - top_p quantile\n top_k=1, # keeps the top_k most probable tokens\n pad_token_id=None,\n eos_token_id=None,\n sot_token_id=None,\n num_beams=6,\n num_beam_groups=3,\n min_seq_len=5,\n stopping_criteria=None,\n repetition_penalty=1.0,\n fixed_output_length=False # if True output.shape == (batch_size, seq_len)\n ):\n # taking many ideas and components from HuggingFace GenerationMixin\n # https://huggingface.co/docs/transformers/main/en/main_classes/text_generation\n assert _has_transformers, \"Please install transformers for generate functionality. 
`pip install transformers`.\"\n assert seq_len > min_seq_len, \"seq_len must be larger than min_seq_len\"\n\n with torch.no_grad():\n sot_token_id = 49406 if sot_token_id is None else sot_token_id\n eos_token_id = 49407 if eos_token_id is None else eos_token_id\n pad_token_id = self.pad_id if pad_token_id is None else pad_token_id\n logit_processor = LogitsProcessorList(\n [\n MinLengthLogitsProcessor(min_seq_len, eos_token_id),\n RepetitionPenaltyLogitsProcessor(repetition_penalty),\n ]\n )\n\n if stopping_criteria is None:\n stopping_criteria = [MaxLengthCriteria(max_length=seq_len)]\n\n stopping_criteria = StoppingCriteriaList(\n stopping_criteria\n )\n\n device = image.device\n\n if generation_type == \"beam_search\":\n output = self._generate_beamsearch(\n image_inputs = image,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n sot_token_id=sot_token_id,\n num_beams=num_beams,\n num_beam_groups=num_beam_groups,\n min_seq_len=min_seq_len,\n stopping_criteria=stopping_criteria,\n logit_processor=logit_processor,\n )\n if fixed_output_length and output.shape[1] < seq_len:\n return torch.cat(\n (output, torch.ones(output.shape[0], seq_len-output.shape[1], device=device, dtype=output.dtype) * self.pad_id),\n dim=1\n )\n return output\n\n elif generation_type == \"top_p\":\n logit_warper = GENERATION_TYPES[generation_type](top_p)\n elif generation_type == \"top_k\":\n logit_warper = GENERATION_TYPES[generation_type](top_k)\n else:\n raise ValueError(\n f\"generation_type has to be one of \"\n f\"{'| ' + ' | '.join(list(GENERATION_TYPES.keys())) + ' |'}.\"\n )\n\n image_latent, image_embs = self._encode_image(image)\n\n if text is None:\n text = torch.ones((image.shape[0], 1), device=device, dtype=torch.long) * sot_token_id\n\n was_training = self.training\n num_dims = len(text.shape)\n\n if num_dims == 1:\n text = text[None, :]\n\n cur_len = text.shape[1]\n self.eval()\n out = text\n\n while True:\n x = out[:, -max_seq_len:]\n cur_len = x.shape[1]\n logits = self(image, x, image_latent=image_latent, image_embs=image_embs, embed_cls=False)[\"logits\"][:, -1]\n mask = (out[:, -1] == eos_token_id) | (out[:, -1] == pad_token_id)\n sample = torch.ones((out.shape[0], 1), device=device, dtype=torch.long) * pad_token_id\n\n if mask.all():\n if not fixed_output_length:\n break\n else:\n logits = logits[~mask, :]\n filtered_logits = logit_processor(x[~mask, :], logits)\n filtered_logits = logit_warper(x[~mask, :], filtered_logits)\n probs = F.softmax(filtered_logits / temperature, dim=-1)\n\n if (cur_len + 1 == seq_len):\n sample[~mask, :] = torch.ones((sum(~mask), 1), device=device, dtype=torch.long) * eos_token_id\n else:\n sample[~mask, :] = torch.multinomial(probs, 1)\n\n out = torch.cat((out, sample), dim=-1)\n\n cur_len += 1\n\n if stopping_criteria(out, None):\n break\n\n if num_dims == 1:\n out = out.squeeze(0)\n\n self.train(was_training)\n return out\n\n def _generate_beamsearch(\n self,\n image_inputs,\n pad_token_id=None,\n eos_token_id=None,\n sot_token_id=None,\n num_beams=6,\n num_beam_groups=3,\n min_seq_len=5,\n stopping_criteria=None,\n logit_processor=None,\n logit_warper=None,\n ):\n device = image_inputs.device\n batch_size = image_inputs.shape[0]\n image_inputs = torch.repeat_interleave(image_inputs, num_beams, dim=0)\n image_latent, image_embs = self._encode_image(image_inputs)\n\n input_ids = torch.ones((batch_size * num_beams, 1), device=device, dtype=torch.long)\n input_ids = input_ids * sot_token_id\n beam_scorer = BeamSearchScorer(\n batch_size=batch_size,\n 
num_beams=num_beams,\n device=device,\n num_beam_groups=num_beam_groups,\n )\n # instantiate logits processors\n logits_processor = (\n LogitsProcessorList([MinLengthLogitsProcessor(min_seq_len, eos_token_id=eos_token_id)])\n if logit_processor is None\n else logit_processor\n )\n\n batch_size = len(beam_scorer._beam_hyps)\n num_beams = beam_scorer.num_beams\n num_beam_groups = beam_scorer.num_beam_groups\n num_sub_beams = num_beams // num_beam_groups\n batch_beam_size, cur_len = input_ids.shape\n beam_indices = None\n\n if num_beams * batch_size != batch_beam_size:\n raise ValueError(\n f\"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}.\"\n )\n\n beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device)\n # initialise score of first beam of each group with 0 and the rest with 1e-9. This ensures that the beams in\n # the same group don't produce same tokens everytime.\n beam_scores[:, ::num_sub_beams] = 0\n beam_scores = beam_scores.view((batch_size * num_beams,))\n\n while True:\n\n # predicted tokens in cur_len step\n current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device)\n\n # indices which will form the beams in the next time step\n reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device)\n\n # do one decoder step on all beams of all sentences in batch\n model_inputs = prepare_inputs_for_generation(input_ids=input_ids, image_inputs=image_inputs)\n outputs = self(\n model_inputs['images'],\n model_inputs['text'],\n embed_cls=False,\n image_latent=image_latent,\n image_embs=image_embs\n )\n\n for beam_group_idx in range(num_beam_groups):\n group_start_idx = beam_group_idx * num_sub_beams\n group_end_idx = min(group_start_idx + num_sub_beams, num_beams)\n group_size = group_end_idx - group_start_idx\n\n # indices of beams of current group among all sentences in batch\n batch_group_indices = []\n\n for batch_idx in range(batch_size):\n batch_group_indices.extend(\n [batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)]\n )\n group_input_ids = input_ids[batch_group_indices]\n\n # select outputs of beams of currentg group only\n next_token_logits = outputs['logits'][batch_group_indices, -1, :]\n vocab_size = next_token_logits.shape[-1]\n\n next_token_scores_processed = logits_processor(\n group_input_ids, next_token_logits, current_tokens=current_tokens, beam_group_idx=beam_group_idx\n )\n next_token_scores = next_token_scores_processed + beam_scores[batch_group_indices].unsqueeze(-1)\n next_token_scores = next_token_scores.expand_as(next_token_scores_processed)\n\n # reshape for beam search\n next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size)\n\n next_token_scores, next_tokens = torch.topk(\n next_token_scores, 2 * group_size, dim=1, largest=True, sorted=True\n )\n\n next_indices = torch.div(next_tokens, vocab_size, rounding_mode=\"floor\")\n next_tokens = next_tokens % vocab_size\n\n # stateless\n process_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None\n beam_outputs = beam_scorer.process(\n group_input_ids,\n next_token_scores,\n next_tokens,\n next_indices,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n beam_indices=process_beam_indices,\n )\n beam_scores[batch_group_indices] = beam_outputs[\"next_beam_scores\"]\n beam_next_tokens = beam_outputs[\"next_beam_tokens\"]\n beam_idx = beam_outputs[\"next_beam_indices\"]\n\n input_ids[batch_group_indices] = 
group_input_ids[beam_idx]\n group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)\n current_tokens[batch_group_indices] = group_input_ids[:, -1]\n\n # (beam_idx // group_size) -> batch_idx\n # (beam_idx % group_size) -> offset of idx inside the group\n reordering_indices[batch_group_indices] = (\n num_beams * torch.div(beam_idx, group_size, rounding_mode=\"floor\") + group_start_idx + (beam_idx % group_size)\n )\n\n input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1)\n\n # increase cur_len\n cur_len = cur_len + 1\n if beam_scorer.is_done or stopping_criteria(input_ids, None):\n break\n\n final_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None\n sequence_outputs = beam_scorer.finalize(\n input_ids,\n beam_scores,\n next_tokens,\n next_indices,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n max_length=stopping_criteria.max_length,\n beam_indices=final_beam_indices,\n )\n return sequence_outputs['sequences']" }, { "identifier": "ClipLoss", "path": "src/open_clip/loss.py", "snippet": "class ClipLoss(nn.Module):\n\n def __init__(\n self,\n local_loss=False,\n gather_with_grad=False,\n cache_labels=False,\n rank=0,\n world_size=1,\n use_horovod=False,\n ):\n super().__init__()\n self.local_loss = local_loss\n self.gather_with_grad = gather_with_grad\n self.cache_labels = cache_labels\n self.rank = rank\n self.world_size = world_size\n self.use_horovod = use_horovod\n\n # cache state\n self.prev_num_logits = 0\n self.labels = {}\n\n def get_ground_truth(self, device, num_logits) -> torch.Tensor:\n # calculated ground-truth and cache if enabled\n if self.prev_num_logits != num_logits or device not in self.labels:\n labels = torch.arange(num_logits, device=device, dtype=torch.long)\n if self.world_size > 1 and self.local_loss:\n labels = labels + num_logits * self.rank\n if self.cache_labels:\n self.labels[device] = labels\n self.prev_num_logits = num_logits\n else:\n labels = self.labels[device]\n return labels\n\n def get_logits(self, image_features, text_features, logit_scale):\n if self.world_size > 1:\n all_image_features, all_text_features = gather_features(\n image_features, text_features,\n self.local_loss, self.gather_with_grad, self.rank, self.world_size, self.use_horovod)\n\n if self.local_loss:\n logits_per_image = logit_scale * image_features @ all_text_features.T\n logits_per_text = logit_scale * text_features @ all_image_features.T\n else:\n logits_per_image = logit_scale * all_image_features @ all_text_features.T\n logits_per_text = logits_per_image.T\n else:\n logits_per_image = logit_scale * image_features @ text_features.T\n logits_per_text = logit_scale * text_features @ image_features.T\n \n return logits_per_image, logits_per_text\n\n def forward(self, image_features, text_features, logit_scale, output_dict=False):\n device = image_features.device\n logits_per_image, logits_per_text = self.get_logits(image_features, text_features, logit_scale)\n\n labels = self.get_ground_truth(device, logits_per_image.shape[0])\n\n total_loss = (\n F.cross_entropy(logits_per_image, labels) +\n F.cross_entropy(logits_per_text, labels)\n ) / 2\n\n return {\"contrastive_loss\": total_loss} if output_dict else total_loss" }, { "identifier": "DistillClipLoss", "path": "src/open_clip/loss.py", "snippet": "class DistillClipLoss(ClipLoss):\n\n def dist_loss(self, teacher_logits, student_logits):\n loss = F.kl_div(student_logits.log_softmax(dim=1),\n teacher_logits.softmax(dim=1), 
reduction='batchmean')\n return loss\n # return -(teacher_logits.softmax(dim=1) * student_logits.log_softmax(dim=1)).sum(dim=1).mean(dim=0)\n\n def forward(\n self,\n image_features,\n text_features,\n logit_scale,\n dist_image_features,\n dist_text_features,\n dist_logit_scale,\n output_dict=False,\n ):\n logits_per_image, logits_per_text = \\\n self.get_logits(image_features, text_features, logit_scale)\n\n dist_logits_per_image, dist_logits_per_text = \\\n self.get_logits(dist_image_features, dist_text_features, dist_logit_scale)\n\n labels = self.get_ground_truth(image_features.device, logits_per_image.shape[0])\n\n contrastive_loss = (\n F.cross_entropy(logits_per_image, labels) +\n F.cross_entropy(logits_per_text, labels)\n ) / 2\n\n distill_loss = (\n self.dist_loss(dist_logits_per_image, logits_per_image) +\n self.dist_loss(dist_logits_per_text, logits_per_text)\n ) / 2\n\n if output_dict:\n return {\"contrastive_loss\": contrastive_loss, \"loss_kl\": distill_loss}\n\n return contrastive_loss, distill_loss" }, { "identifier": "CoCaLoss", "path": "src/open_clip/loss.py", "snippet": "class CoCaLoss(ClipLoss):\n def __init__(\n self,\n caption_loss_weight,\n clip_loss_weight,\n pad_id=0, # pad_token for open_clip custom tokenizer\n local_loss=False,\n gather_with_grad=False,\n cache_labels=False,\n rank=0,\n world_size=1,\n use_horovod=False,\n ):\n super().__init__(\n local_loss=local_loss,\n gather_with_grad=gather_with_grad,\n cache_labels=cache_labels,\n rank=rank,\n world_size=world_size,\n use_horovod=use_horovod\n )\n\n self.clip_loss_weight = clip_loss_weight\n self.caption_loss_weight = caption_loss_weight\n self.caption_loss = nn.CrossEntropyLoss(ignore_index=pad_id)\n\n def forward(self, image_features, text_features, logits, labels, logit_scale, output_dict=False):\n clip_loss = super().forward(image_features, text_features, logit_scale)\n clip_loss = self.clip_loss_weight * clip_loss\n\n caption_loss = self.caption_loss(\n logits.permute(0, 2, 1),\n labels,\n )\n caption_loss = caption_loss * self.caption_loss_weight\n\n if output_dict:\n return {\"contrastive_loss\": clip_loss, \"caption_loss\": caption_loss}\n\n return clip_loss, caption_loss" }, { "identifier": "load_openai_model", "path": "src/open_clip/openai.py", "snippet": "def load_openai_model(\n name: str,\n precision: Optional[str] = None,\n device: Optional[Union[str, torch.device]] = None,\n jit: bool = True,\n cache_dir: Optional[str] = None,\n):\n \"\"\"Load a CLIP model\n\n Parameters\n ----------\n name : str\n A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict\n precision: str\n Model precision, if None defaults to 'fp32' if device == 'cpu' else 'fp16'.\n device : Union[str, torch.device]\n The device to put the loaded model\n jit : bool\n Whether to load the optimized JIT model (default) or more hackable non-JIT model.\n cache_dir : Optional[str]\n The directory to cache the downloaded model weights\n\n Returns\n -------\n model : torch.nn.Module\n The CLIP model\n preprocess : Callable[[PIL.Image], torch.Tensor]\n A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input\n \"\"\"\n if device is None:\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n if precision is None:\n precision = 'fp32' if device == 'cpu' else 'fp16'\n\n if get_pretrained_url(name, 'openai'):\n model_path = download_pretrained_from_url(get_pretrained_url(name, 'openai'), cache_dir=cache_dir)\n elif 
os.path.isfile(name):\n model_path = name\n else:\n raise RuntimeError(f\"Model {name} not found; available models = {list_openai_models()}\")\n\n try:\n # loading JIT archive\n model = torch.jit.load(model_path, map_location=device if jit else \"cpu\").eval()\n state_dict = None\n except RuntimeError:\n # loading saved state dict\n if jit:\n warnings.warn(f\"File {model_path} is not a JIT archive. Loading as a state dict instead\")\n jit = False\n state_dict = torch.load(model_path, map_location=\"cpu\")\n\n if not jit:\n # Build a non-jit model from the OpenAI jitted model state dict\n cast_dtype = get_cast_dtype(precision)\n try:\n model = build_model_from_openai_state_dict(state_dict or model.state_dict(), cast_dtype=cast_dtype)\n except KeyError:\n sd = {k[7:]: v for k, v in state_dict[\"state_dict\"].items()}\n model = build_model_from_openai_state_dict(sd, cast_dtype=cast_dtype)\n\n # model from OpenAI state dict is in manually cast fp16 mode, must be converted for AMP/fp32/bf16 use\n model = model.to(device)\n if precision.startswith('amp') or precision == 'fp32':\n model.float()\n elif precision == 'bf16':\n convert_weights_to_lp(model, dtype=torch.bfloat16)\n\n return model\n\n # patch the device names\n device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])\n device_node = [n for n in device_holder.graph.findAllNodes(\"prim::Constant\") if \"Device\" in repr(n)][-1]\n\n def patch_device(module):\n try:\n graphs = [module.graph] if hasattr(module, \"graph\") else []\n except RuntimeError:\n graphs = []\n\n if hasattr(module, \"forward1\"):\n graphs.append(module.forward1.graph)\n\n for graph in graphs:\n for node in graph.findAllNodes(\"prim::Constant\"):\n if \"value\" in node.attributeNames() and str(node[\"value\"]).startswith(\"cuda\"):\n node.copyAttributes(device_node)\n\n model.apply(patch_device)\n patch_device(model.encode_image)\n patch_device(model.encode_text)\n\n # patch dtype to float32 (typically for CPU)\n if precision == 'fp32':\n float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])\n float_input = list(float_holder.graph.findNode(\"aten::to\").inputs())[1]\n float_node = float_input.node()\n\n def patch_float(module):\n try:\n graphs = [module.graph] if hasattr(module, \"graph\") else []\n except RuntimeError:\n graphs = []\n\n if hasattr(module, \"forward1\"):\n graphs.append(module.forward1.graph)\n\n for graph in graphs:\n for node in graph.findAllNodes(\"aten::to\"):\n inputs = list(node.inputs())\n for i in [1, 2]: # dtype can be the second or third argument to aten::to()\n if inputs[i].node()[\"value\"] == 5:\n inputs[i].node().copyAttributes(float_node)\n\n model.apply(patch_float)\n patch_float(model.encode_image)\n patch_float(model.encode_text)\n model.float()\n\n # ensure image_size attr available at consistent location for both jit and non-jit\n model.visual.image_size = model.input_resolution.item()\n return model" }, { "identifier": "is_pretrained_cfg", "path": "src/open_clip/pretrained.py", "snippet": "def is_pretrained_cfg(model: str, tag: str):\n if model not in _PRETRAINED:\n return False\n return _clean_tag(tag) in _PRETRAINED[model]" }, { "identifier": "get_pretrained_cfg", "path": "src/open_clip/pretrained.py", "snippet": "def get_pretrained_cfg(model: str, tag: str):\n if model not in _PRETRAINED:\n return {}\n model_pretrained = _PRETRAINED[model]\n return model_pretrained.get(_clean_tag(tag), {})" }, { "identifier": "download_pretrained", "path": 
"src/open_clip/pretrained.py", "snippet": "def download_pretrained(\n cfg: Dict,\n force_hf_hub: bool = False,\n cache_dir: Union[str, None] = None,\n):\n target = ''\n if not cfg:\n return target\n\n download_url = cfg.get('url', '')\n download_hf_hub = cfg.get('hf_hub', '')\n if download_hf_hub and force_hf_hub:\n # use HF hub even if url exists\n download_url = ''\n\n if download_url:\n target = download_pretrained_from_url(download_url, cache_dir=cache_dir)\n elif download_hf_hub:\n has_hf_hub(True)\n # we assume the hf_hub entries in pretrained config combine model_id + filename in\n # 'org/model_name/filename.pt' form. To specify just the model id w/o filename and\n # use 'open_clip_pytorch_model.bin' default, there must be a trailing slash 'org/model_name/'.\n model_id, filename = os.path.split(download_hf_hub)\n if filename:\n target = download_pretrained_from_hf(model_id, filename=filename, cache_dir=cache_dir)\n else:\n target = download_pretrained_from_hf(model_id, cache_dir=cache_dir)\n\n return target" }, { "identifier": "list_pretrained_tags_by_model", "path": "src/open_clip/pretrained.py", "snippet": "def list_pretrained_tags_by_model(model: str):\n \"\"\" return all pretrain tags for the specified model architecture \"\"\"\n tags = []\n if model in _PRETRAINED:\n tags.extend(_PRETRAINED[model].keys())\n return tags" }, { "identifier": "download_pretrained_from_hf", "path": "src/open_clip/pretrained.py", "snippet": "def download_pretrained_from_hf(\n model_id: str,\n filename: str = 'open_clip_pytorch_model.bin',\n revision=None,\n cache_dir: Union[str, None] = None,\n):\n has_hf_hub(True)\n cached_file = hf_hub_download(model_id, filename, revision=revision, cache_dir=cache_dir)\n return cached_file" }, { "identifier": "image_transform", "path": "src/open_clip/transform.py", "snippet": "def image_transform(\n image_size: int,\n is_train: bool,\n mean: Optional[Tuple[float, ...]] = None,\n std: Optional[Tuple[float, ...]] = None,\n resize_longest_max: bool = False,\n fill_color: int = 0,\n aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,\n):\n mean = mean or OPENAI_DATASET_MEAN\n if not isinstance(mean, (list, tuple)):\n mean = (mean,) * 3\n\n std = std or OPENAI_DATASET_STD\n if not isinstance(std, (list, tuple)):\n std = (std,) * 3\n\n if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:\n # for square size, pass size as int so that Resize() uses aspect preserving shortest edge\n image_size = image_size[0]\n\n if isinstance(aug_cfg, dict):\n aug_cfg = AugmentationCfg(**aug_cfg)\n else:\n aug_cfg = aug_cfg or AugmentationCfg()\n normalize = Normalize(mean=mean, std=std)\n if is_train:\n aug_cfg_dict = {k: v for k, v in asdict(aug_cfg).items() if v is not None}\n use_timm = aug_cfg_dict.pop('use_timm', False)\n if use_timm:\n from timm.data import create_transform # timm can still be optional\n if isinstance(image_size, (tuple, list)):\n assert len(image_size) >= 2\n input_size = (3,) + image_size[-2:]\n else:\n input_size = (3, image_size, image_size)\n # by default, timm aug randomly alternates bicubic & bilinear for better robustness at inference time\n aug_cfg_dict.setdefault('interpolation', 'random')\n aug_cfg_dict.setdefault('color_jitter', None) # disable by default\n train_transform = create_transform(\n input_size=input_size,\n is_training=True,\n hflip=0.,\n mean=mean,\n std=std,\n re_mode='pixel',\n **aug_cfg_dict,\n )\n else:\n train_transform = Compose([\n RandomResizedCrop(\n image_size,\n 
scale=aug_cfg_dict.pop('scale'),\n interpolation=InterpolationMode.BICUBIC,\n ),\n _convert_to_rgb,\n ToTensor(),\n normalize,\n ])\n if aug_cfg_dict:\n warnings.warn(f'Unused augmentation cfg items, specify `use_timm` to use ({list(aug_cfg_dict.keys())}).')\n return train_transform\n else:\n if resize_longest_max:\n transforms = [\n ResizeMaxSize(image_size, fill=fill_color)\n ]\n else:\n transforms = [\n Resize(image_size, interpolation=InterpolationMode.BICUBIC),\n CenterCrop(image_size),\n ]\n transforms.extend([\n _convert_to_rgb,\n ToTensor(),\n normalize,\n ])\n return Compose(transforms)" }, { "identifier": "AugmentationCfg", "path": "src/open_clip/transform.py", "snippet": "class AugmentationCfg:\n scale: Tuple[float, float] = (0.9, 1.0)\n ratio: Optional[Tuple[float, float]] = None\n color_jitter: Optional[Union[float, Tuple[float, float, float]]] = None\n interpolation: Optional[str] = None\n re_prob: Optional[float] = None\n re_count: Optional[int] = None\n use_timm: bool = False" }, { "identifier": "det_image_transform", "path": "src/open_clip/transform.py", "snippet": "def det_image_transform(\n image_size: int,\n is_train: bool,\n mean: Optional[Tuple[float, ...]] = None,\n std: Optional[Tuple[float, ...]] = None,\n fill_color: int = 0,\n aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,\n):\n mean = mean or OPENAI_DATASET_MEAN\n if not isinstance(mean, (list, tuple)):\n mean = (mean,) * 3\n\n std = std or OPENAI_DATASET_STD\n if not isinstance(std, (list, tuple)):\n std = (std,) * 3\n\n if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:\n # for square size, pass size as int so that Resize() uses aspect preserving shortest edge\n image_size = image_size[0]\n\n normalize = Normalize(mean=mean, std=std)\n if is_train:\n raise NotImplementedError\n else:\n transforms = [\n ResizeLongest(image_size, fill=fill_color),\n _convert_to_rgb,\n ToTensor(),\n normalize,\n ]\n return Compose(transforms)" }, { "identifier": "HFTokenizer", "path": "src/open_clip/tokenizer.py", "snippet": "class HFTokenizer:\n \"\"\"HuggingFace tokenizer wrapper\"\"\"\n\n def __init__(self, tokenizer_name: str):\n from transformers import AutoTokenizer\n self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n\n def save_pretrained(self, dest):\n self.tokenizer.save_pretrained(dest)\n\n def __call__(self, texts: Union[str, List[str]], context_length: int = 77) -> torch.Tensor:\n # same cleaning as for default tokenizer, except lowercasing\n # adding lower (for case-sensitive tokenizers) will make it more robust but less sensitive to nuance\n if isinstance(texts, str):\n texts = [texts]\n texts = [whitespace_clean(basic_clean(text)) for text in texts]\n input_ids = self.tokenizer(\n texts,\n return_tensors='pt',\n max_length=context_length,\n padding='max_length',\n truncation=True,\n ).input_ids\n return input_ids" }, { "identifier": "tokenize", "path": "src/open_clip/tokenizer.py", "snippet": "def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:\n \"\"\"\n Returns the tokenized representation of given input string(s)\n\n Parameters\n ----------\n texts : Union[str, List[str]]\n An input string or a list of input strings to tokenize\n context_length : int\n The context length to use; all CLIP models use 77 as the context length\n\n Returns\n -------\n A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]\n \"\"\"\n if isinstance(texts, str):\n texts = [texts]\n\n 
sot_token = _tokenizer.encoder[\"<start_of_text>\"]\n eot_token = _tokenizer.encoder[\"<end_of_text>\"]\n all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]\n result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)\n\n for i, tokens in enumerate(all_tokens):\n if len(tokens) > context_length:\n tokens = tokens[:context_length] # Truncate\n tokens[-1] = eot_token\n result[i, :len(tokens)] = torch.tensor(tokens)\n\n return result" } ]
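A minimal sketch (mine, not part of this record) exercising the ClipLoss snippet shown in the context above on random features; the import path is assumed from the snippet's src/open_clip/loss.py location and all shapes and values are illustrative.

import torch
import torch.nn.functional as F
from src.open_clip.loss import ClipLoss  # import path assumed from the snippet location above

batch_size, embed_dim = 8, 512
# L2-normalised features, as the towers above produce with normalize=True
image_features = F.normalize(torch.randn(batch_size, embed_dim), dim=-1)
text_features = F.normalize(torch.randn(batch_size, embed_dim), dim=-1)
logit_scale = torch.tensor(1 / 0.07)  # exp() of the models' initial logit_scale parameter

loss_fn = ClipLoss()  # single-process defaults: world_size=1, local_loss=False
loss = loss_fn(image_features, text_features, logit_scale)
print(float(loss))  # symmetric cross-entropy over the image/text similarity matrix

With world_size=1 the rest of the batch serves as negatives; this is the same contrastive term that CoCaLoss above reweights with clip_loss_weight and combines with its caption loss.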
import json import logging import os import pathlib import re import torch from copy import deepcopy from pathlib import Path from typing import Any, Dict, Optional, Tuple, Union from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD from .model import CLIP, CustomTextCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\ resize_pos_embed, get_cast_dtype from .coca_model import CoCa from .loss import ClipLoss, DistillClipLoss, CoCaLoss from .openai import load_openai_model from .pretrained import is_pretrained_cfg, get_pretrained_cfg, \ download_pretrained, list_pretrained_tags_by_model, download_pretrained_from_hf from .transform import image_transform, AugmentationCfg, det_image_transform from .tokenizer import HFTokenizer, tokenize from open_clip import eva_clip from open_clip import eva_clip
13347
def load_checkpoint(model, checkpoint_path, strict=True): state_dict = load_state_dict(checkpoint_path) # detect old format and make compatible with new format if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'): state_dict = convert_to_custom_text_state_dict(state_dict) resize_pos_embed(state_dict, model) incompatible_keys = model.load_state_dict(state_dict, strict=strict) return incompatible_keys def create_model( model_name: str, pretrained: Optional[str] = None, precision: str = 'fp32', device: Union[str, torch.device] = 'cpu', jit: bool = False, force_quick_gelu: bool = False, force_custom_text: bool = False, force_patch_dropout: Optional[float] = None, force_image_size: Optional[Union[int, Tuple[int, int]]] = None, pretrained_image: bool = False, pretrained_hf: bool = True, cache_dir: Optional[str] = None, output_dict: Optional[bool] = None, require_pretrained: bool = False, ): has_hf_hub_prefix = model_name.startswith(HF_HUB_PREFIX) if has_hf_hub_prefix: model_id = model_name[len(HF_HUB_PREFIX):] checkpoint_path = download_pretrained_from_hf(model_id, cache_dir=cache_dir) config_path = download_pretrained_from_hf(model_id, filename='open_clip_config.json', cache_dir=cache_dir) with open(config_path, 'r', encoding='utf-8') as f: config = json.load(f) pretrained_cfg = config['preprocess_cfg'] model_cfg = config['model_cfg'] else: model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names checkpoint_path = None pretrained_cfg = {} model_cfg = None if isinstance(device, str): device = torch.device(device) if pretrained == 'eva': return eva_clip.create_model(model_name=model_name, pretrained=cache_dir, force_custom_clip=True, precision=precision, device=device,) if pretrained and pretrained.lower() == 'openai': logging.info(f'Loading pretrained {model_name} from OpenAI.') model = load_openai_model( model_name, precision=precision, device=device, jit=jit, cache_dir=cache_dir, ) # to always output dict even if it is clip if output_dict and hasattr(model, "output_dict"): model.output_dict = True else: model_cfg = model_cfg or get_model_config(model_name) if model_cfg is not None: logging.info(f'Loaded {model_name} model config.') else: logging.error(f'Model config for {model_name} not found; available models {list_models()}.') raise RuntimeError(f'Model config for {model_name} not found.') if force_quick_gelu: # override for use of QuickGELU on non-OpenAI transformer models model_cfg["quick_gelu"] = True if force_patch_dropout is not None: # override the default patch dropout value model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout if force_image_size is not None: # override model config's image size model_cfg["vision_cfg"]["image_size"] = force_image_size if pretrained_image: if 'timm_model_name' in model_cfg.get('vision_cfg', {}): # pretrained weight loading for timm models set via vision_cfg model_cfg['vision_cfg']['timm_model_pretrained'] = True else: assert False, 'pretrained image towers currently only supported for timm models' cast_dtype = get_cast_dtype(precision) is_hf_model = 'hf_model_name' in model_cfg.get('text_cfg', {}) custom_text = model_cfg.pop('custom_text', False) or force_custom_text or is_hf_model if custom_text: if is_hf_model: model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf if "coca" in model_name: model = CoCa(**model_cfg, cast_dtype=cast_dtype) else: model = CustomTextCLIP(**model_cfg, cast_dtype=cast_dtype) else: model = CLIP(**model_cfg, cast_dtype=cast_dtype) 
pretrained_loaded = False if pretrained: checkpoint_path = '' pretrained_cfg = get_pretrained_cfg(model_name, pretrained) if pretrained_cfg: checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir) elif os.path.exists(pretrained): checkpoint_path = pretrained if checkpoint_path: print(f'Loading pretrained {model_name} weights ({pretrained}).', flush=True) logging.info(f'Loading pretrained {model_name} weights ({pretrained}).') load_checkpoint(model, checkpoint_path) else: error_str = ( f'Pretrained weights ({pretrained}) not found for model {model_name}.'
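A hedged sketch of what the load_state_dict()/load_checkpoint() helpers in the cropped_code above do to a checkpoint; the temporary file name is a placeholder and the helpers are assumed to be in scope (e.g. imported from the surrounding factory module).

import torch

# Fake checkpoint in the older format: wrapped in 'state_dict' and saved from a DDP model,
# so every key carries a 'module.' prefix.
torch.save({'state_dict': {'module.logit_scale': torch.zeros(())}}, '/tmp/fake_ckpt.pt')

sd = load_state_dict('/tmp/fake_ckpt.pt')  # helper defined in the code above
print(list(sd.keys()))  # ['logit_scale'] -- wrapper removed and 'module.' prefix stripped

load_checkpoint() then additionally converts old text-tower layouts, resizes positional embeddings, and calls model.load_state_dict(), returning the incompatible keys.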
HF_HUB_PREFIX = 'hf-hub:' _MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"] _MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs def _natural_key(string_): return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] def _rescan_model_configs(): global _MODEL_CONFIGS config_ext = ('.json',) config_files = [] for config_path in _MODEL_CONFIG_PATHS: if config_path.is_file() and config_path.suffix in config_ext: config_files.append(config_path) elif config_path.is_dir(): for ext in config_ext: config_files.extend(config_path.glob(f'*{ext}')) for cf in config_files: with open(cf, 'r') as f: model_cfg = json.load(f) if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')): _MODEL_CONFIGS[cf.stem] = model_cfg _MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))} _rescan_model_configs() # initial populate of model config registry def list_models(): """ enumerate available model architectures based on config files """ return list(_MODEL_CONFIGS.keys()) def add_model_config(path): """ add model config path or file and update registry """ if not isinstance(path, Path): path = Path(path) _MODEL_CONFIG_PATHS.append(path) _rescan_model_configs() def get_model_config(model_name): if model_name in _MODEL_CONFIGS: return deepcopy(_MODEL_CONFIGS[model_name]) else: return None def get_tokenizer(model_name): if 'EVA' in model_name: return eva_clip.get_tokenizer(model_name) if model_name.startswith(HF_HUB_PREFIX): tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):]) else: config = get_model_config(model_name) tokenizer = HFTokenizer( config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize return tokenizer def load_state_dict(checkpoint_path: str, map_location='cpu'): checkpoint = torch.load(checkpoint_path, map_location=map_location) if isinstance(checkpoint, dict) and 'state_dict' in checkpoint: state_dict = checkpoint['state_dict'] else: state_dict = checkpoint if next(iter(state_dict.items()))[0].startswith('module'): state_dict = {k[7:]: v for k, v in state_dict.items()} return state_dict def load_checkpoint(model, checkpoint_path, strict=True): state_dict = load_state_dict(checkpoint_path) # detect old format and make compatible with new format if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'): state_dict = convert_to_custom_text_state_dict(state_dict) resize_pos_embed(state_dict, model) incompatible_keys = model.load_state_dict(state_dict, strict=strict) return incompatible_keys def create_model( model_name: str, pretrained: Optional[str] = None, precision: str = 'fp32', device: Union[str, torch.device] = 'cpu', jit: bool = False, force_quick_gelu: bool = False, force_custom_text: bool = False, force_patch_dropout: Optional[float] = None, force_image_size: Optional[Union[int, Tuple[int, int]]] = None, pretrained_image: bool = False, pretrained_hf: bool = True, cache_dir: Optional[str] = None, output_dict: Optional[bool] = None, require_pretrained: bool = False, ): has_hf_hub_prefix = model_name.startswith(HF_HUB_PREFIX) if has_hf_hub_prefix: model_id = model_name[len(HF_HUB_PREFIX):] checkpoint_path = download_pretrained_from_hf(model_id, cache_dir=cache_dir) config_path = download_pretrained_from_hf(model_id, filename='open_clip_config.json', cache_dir=cache_dir) with open(config_path, 'r', encoding='utf-8') as f: config = json.load(f) pretrained_cfg = config['preprocess_cfg'] model_cfg = 
config['model_cfg'] else: model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names checkpoint_path = None pretrained_cfg = {} model_cfg = None if isinstance(device, str): device = torch.device(device) if pretrained == 'eva': return eva_clip.create_model(model_name=model_name, pretrained=cache_dir, force_custom_clip=True, precision=precision, device=device,) if pretrained and pretrained.lower() == 'openai': logging.info(f'Loading pretrained {model_name} from OpenAI.') model = load_openai_model( model_name, precision=precision, device=device, jit=jit, cache_dir=cache_dir, ) # to always output dict even if it is clip if output_dict and hasattr(model, "output_dict"): model.output_dict = True else: model_cfg = model_cfg or get_model_config(model_name) if model_cfg is not None: logging.info(f'Loaded {model_name} model config.') else: logging.error(f'Model config for {model_name} not found; available models {list_models()}.') raise RuntimeError(f'Model config for {model_name} not found.') if force_quick_gelu: # override for use of QuickGELU on non-OpenAI transformer models model_cfg["quick_gelu"] = True if force_patch_dropout is not None: # override the default patch dropout value model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout if force_image_size is not None: # override model config's image size model_cfg["vision_cfg"]["image_size"] = force_image_size if pretrained_image: if 'timm_model_name' in model_cfg.get('vision_cfg', {}): # pretrained weight loading for timm models set via vision_cfg model_cfg['vision_cfg']['timm_model_pretrained'] = True else: assert False, 'pretrained image towers currently only supported for timm models' cast_dtype = get_cast_dtype(precision) is_hf_model = 'hf_model_name' in model_cfg.get('text_cfg', {}) custom_text = model_cfg.pop('custom_text', False) or force_custom_text or is_hf_model if custom_text: if is_hf_model: model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf if "coca" in model_name: model = CoCa(**model_cfg, cast_dtype=cast_dtype) else: model = CustomTextCLIP(**model_cfg, cast_dtype=cast_dtype) else: model = CLIP(**model_cfg, cast_dtype=cast_dtype) pretrained_loaded = False if pretrained: checkpoint_path = '' pretrained_cfg = get_pretrained_cfg(model_name, pretrained) if pretrained_cfg: checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir) elif os.path.exists(pretrained): checkpoint_path = pretrained if checkpoint_path: print(f'Loading pretrained {model_name} weights ({pretrained}).', flush=True) logging.info(f'Loading pretrained {model_name} weights ({pretrained}).') load_checkpoint(model, checkpoint_path) else: error_str = ( f'Pretrained weights ({pretrained}) not found for model {model_name}.'
f'Available pretrained tags ({list_pretrained_tags_by_model(model_name)}.')
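A hedged usage sketch of the create_model()/get_tokenizer() factories shown in the cropped_code and all_code fields above; 'ViT-B-32' is an illustrative config name, no pretrained tag is requested, and the sketch assumes the full (un-cropped) factory module and its model_configs/ directory are available.

import torch

model = create_model('ViT-B-32', pretrained=None, precision='fp32', device='cpu')
tokenizer = get_tokenizer('ViT-B-32')  # falls back to the bundled `tokenize` unless the config names an HF tokenizer

with torch.no_grad():
    text = tokenizer(["a photo of a cat", "a photo of a dog"])  # -> LongTensor of shape (2, 77)
    text_features = model.encode_text(text)
print(text_features.shape)  # (2, embed_dim) for whatever config was loaded

With pretrained set to a tag listed by list_pretrained_tags_by_model(), the branch shown above would instead download the matching weights and pass them through load_checkpoint().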
16
2023-12-09 05:43:08+00:00
16k
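Closing out this record, a hedged sketch of the CoCa.generate() path from the context snippet above; 'coca_ViT-B-32' is an example config name, the weights here are random (so the generated tokens are meaningless), and the transformers package must be installed for the assert at the top of generate() to pass.

import torch

coca = create_model('coca_ViT-B-32', pretrained=None, device='cpu')
images = torch.randn(1, 3, 224, 224)  # image size assumed to match the example config

with torch.no_grad():
    out = coca.generate(images, generation_type='beam_search', seq_len=20)
print(out.shape)  # (1, <=20) token ids, beginning with the start-of-text token

The top_p/top_k branches instead sample token by token through the logit processors shown, padding finished rows until fixed_output_length or the stopping criteria end the loop.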
moonshot-admin/moonshot
third-party/tqdm-4.66.1/tqdm/auto.py
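A hedged, generic sketch of the tqdm.std.tqdm API documented in the context snippets below; the record's auto.py selects one of the imported tqdm classes at import time, and nothing in this sketch is taken from the record itself.

from time import sleep
from tqdm import tqdm

# iterator wrapping: desc/unit/mininterval are among the parameters documented below
for _ in tqdm(range(50), desc="work", unit="step", mininterval=0.1):
    sleep(0.01)

# manual updates (e.g. for byte streams) via the update()/close() pair described below
with tqdm(total=3, bar_format="{l_bar}{bar}{r_bar}") as bar:
    for _ in range(3):
        bar.update(1)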
[ { "identifier": "TqdmExperimentalWarning", "path": "third-party/tqdm-4.66.1/tqdm/std.py", "snippet": "class TqdmExperimentalWarning(TqdmWarning, FutureWarning):\n \"\"\"beta feature, unstable API and behaviour\"\"\"\n pass" }, { "identifier": "tqdm", "path": "third-party/tqdm-4.66.1/tqdm/asyncio.py", "snippet": "class tqdm_asyncio(std_tqdm):\n def __init__(self, iterable=None, *args, **kwargs):\n def __aiter__(self):\n async def __anext__(self):\n def send(self, *args, **kwargs):\n def as_completed(cls, fs, *, loop=None, timeout=None, total=None, **tqdm_kwargs):\n async def gather(cls, *fs, loop=None, timeout=None, total=None, **tqdm_kwargs):\n async def wrap_awaitable(i, f):\ndef tarange(*args, **kwargs):" }, { "identifier": "tqdm", "path": "third-party/tqdm-4.66.1/tqdm/std.py", "snippet": "class tqdm(Comparable):\n \"\"\"\n Decorate an iterable object, returning an iterator which acts exactly\n like the original iterable, but prints a dynamically updating\n progressbar every time a value is requested.\n\n Parameters\n ----------\n iterable : iterable, optional\n Iterable to decorate with a progressbar.\n Leave blank to manually manage the updates.\n desc : str, optional\n Prefix for the progressbar.\n total : int or float, optional\n The number of expected iterations. If unspecified,\n len(iterable) is used if possible. If float(\"inf\") or as a last\n resort, only basic progress statistics are displayed\n (no ETA, no progressbar).\n If `gui` is True and this parameter needs subsequent updating,\n specify an initial arbitrary large positive number,\n e.g. 9e9.\n leave : bool, optional\n If [default: True], keeps all traces of the progressbar\n upon termination of iteration.\n If `None`, will leave only if `position` is `0`.\n file : `io.TextIOWrapper` or `io.StringIO`, optional\n Specifies where to output the progress messages\n (default: sys.stderr). Uses `file.write(str)` and `file.flush()`\n methods. For encoding, see `write_bytes`.\n ncols : int, optional\n The width of the entire output message. If specified,\n dynamically resizes the progressbar to stay within this bound.\n If unspecified, attempts to use environment width. The\n fallback is a meter width of 10 and no limit for the counter and\n statistics. If 0, will not print any meter (only stats).\n mininterval : float, optional\n Minimum progress display update interval [default: 0.1] seconds.\n maxinterval : float, optional\n Maximum progress display update interval [default: 10] seconds.\n Automatically adjusts `miniters` to correspond to `mininterval`\n after long display update lag. Only works if `dynamic_miniters`\n or monitor thread is enabled.\n miniters : int or float, optional\n Minimum progress display update interval, in iterations.\n If 0 and `dynamic_miniters`, will automatically adjust to equal\n `mininterval` (more CPU efficient, good for tight loops).\n If > 0, will skip display of specified number of iterations.\n Tweak this and `mininterval` to get very efficient loops.\n If your progress is erratic with both fast and slow iterations\n (network, skipping items, etc) you should set miniters=1.\n ascii : bool or str, optional\n If unspecified or False, use unicode (smooth blocks) to fill\n the meter. The fallback is to use ASCII characters \" 123456789#\".\n disable : bool, optional\n Whether to disable the entire progressbar wrapper\n [default: False]. 
If set to None, disable on non-TTY.\n unit : str, optional\n String that will be used to define the unit of each iteration\n [default: it].\n unit_scale : bool or int or float, optional\n If 1 or True, the number of iterations will be reduced/scaled\n automatically and a metric prefix following the\n International System of Units standard will be added\n (kilo, mega, etc.) [default: False]. If any other non-zero\n number, will scale `total` and `n`.\n dynamic_ncols : bool, optional\n If set, constantly alters `ncols` and `nrows` to the\n environment (allowing for window resizes) [default: False].\n smoothing : float, optional\n Exponential moving average smoothing factor for speed estimates\n (ignored in GUI mode). Ranges from 0 (average speed) to 1\n (current/instantaneous speed) [default: 0.3].\n bar_format : str, optional\n Specify a custom bar string formatting. May impact performance.\n [default: '{l_bar}{bar}{r_bar}'], where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '\n '{rate_fmt}{postfix}]'\n Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,\n percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,\n rate, rate_fmt, rate_noinv, rate_noinv_fmt,\n rate_inv, rate_inv_fmt, postfix, unit_divisor,\n remaining, remaining_s, eta.\n Note that a trailing \": \" is automatically removed after {desc}\n if the latter is empty.\n initial : int or float, optional\n The initial counter value. Useful when restarting a progress\n bar [default: 0]. If using float, consider specifying `{n:.3f}`\n or similar in `bar_format`, or specifying `unit_scale`.\n position : int, optional\n Specify the line offset to print this bar (starting from 0)\n Automatic if unspecified.\n Useful to manage multiple bars at once (eg, from threads).\n postfix : dict or *, optional\n Specify additional stats to display at the end of the bar.\n Calls `set_postfix(**postfix)` if possible (dict).\n unit_divisor : float, optional\n [default: 1000], ignored unless `unit_scale` is True.\n write_bytes : bool, optional\n Whether to write bytes. If (default: False) will write unicode.\n lock_args : tuple, optional\n Passed to `refresh` for intermediate output\n (initialisation, iterating, and updating).\n nrows : int, optional\n The screen height. If specified, hides nested bars outside this\n bound. If unspecified, attempts to use environment height.\n The fallback is 20.\n colour : str, optional\n Bar colour (e.g. 'green', '#00ff00').\n delay : float, optional\n Don't display until [default: 0] seconds have elapsed.\n gui : bool, optional\n WARNING: internal parameter - do not use.\n Use tqdm.gui.tqdm(...) instead. 
If set, will attempt to use\n matplotlib animations for a graphical output [default: False].\n\n Returns\n -------\n out : decorated iterator.\n \"\"\"\n\n monitor_interval = 10 # set to 0 to disable the thread\n monitor = None\n _instances = WeakSet()\n\n @staticmethod\n def format_sizeof(num, suffix='', divisor=1000):\n \"\"\"\n Formats a number (greater than unity) with SI Order of Magnitude\n prefixes.\n\n Parameters\n ----------\n num : float\n Number ( >= 1) to format.\n suffix : str, optional\n Post-postfix [default: ''].\n divisor : float, optional\n Divisor between prefixes [default: 1000].\n\n Returns\n -------\n out : str\n Number with Order of Magnitude SI unit postfix.\n \"\"\"\n for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:\n if abs(num) < 999.5:\n if abs(num) < 99.95:\n if abs(num) < 9.995:\n return '{0:1.2f}'.format(num) + unit + suffix\n return '{0:2.1f}'.format(num) + unit + suffix\n return '{0:3.0f}'.format(num) + unit + suffix\n num /= divisor\n return '{0:3.1f}Y'.format(num) + suffix\n\n @staticmethod\n def format_interval(t):\n \"\"\"\n Formats a number of seconds as a clock time, [H:]MM:SS\n\n Parameters\n ----------\n t : int\n Number of seconds.\n\n Returns\n -------\n out : str\n [H:]MM:SS\n \"\"\"\n mins, s = divmod(int(t), 60)\n h, m = divmod(mins, 60)\n if h:\n return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)\n else:\n return '{0:02d}:{1:02d}'.format(m, s)\n\n @staticmethod\n def format_num(n):\n \"\"\"\n Intelligent scientific notation (.3g).\n\n Parameters\n ----------\n n : int or float or Numeric\n A Number.\n\n Returns\n -------\n out : str\n Formatted number.\n \"\"\"\n f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')\n n = str(n)\n return f if len(f) < len(n) else n\n\n @staticmethod\n def status_printer(file):\n \"\"\"\n Manage the printing and in-place updating of a line of characters.\n Note that if the string is longer than a line, then in-place\n updating may not work (it will print a new line at each refresh).\n \"\"\"\n fp = file\n fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover\n if fp in (sys.stderr, sys.stdout):\n getattr(sys.stderr, 'flush', lambda: None)()\n getattr(sys.stdout, 'flush', lambda: None)()\n\n def fp_write(s):\n fp.write(str(s))\n fp_flush()\n\n last_len = [0]\n\n def print_status(s):\n len_s = disp_len(s)\n fp_write('\\r' + s + (' ' * max(last_len[0] - len_s, 0)))\n last_len[0] = len_s\n\n return print_status\n\n @staticmethod\n def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False, unit='it',\n unit_scale=False, rate=None, bar_format=None, postfix=None,\n unit_divisor=1000, initial=0, colour=None, **extra_kwargs):\n \"\"\"\n Return a string-based progress bar given some parameters\n\n Parameters\n ----------\n n : int or float\n Number of finished iterations.\n total : int or float\n The expected total number of iterations. If meaningless (None),\n only basic progress statistics are displayed (no ETA).\n elapsed : float\n Number of seconds passed since start.\n ncols : int, optional\n The width of the entire output message. If specified,\n dynamically resizes `{bar}` to stay within this bound\n [default: None]. If `0`, will not print any bar (only stats).\n The fallback is `{bar:10}`.\n prefix : str, optional\n Prefix message (included in total width) [default: ''].\n Use as {desc} in bar_format string.\n ascii : bool, optional or str, optional\n If not set, use unicode (smooth blocks) to fill the meter\n [default: False]. 
The fallback is to use ASCII characters\n \" 123456789#\".\n unit : str, optional\n The iteration unit [default: 'it'].\n unit_scale : bool or int or float, optional\n If 1 or True, the number of iterations will be printed with an\n appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)\n [default: False]. If any other non-zero number, will scale\n `total` and `n`.\n rate : float, optional\n Manual override for iteration rate.\n If [default: None], uses n/elapsed.\n bar_format : str, optional\n Specify a custom bar string formatting. May impact performance.\n [default: '{l_bar}{bar}{r_bar}'], where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '\n '{rate_fmt}{postfix}]'\n Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,\n percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,\n rate, rate_fmt, rate_noinv, rate_noinv_fmt,\n rate_inv, rate_inv_fmt, postfix, unit_divisor,\n remaining, remaining_s, eta.\n Note that a trailing \": \" is automatically removed after {desc}\n if the latter is empty.\n postfix : *, optional\n Similar to `prefix`, but placed at the end\n (e.g. for additional stats).\n Note: postfix is usually a string (not a dict) for this method,\n and will if possible be set to postfix = ', ' + postfix.\n However other types are supported (#382).\n unit_divisor : float, optional\n [default: 1000], ignored unless `unit_scale` is True.\n initial : int or float, optional\n The initial counter value [default: 0].\n colour : str, optional\n Bar colour (e.g. 'green', '#00ff00').\n\n Returns\n -------\n out : Formatted meter and stats, ready to display.\n \"\"\"\n\n # sanity check: total\n if total and n >= (total + 0.5): # allow float imprecision (#849)\n total = None\n\n # apply custom scale if necessary\n if unit_scale and unit_scale not in (True, 1):\n if total:\n total *= unit_scale\n n *= unit_scale\n if rate:\n rate *= unit_scale # by default rate = self.avg_dn / self.avg_dt\n unit_scale = False\n\n elapsed_str = tqdm.format_interval(elapsed)\n\n # if unspecified, attempt to use rate = average speed\n # (we allow manual override since predicting time is an arcane art)\n if rate is None and elapsed:\n rate = (n - initial) / elapsed\n inv_rate = 1 / rate if rate else None\n format_sizeof = tqdm.format_sizeof\n rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else\n '{0:5.2f}'.format(rate)) if rate else '?') + unit + '/s'\n rate_inv_fmt = (\n (format_sizeof(inv_rate) if unit_scale else '{0:5.2f}'.format(inv_rate))\n if inv_rate else '?') + 's/' + unit\n rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt\n\n if unit_scale:\n n_fmt = format_sizeof(n, divisor=unit_divisor)\n total_fmt = format_sizeof(total, divisor=unit_divisor) if total is not None else '?'\n else:\n n_fmt = str(n)\n total_fmt = str(total) if total is not None else '?'\n\n try:\n postfix = ', ' + postfix if postfix else ''\n except TypeError:\n pass\n\n remaining = (total - n) / rate if rate and total else 0\n remaining_str = tqdm.format_interval(remaining) if rate else '?'\n try:\n eta_dt = (datetime.now() + timedelta(seconds=remaining)\n if rate and total else datetime.utcfromtimestamp(0))\n except OverflowError:\n eta_dt = datetime.max\n\n # format the stats displayed to the left and right sides of the bar\n if prefix:\n # old prefix setup work around\n bool_prefix_colon_already = (prefix[-2:] == \": \")\n l_bar = prefix if bool_prefix_colon_already else prefix + \": \"\n else:\n l_bar = ''\n\n r_bar = f'| 
{n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}{postfix}]'\n\n # Custom bar formatting\n # Populate a dict with all available progress indicators\n format_dict = {\n # slight extension of self.format_dict\n 'n': n, 'n_fmt': n_fmt, 'total': total, 'total_fmt': total_fmt,\n 'elapsed': elapsed_str, 'elapsed_s': elapsed,\n 'ncols': ncols, 'desc': prefix or '', 'unit': unit,\n 'rate': inv_rate if inv_rate and inv_rate > 1 else rate,\n 'rate_fmt': rate_fmt, 'rate_noinv': rate,\n 'rate_noinv_fmt': rate_noinv_fmt, 'rate_inv': inv_rate,\n 'rate_inv_fmt': rate_inv_fmt,\n 'postfix': postfix, 'unit_divisor': unit_divisor,\n 'colour': colour,\n # plus more useful definitions\n 'remaining': remaining_str, 'remaining_s': remaining,\n 'l_bar': l_bar, 'r_bar': r_bar, 'eta': eta_dt,\n **extra_kwargs}\n\n # total is known: we can predict some stats\n if total:\n # fractional and percentage progress\n frac = n / total\n percentage = frac * 100\n\n l_bar += '{0:3.0f}%|'.format(percentage)\n\n if ncols == 0:\n return l_bar[:-1] + r_bar[1:]\n\n format_dict.update(l_bar=l_bar)\n if bar_format:\n format_dict.update(percentage=percentage)\n\n # auto-remove colon for empty `{desc}`\n if not prefix:\n bar_format = bar_format.replace(\"{desc}: \", '')\n else:\n bar_format = \"{l_bar}{bar}{r_bar}\"\n\n full_bar = FormatReplace()\n nobar = bar_format.format(bar=full_bar, **format_dict)\n if not full_bar.format_called:\n return nobar # no `{bar}`; nothing else to do\n\n # Formatting progress bar space available for bar's display\n full_bar = Bar(frac,\n max(1, ncols - disp_len(nobar)) if ncols else 10,\n charset=Bar.ASCII if ascii is True else ascii or Bar.UTF,\n colour=colour)\n if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):\n bar_format = str(bar_format)\n res = bar_format.format(bar=full_bar, **format_dict)\n return disp_trim(res, ncols) if ncols else res\n\n elif bar_format:\n # user-specified bar_format but no total\n l_bar += '|'\n format_dict.update(l_bar=l_bar, percentage=0)\n full_bar = FormatReplace()\n nobar = bar_format.format(bar=full_bar, **format_dict)\n if not full_bar.format_called:\n return nobar\n full_bar = Bar(0,\n max(1, ncols - disp_len(nobar)) if ncols else 10,\n charset=Bar.BLANK, colour=colour)\n res = bar_format.format(bar=full_bar, **format_dict)\n return disp_trim(res, ncols) if ncols else res\n else:\n # no total: no progressbar, ETA, just progress stats\n return (f'{(prefix + \": \") if prefix else \"\"}'\n f'{n_fmt}{unit} [{elapsed_str}, {rate_fmt}{postfix}]')\n\n def __new__(cls, *_, **__):\n instance = object.__new__(cls)\n with cls.get_lock(): # also constructs lock if non-existent\n cls._instances.add(instance)\n # create monitoring thread\n if cls.monitor_interval and (cls.monitor is None\n or not cls.monitor.report()):\n try:\n cls.monitor = TMonitor(cls, cls.monitor_interval)\n except Exception as e: # pragma: nocover\n warn(\"tqdm:disabling monitor support\"\n \" (monitor_interval = 0) due to:\\n\" + str(e),\n TqdmMonitorWarning, stacklevel=2)\n cls.monitor_interval = 0\n return instance\n\n @classmethod\n def _get_free_pos(cls, instance=None):\n \"\"\"Skips specified instance.\"\"\"\n positions = {abs(inst.pos) for inst in cls._instances\n if inst is not instance and hasattr(inst, \"pos\")}\n return min(set(range(len(positions) + 1)).difference(positions))\n\n @classmethod\n def _decr_instances(cls, instance):\n \"\"\"\n Remove from list and reposition another unfixed bar\n to fill the new gap.\n\n This means that by default (where all nested bars are 
unfixed),\n order is not maintained but screen flicker/blank space is minimised.\n (tqdm<=4.44.1 moved ALL subsequent unfixed bars up.)\n \"\"\"\n with cls._lock:\n try:\n cls._instances.remove(instance)\n except KeyError:\n # if not instance.gui: # pragma: no cover\n # raise\n pass # py2: maybe magically removed already\n # else:\n if not instance.gui:\n last = (instance.nrows or 20) - 1\n # find unfixed (`pos >= 0`) overflow (`pos >= nrows - 1`)\n instances = list(filter(\n lambda i: hasattr(i, \"pos\") and last <= i.pos,\n cls._instances))\n # set first found to current `pos`\n if instances:\n inst = min(instances, key=lambda i: i.pos)\n inst.clear(nolock=True)\n inst.pos = abs(instance.pos)\n\n @classmethod\n def write(cls, s, file=None, end=\"\\n\", nolock=False):\n \"\"\"Print a message via tqdm (without overlap with bars).\"\"\"\n fp = file if file is not None else sys.stdout\n with cls.external_write_mode(file=file, nolock=nolock):\n # Write the message\n fp.write(s)\n fp.write(end)\n\n @classmethod\n @contextmanager\n def external_write_mode(cls, file=None, nolock=False):\n \"\"\"\n Disable tqdm within context and refresh tqdm when exits.\n Useful when writing to standard output stream\n \"\"\"\n fp = file if file is not None else sys.stdout\n\n try:\n if not nolock:\n cls.get_lock().acquire()\n # Clear all bars\n inst_cleared = []\n for inst in getattr(cls, '_instances', []):\n # Clear instance if in the target output file\n # or if write output + tqdm output are both either\n # sys.stdout or sys.stderr (because both are mixed in terminal)\n if hasattr(inst, \"start_t\") and (inst.fp == fp or all(\n f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):\n inst.clear(nolock=True)\n inst_cleared.append(inst)\n yield\n # Force refresh display of bars we cleared\n for inst in inst_cleared:\n inst.refresh(nolock=True)\n finally:\n if not nolock:\n cls._lock.release()\n\n @classmethod\n def set_lock(cls, lock):\n \"\"\"Set the global lock.\"\"\"\n cls._lock = lock\n\n @classmethod\n def get_lock(cls):\n \"\"\"Get the global lock. 
Construct it if it does not exist.\"\"\"\n if not hasattr(cls, '_lock'):\n cls._lock = TqdmDefaultWriteLock()\n return cls._lock\n\n @classmethod\n def pandas(cls, **tqdm_kwargs):\n \"\"\"\n Registers the current `tqdm` class with\n pandas.core.\n ( frame.DataFrame\n | series.Series\n | groupby.(generic.)DataFrameGroupBy\n | groupby.(generic.)SeriesGroupBy\n ).progress_apply\n\n A new instance will be created every time `progress_apply` is called,\n and each instance will automatically `close()` upon completion.\n\n Parameters\n ----------\n tqdm_kwargs : arguments for the tqdm instance\n\n Examples\n --------\n >>> import pandas as pd\n >>> import numpy as np\n >>> from tqdm import tqdm\n >>> from tqdm.gui import tqdm as tqdm_gui\n >>>\n >>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))\n >>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc\n >>> # Now you can use `progress_apply` instead of `apply`\n >>> df.groupby(0).progress_apply(lambda x: x**2)\n\n References\n ----------\n <https://stackoverflow.com/questions/18603270/\\\n progress-indicator-during-pandas-operations-python>\n \"\"\"\n from warnings import catch_warnings, simplefilter\n\n from pandas.core.frame import DataFrame\n from pandas.core.series import Series\n try:\n with catch_warnings():\n simplefilter(\"ignore\", category=FutureWarning)\n from pandas import Panel\n except ImportError: # pandas>=1.2.0\n Panel = None\n Rolling, Expanding = None, None\n try: # pandas>=1.0.0\n from pandas.core.window.rolling import _Rolling_and_Expanding\n except ImportError:\n try: # pandas>=0.18.0\n from pandas.core.window import _Rolling_and_Expanding\n except ImportError: # pandas>=1.2.0\n try: # pandas>=1.2.0\n from pandas.core.window.expanding import Expanding\n from pandas.core.window.rolling import Rolling\n _Rolling_and_Expanding = Rolling, Expanding\n except ImportError: # pragma: no cover\n _Rolling_and_Expanding = None\n try: # pandas>=0.25.0\n from pandas.core.groupby.generic import SeriesGroupBy # , NDFrameGroupBy\n from pandas.core.groupby.generic import DataFrameGroupBy\n except ImportError: # pragma: no cover\n try: # pandas>=0.23.0\n from pandas.core.groupby.groupby import DataFrameGroupBy, SeriesGroupBy\n except ImportError:\n from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy\n try: # pandas>=0.23.0\n from pandas.core.groupby.groupby import GroupBy\n except ImportError: # pragma: no cover\n from pandas.core.groupby import GroupBy\n\n try: # pandas>=0.23.0\n from pandas.core.groupby.groupby import PanelGroupBy\n except ImportError:\n try:\n from pandas.core.groupby import PanelGroupBy\n except ImportError: # pandas>=0.25.0\n PanelGroupBy = None\n\n tqdm_kwargs = tqdm_kwargs.copy()\n deprecated_t = [tqdm_kwargs.pop('deprecated_t', None)]\n\n def inner_generator(df_function='apply'):\n def inner(df, func, *args, **kwargs):\n \"\"\"\n Parameters\n ----------\n df : (DataFrame|Series)[GroupBy]\n Data (may be grouped).\n func : function\n To be applied on the (grouped) data.\n **kwargs : optional\n Transmitted to `df.apply()`.\n \"\"\"\n\n # Precompute total iterations\n total = tqdm_kwargs.pop(\"total\", getattr(df, 'ngroups', None))\n if total is None: # not grouped\n if df_function == 'applymap':\n total = df.size\n elif isinstance(df, Series):\n total = len(df)\n elif (_Rolling_and_Expanding is None or\n not isinstance(df, _Rolling_and_Expanding)):\n # DataFrame or Panel\n axis = kwargs.get('axis', 0)\n if axis == 'index':\n axis = 0\n elif axis == 'columns':\n axis = 1\n # when axis=0, 
total is shape[axis1]\n total = df.size // df.shape[axis]\n\n # Init bar\n if deprecated_t[0] is not None:\n t = deprecated_t[0]\n deprecated_t[0] = None\n else:\n t = cls(total=total, **tqdm_kwargs)\n\n if len(args) > 0:\n # *args intentionally not supported (see #244, #299)\n TqdmDeprecationWarning(\n \"Except func, normal arguments are intentionally\" +\n \" not supported by\" +\n \" `(DataFrame|Series|GroupBy).progress_apply`.\" +\n \" Use keyword arguments instead.\",\n fp_write=getattr(t.fp, 'write', sys.stderr.write))\n\n try: # pandas>=1.3.0\n from pandas.core.common import is_builtin_func\n except ImportError:\n is_builtin_func = df._is_builtin_func\n try:\n func = is_builtin_func(func)\n except TypeError:\n pass\n\n # Define bar updating wrapper\n def wrapper(*args, **kwargs):\n # update tbar correctly\n # it seems `pandas apply` calls `func` twice\n # on the first column/row to decide whether it can\n # take a fast or slow code path; so stop when t.total==t.n\n t.update(n=1 if not t.total or t.n < t.total else 0)\n return func(*args, **kwargs)\n\n # Apply the provided function (in **kwargs)\n # on the df using our wrapper (which provides bar updating)\n try:\n return getattr(df, df_function)(wrapper, **kwargs)\n finally:\n t.close()\n\n return inner\n\n # Monkeypatch pandas to provide easy methods\n # Enable custom tqdm progress in pandas!\n Series.progress_apply = inner_generator()\n SeriesGroupBy.progress_apply = inner_generator()\n Series.progress_map = inner_generator('map')\n SeriesGroupBy.progress_map = inner_generator('map')\n\n DataFrame.progress_apply = inner_generator()\n DataFrameGroupBy.progress_apply = inner_generator()\n DataFrame.progress_applymap = inner_generator('applymap')\n\n if Panel is not None:\n Panel.progress_apply = inner_generator()\n if PanelGroupBy is not None:\n PanelGroupBy.progress_apply = inner_generator()\n\n GroupBy.progress_apply = inner_generator()\n GroupBy.progress_aggregate = inner_generator('aggregate')\n GroupBy.progress_transform = inner_generator('transform')\n\n if Rolling is not None and Expanding is not None:\n Rolling.progress_apply = inner_generator()\n Expanding.progress_apply = inner_generator()\n elif _Rolling_and_Expanding is not None:\n _Rolling_and_Expanding.progress_apply = inner_generator()\n\n # override defaults via env vars\n @envwrap(\"TQDM_\", is_method=True, types={'total': float, 'ncols': int, 'miniters': float,\n 'position': int, 'nrows': int})\n def __init__(self, iterable=None, desc=None, total=None, leave=True, file=None,\n ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None,\n ascii=None, disable=False, unit='it', unit_scale=False,\n dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0,\n position=None, postfix=None, unit_divisor=1000, write_bytes=False,\n lock_args=None, nrows=None, colour=None, delay=0.0, gui=False,\n **kwargs):\n \"\"\"see tqdm.tqdm for arguments\"\"\"\n if file is None:\n file = sys.stderr\n\n if write_bytes:\n # Despite coercing unicode into bytes, py2 sys.std* streams\n # should have bytes written to them.\n file = SimpleTextIOWrapper(\n file, encoding=getattr(file, 'encoding', None) or 'utf-8')\n\n file = DisableOnWriteError(file, tqdm_instance=self)\n\n if disable is None and hasattr(file, \"isatty\") and not file.isatty():\n disable = True\n\n if total is None and iterable is not None:\n try:\n total = len(iterable)\n except (TypeError, AttributeError):\n total = None\n if total == float(\"inf\"):\n # Infinite iterations, behave same as unknown\n total = None\n\n if 
disable:\n self.iterable = iterable\n self.disable = disable\n with self._lock:\n self.pos = self._get_free_pos(self)\n self._instances.remove(self)\n self.n = initial\n self.total = total\n self.leave = leave\n return\n\n if kwargs:\n self.disable = True\n with self._lock:\n self.pos = self._get_free_pos(self)\n self._instances.remove(self)\n raise (\n TqdmDeprecationWarning(\n \"`nested` is deprecated and automated.\\n\"\n \"Use `position` instead for manual control.\\n\",\n fp_write=getattr(file, 'write', sys.stderr.write))\n if \"nested\" in kwargs else\n TqdmKeyError(\"Unknown argument(s): \" + str(kwargs)))\n\n # Preprocess the arguments\n if (\n (ncols is None or nrows is None) and (file in (sys.stderr, sys.stdout))\n ) or dynamic_ncols: # pragma: no cover\n if dynamic_ncols:\n dynamic_ncols = _screen_shape_wrapper()\n if dynamic_ncols:\n ncols, nrows = dynamic_ncols(file)\n else:\n _dynamic_ncols = _screen_shape_wrapper()\n if _dynamic_ncols:\n _ncols, _nrows = _dynamic_ncols(file)\n if ncols is None:\n ncols = _ncols\n if nrows is None:\n nrows = _nrows\n\n if miniters is None:\n miniters = 0\n dynamic_miniters = True\n else:\n dynamic_miniters = False\n\n if mininterval is None:\n mininterval = 0\n\n if maxinterval is None:\n maxinterval = 0\n\n if ascii is None:\n ascii = not _supports_unicode(file)\n\n if bar_format and ascii is not True and not _is_ascii(ascii):\n # Convert bar format into unicode since terminal uses unicode\n bar_format = str(bar_format)\n\n if smoothing is None:\n smoothing = 0\n\n # Store the arguments\n self.iterable = iterable\n self.desc = desc or ''\n self.total = total\n self.leave = leave\n self.fp = file\n self.ncols = ncols\n self.nrows = nrows\n self.mininterval = mininterval\n self.maxinterval = maxinterval\n self.miniters = miniters\n self.dynamic_miniters = dynamic_miniters\n self.ascii = ascii\n self.disable = disable\n self.unit = unit\n self.unit_scale = unit_scale\n self.unit_divisor = unit_divisor\n self.initial = initial\n self.lock_args = lock_args\n self.delay = delay\n self.gui = gui\n self.dynamic_ncols = dynamic_ncols\n self.smoothing = smoothing\n self._ema_dn = EMA(smoothing)\n self._ema_dt = EMA(smoothing)\n self._ema_miniters = EMA(smoothing)\n self.bar_format = bar_format\n self.postfix = None\n self.colour = colour\n self._time = time\n if postfix:\n try:\n self.set_postfix(refresh=False, **postfix)\n except TypeError:\n self.postfix = postfix\n\n # Init the iterations counters\n self.last_print_n = initial\n self.n = initial\n\n # if nested, at initial sp() call we replace '\\r' by '\\n' to\n # not overwrite the outer progress bar\n with self._lock:\n # mark fixed positions as negative\n self.pos = self._get_free_pos(self) if position is None else -position\n\n if not gui:\n # Initialize the screen printer\n self.sp = self.status_printer(self.fp)\n if delay <= 0:\n self.refresh(lock_args=self.lock_args)\n\n # Init the time counter\n self.last_print_t = self._time()\n # NB: Avoid race conditions by setting start_t at the very end of init\n self.start_t = self.last_print_t\n\n def __bool__(self):\n if self.total is not None:\n return self.total > 0\n if self.iterable is None:\n raise TypeError('bool() undefined when iterable == total == None')\n return bool(self.iterable)\n\n def __len__(self):\n return (\n self.total if self.iterable is None\n else self.iterable.shape[0] if hasattr(self.iterable, \"shape\")\n else len(self.iterable) if hasattr(self.iterable, \"__len__\")\n else self.iterable.__length_hint__() if 
hasattr(self.iterable, \"__length_hint__\")\n else getattr(self, \"total\", None))\n\n def __reversed__(self):\n try:\n orig = self.iterable\n except AttributeError:\n raise TypeError(\"'tqdm' object is not reversible\")\n else:\n self.iterable = reversed(self.iterable)\n return self.__iter__()\n finally:\n self.iterable = orig\n\n def __contains__(self, item):\n contains = getattr(self.iterable, '__contains__', None)\n return contains(item) if contains is not None else item in self.__iter__()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n try:\n self.close()\n except AttributeError:\n # maybe eager thread cleanup upon external error\n if (exc_type, exc_value, traceback) == (None, None, None):\n raise\n warn(\"AttributeError ignored\", TqdmWarning, stacklevel=2)\n\n def __del__(self):\n self.close()\n\n def __str__(self):\n return self.format_meter(**self.format_dict)\n\n @property\n def _comparable(self):\n return abs(getattr(self, \"pos\", 1 << 31))\n\n def __hash__(self):\n return id(self)\n\n def __iter__(self):\n \"\"\"Backward-compatibility to use: for x in tqdm(iterable)\"\"\"\n\n # Inlining instance variables as locals (speed optimisation)\n iterable = self.iterable\n\n # If the bar is disabled, then just walk the iterable\n # (note: keep this check outside the loop for performance)\n if self.disable:\n for obj in iterable:\n yield obj\n return\n\n mininterval = self.mininterval\n last_print_t = self.last_print_t\n last_print_n = self.last_print_n\n min_start_t = self.start_t + self.delay\n n = self.n\n time = self._time\n\n try:\n for obj in iterable:\n yield obj\n # Update and possibly print the progressbar.\n # Note: does not call self.update(1) for speed optimisation.\n n += 1\n\n if n - last_print_n >= self.miniters:\n cur_t = time()\n dt = cur_t - last_print_t\n if dt >= mininterval and cur_t >= min_start_t:\n self.update(n - last_print_n)\n last_print_n = self.last_print_n\n last_print_t = self.last_print_t\n finally:\n self.n = n\n self.close()\n\n def update(self, n=1):\n \"\"\"\n Manually update the progress bar, useful for streams\n such as reading files.\n E.g.:\n >>> t = tqdm(total=filesize) # Initialise\n >>> for current_buffer in stream:\n ... ...\n ... t.update(len(current_buffer))\n >>> t.close()\n The last line is highly recommended, but possibly not necessary if\n `t.update()` will be called in such a way that `filesize` will be\n exactly reached and printed.\n\n Parameters\n ----------\n n : int or float, optional\n Increment to add to the internal counter of iterations\n [default: 1]. 
If using float, consider specifying `{n:.3f}`\n or similar in `bar_format`, or specifying `unit_scale`.\n\n Returns\n -------\n out : bool or None\n True if a `display()` was triggered.\n \"\"\"\n if self.disable:\n return\n\n if n < 0:\n self.last_print_n += n # for auto-refresh logic to work\n self.n += n\n\n # check counter first to reduce calls to time()\n if self.n - self.last_print_n >= self.miniters:\n cur_t = self._time()\n dt = cur_t - self.last_print_t\n if dt >= self.mininterval and cur_t >= self.start_t + self.delay:\n cur_t = self._time()\n dn = self.n - self.last_print_n # >= n\n if self.smoothing and dt and dn:\n # EMA (not just overall average)\n self._ema_dn(dn)\n self._ema_dt(dt)\n self.refresh(lock_args=self.lock_args)\n if self.dynamic_miniters:\n # If no `miniters` was specified, adjust automatically to the\n # maximum iteration rate seen so far between two prints.\n # e.g.: After running `tqdm.update(5)`, subsequent\n # calls to `tqdm.update()` will only cause an update after\n # at least 5 more iterations.\n if self.maxinterval and dt >= self.maxinterval:\n self.miniters = dn * (self.mininterval or self.maxinterval) / dt\n elif self.smoothing:\n # EMA miniters update\n self.miniters = self._ema_miniters(\n dn * (self.mininterval / dt if self.mininterval and dt\n else 1))\n else:\n # max iters between two prints\n self.miniters = max(self.miniters, dn)\n\n # Store old values for next call\n self.last_print_n = self.n\n self.last_print_t = cur_t\n return True\n\n def close(self):\n \"\"\"Cleanup and (if leave=False) close the progressbar.\"\"\"\n if self.disable:\n return\n\n # Prevent multiple closures\n self.disable = True\n\n # decrement instance pos and remove from internal set\n pos = abs(self.pos)\n self._decr_instances(self)\n\n if self.last_print_t < self.start_t + self.delay:\n # haven't ever displayed; nothing to clear\n return\n\n # GUI mode\n if getattr(self, 'sp', None) is None:\n return\n\n # annoyingly, _supports_unicode isn't good enough\n def fp_write(s):\n self.fp.write(str(s))\n\n try:\n fp_write('')\n except ValueError as e:\n if 'closed' in str(e):\n return\n raise # pragma: no cover\n\n leave = pos == 0 if self.leave is None else self.leave\n\n with self._lock:\n if leave:\n # stats for overall rate (no weighted average)\n self._ema_dt = lambda: None\n self.display(pos=0)\n fp_write('\\n')\n else:\n # clear previous display\n if self.display(msg='', pos=pos) and not pos:\n fp_write('\\r')\n\n def clear(self, nolock=False):\n \"\"\"Clear current bar display.\"\"\"\n if self.disable:\n return\n\n if not nolock:\n self._lock.acquire()\n pos = abs(self.pos)\n if pos < (self.nrows or 20):\n self.moveto(pos)\n self.sp('')\n self.fp.write('\\r') # place cursor back at the beginning of line\n self.moveto(-pos)\n if not nolock:\n self._lock.release()\n\n def refresh(self, nolock=False, lock_args=None):\n \"\"\"\n Force refresh the display of this bar.\n\n Parameters\n ----------\n nolock : bool, optional\n If `True`, does not lock.\n If [default: `False`]: calls `acquire()` on internal lock.\n lock_args : tuple, optional\n Passed to internal lock's `acquire()`.\n If specified, will only `display()` if `acquire()` returns `True`.\n \"\"\"\n if self.disable:\n return\n\n if not nolock:\n if lock_args:\n if not self._lock.acquire(*lock_args):\n return False\n else:\n self._lock.acquire()\n self.display()\n if not nolock:\n self._lock.release()\n return True\n\n def unpause(self):\n \"\"\"Restart tqdm timer from last print time.\"\"\"\n if self.disable:\n 
return\n cur_t = self._time()\n self.start_t += cur_t - self.last_print_t\n self.last_print_t = cur_t\n\n def reset(self, total=None):\n \"\"\"\n Resets to 0 iterations for repeated use.\n\n Consider combining with `leave=True`.\n\n Parameters\n ----------\n total : int or float, optional. Total to use for the new bar.\n \"\"\"\n self.n = 0\n if total is not None:\n self.total = total\n if self.disable:\n return\n self.last_print_n = 0\n self.last_print_t = self.start_t = self._time()\n self._ema_dn = EMA(self.smoothing)\n self._ema_dt = EMA(self.smoothing)\n self._ema_miniters = EMA(self.smoothing)\n self.refresh()\n\n def set_description(self, desc=None, refresh=True):\n \"\"\"\n Set/modify description of the progress bar.\n\n Parameters\n ----------\n desc : str, optional\n refresh : bool, optional\n Forces refresh [default: True].\n \"\"\"\n self.desc = desc + ': ' if desc else ''\n if refresh:\n self.refresh()\n\n def set_description_str(self, desc=None, refresh=True):\n \"\"\"Set/modify description without ': ' appended.\"\"\"\n self.desc = desc or ''\n if refresh:\n self.refresh()\n\n def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):\n \"\"\"\n Set/modify postfix (additional stats)\n with automatic formatting based on datatype.\n\n Parameters\n ----------\n ordered_dict : dict or OrderedDict, optional\n refresh : bool, optional\n Forces refresh [default: True].\n kwargs : dict, optional\n \"\"\"\n # Sort in alphabetical order to be more deterministic\n postfix = OrderedDict([] if ordered_dict is None else ordered_dict)\n for key in sorted(kwargs.keys()):\n postfix[key] = kwargs[key]\n # Preprocess stats according to datatype\n for key in postfix.keys():\n # Number: limit the length of the string\n if isinstance(postfix[key], Number):\n postfix[key] = self.format_num(postfix[key])\n # Else for any other type, try to get the string conversion\n elif not isinstance(postfix[key], str):\n postfix[key] = str(postfix[key])\n # Else if it's a string, don't need to preprocess anything\n # Stitch together to get the final postfix\n self.postfix = ', '.join(key + '=' + postfix[key].strip()\n for key in postfix.keys())\n if refresh:\n self.refresh()\n\n def set_postfix_str(self, s='', refresh=True):\n \"\"\"\n Postfix without dictionary expansion, similar to prefix handling.\n \"\"\"\n self.postfix = str(s)\n if refresh:\n self.refresh()\n\n def moveto(self, n):\n # TODO: private method\n self.fp.write('\\n' * n + _term_move_up() * -n)\n getattr(self.fp, 'flush', lambda: None)()\n\n @property\n def format_dict(self):\n \"\"\"Public API for read-only member access.\"\"\"\n if self.disable and not hasattr(self, 'unit'):\n return defaultdict(lambda: None, {\n 'n': self.n, 'total': self.total, 'elapsed': 0, 'unit': 'it'})\n if self.dynamic_ncols:\n self.ncols, self.nrows = self.dynamic_ncols(self.fp)\n return {\n 'n': self.n, 'total': self.total,\n 'elapsed': self._time() - self.start_t if hasattr(self, 'start_t') else 0,\n 'ncols': self.ncols, 'nrows': self.nrows, 'prefix': self.desc,\n 'ascii': self.ascii, 'unit': self.unit, 'unit_scale': self.unit_scale,\n 'rate': self._ema_dn() / self._ema_dt() if self._ema_dt() else None,\n 'bar_format': self.bar_format, 'postfix': self.postfix,\n 'unit_divisor': self.unit_divisor, 'initial': self.initial,\n 'colour': self.colour}\n\n def display(self, msg=None, pos=None):\n \"\"\"\n Use `self.sp` to display `msg` in the specified `pos`.\n\n Consider overloading this function when inheriting to use e.g.:\n 
`self.some_frontend(**self.format_dict)` instead of `self.sp`.\n\n Parameters\n ----------\n msg : str, optional. What to display (default: `repr(self)`).\n pos : int, optional. Position to `moveto`\n (default: `abs(self.pos)`).\n \"\"\"\n if pos is None:\n pos = abs(self.pos)\n\n nrows = self.nrows or 20\n if pos >= nrows - 1:\n if pos >= nrows:\n return False\n if msg or msg is None: # override at `nrows - 1`\n msg = \" ... (more hidden) ...\"\n\n if not hasattr(self, \"sp\"):\n raise TqdmDeprecationWarning(\n \"Please use `tqdm.gui.tqdm(...)`\"\n \" instead of `tqdm(..., gui=True)`\\n\",\n fp_write=getattr(self.fp, 'write', sys.stderr.write))\n\n if pos:\n self.moveto(pos)\n self.sp(self.__str__() if msg is None else msg)\n if pos:\n self.moveto(-pos)\n return True\n\n @classmethod\n @contextmanager\n def wrapattr(cls, stream, method, total=None, bytes=True, **tqdm_kwargs):\n \"\"\"\n stream : file-like object.\n method : str, \"read\" or \"write\". The result of `read()` and\n the first argument of `write()` should have a `len()`.\n\n >>> with tqdm.wrapattr(file_obj, \"read\", total=file_obj.size) as fobj:\n ... while True:\n ... chunk = fobj.read(chunk_size)\n ... if not chunk:\n ... break\n \"\"\"\n with cls(total=total, **tqdm_kwargs) as t:\n if bytes:\n t.unit = \"B\"\n t.unit_scale = True\n t.unit_divisor = 1024\n yield CallbackIOWrapper(t.update, stream, method)" } ]
import warnings from .std import TqdmExperimentalWarning from .autonotebook import tqdm as notebook_tqdm from .asyncio import tqdm as asyncio_tqdm from .std import tqdm as std_tqdm
12,677
""" Enables multiple commonly used features. Method resolution order: - `tqdm.autonotebook` without import warnings - `tqdm.asyncio` - `tqdm.std` base class Usage: >>> from tqdm.auto import trange, tqdm >>> for i in trange(10): ... ... """ with warnings.catch_warnings():
""" Enables multiple commonly used features. Method resolution order: - `tqdm.autonotebook` without import warnings - `tqdm.asyncio` - `tqdm.std` base class Usage: >>> from tqdm.auto import trange, tqdm >>> for i in trange(10): ... ... """ with warnings.catch_warnings():
warnings.simplefilter("ignore", category=TqdmExperimentalWarning)
0
2023-12-14 07:43:03+00:00
16k
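The tqdm.auto record above ends inside the module docstring, which itself documents the intended usage (the trange/tqdm doctest). For orientation only, a minimal self-contained sketch of that documented usage follows; the time.sleep call is an illustrative stand-in for real work and is not part of the record.

# Minimal usage of tqdm.auto as documented in the docstring shown above.
# tqdm.auto transparently selects the notebook or console progress bar.
import time
from tqdm.auto import tqdm, trange

for i in trange(10, desc="outer"):       # trange(10) is shorthand for tqdm(range(10))
    time.sleep(0.01)                     # stand-in for real work

total = 0
for x in tqdm([1, 2, 3, 4], desc="sum"):
    total += x
print(total)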
LkPrtctrd/BSL-V53
Heart/Logic/LogicLaserMessageFactory.py
[ { "identifier": "ClientHelloMessage", "path": "Heart/Packets/Client/Authentification/ClientHelloMessage.py", "snippet": "class ClientHelloMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"Protocol\"] = self.readInt()\n fields[\"KeyVersion\"] = self.readInt()\n fields[\"MajorVersion\"] = self.readInt()\n fields[\"MinorVersion\"] = self.readInt()\n fields[\"Build\"] = self.readInt()\n fields[\"ContentHash\"] = self.readString()\n fields[\"DeviceType\"] = self.readInt()\n fields[\"AppStore\"] = self.readInt()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(20100, fields, cryptoInit)\n\n def getMessageType(self):\n return 10100\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LoginMessage", "path": "Heart/Packets/Client/Authentification/LoginMessage.py", "snippet": "class LoginMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"AccountID\"] = self.readLong()\n fields[\"PassToken\"] = self.readString()\n fields[\"ClientMajor\"] = self.readInt()\n fields[\"ClientMinor\"] = self.readInt()\n fields[\"ClientBuild\"] = self.readInt()\n fields[\"ResourceSha\"] = self.readString()\n fields[\"Device\"] = self.readString()\n fields[\"PreferredLanguage\"] = self.readDataReference()\n fields[\"PreferredDeviceLanguage\"] = self.readString()\n fields[\"OSVersion\"] = self.readString()\n fields[\"isAndroid\"] = self.readBoolean()\n fields[\"IMEI\"] = self.readString()\n fields[\"AndroidID\"] = self.readString()\n fields[\"isAdvertisingEnabled\"] = self.readBoolean()\n fields[\"AppleIFV\"] = self.readString()\n fields[\"RndKey\"] = self.readInt()\n fields[\"AppStore\"] = self.readVInt()\n fields[\"ClientVersion\"] = self.readString()\n fields[\"TencentOpenId\"] = self.readString()\n fields[\"TencentToken\"] = self.readString()\n fields[\"TencentPlatform\"] = self.readVInt()\n fields[\"DeviceVerifierResponse\"] = self.readString()\n fields[\"AppLicensingSignature\"] = self.readString()\n fields[\"DeviceVerifierResponse\"] = self.readString()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n if fields[\"ClientMajor\"]==53:\n calling_instance.player.ClientVersion = f'{str(fields[\"ClientMajor\"])}.{str(fields[\"ClientBuild\"])}.{str(fields[\"ClientMinor\"])}'\n fields[\"Socket\"] = calling_instance.client\n db_instance = DatabaseHandler()\n if db_instance.playerExist(fields[\"PassToken\"], fields[\"AccountID\"]):\n player_data = json.loads(db_instance.getPlayerEntry(fields[\"AccountID\"])[2])\n db_instance.loadAccount(calling_instance.player, fields[\"AccountID\"])\n else:\n db_instance.createAccount(calling_instance.player.getDataTemplate(fields[\"AccountID\"][0], fields[\"AccountID\"][1], fields[\"PassToken\"]))\n ClientsManager.AddPlayer(calling_instance.player.ID, calling_instance.client)\n Messaging.sendMessage(20104, fields, cryptoInit, calling_instance.player)\n Messaging.sendMessage(24101, fields, cryptoInit, calling_instance.player)\n Messaging.sendMessage(24399, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 10101\n\n def getMessageVersion(self):\n 
return self.messageVersion" }, { "identifier": "AskForBattleEndMessage", "path": "Heart/Packets/Client/Battle/AskForBattleEndMessage.py", "snippet": "class AskForBattleEndMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"Unk1\"] = self.readVInt()\n fields[\"Result\"] = self.readVInt()\n fields[\"Rank\"] = self.readVInt()\n fields[\"MapID\"] = self.readDataReference()\n fields[\"HeroesCount\"] = self.readVInt()\n fields[\"Heroes\"] = []\n for i in range(fields[\"HeroesCount\"]): fields[\"Heroes\"].append({\"Brawler\": {\"ID\": self.readDataReference(), \"SkinID\": self.readDataReference()}, \"Team\": self.readVInt(), \"IsPlayer\": self.readBoolean(), \"PlayerName\": self.readString()})\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(23456, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 14110\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "ChangeAvatarNameMessage", "path": "Heart/Packets/Client/Home/ChangeAvatarNameMessage.py", "snippet": "class ChangeAvatarNameMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeString(fields[\"Name\"])\n self.writeBoolean(fields[\"NameSetByUser\"])\n\n def decode(self):\n fields = {}\n fields[\"Name\"] = self.readString()\n fields[\"NameSetByUser\"] = self.readBoolean()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n db_instance = DatabaseHandler()\n playerData = db_instance.getPlayer(calling_instance.player.ID)\n playerData[\"Name\"] = fields[\"Name\"]\n playerData[\"Registered\"] = True\n db_instance.updatePlayerData(playerData, calling_instance)\n fields[\"Socket\"] = calling_instance.client\n fields[\"Command\"] = {\"ID\": 201}\n Messaging.sendMessage(24111, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 10212\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "EndClientTurnMessage", "path": "Heart/Packets/Client/Home/EndClientTurnMessage.py", "snippet": "class EndClientTurnMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n self.readBoolean()\n fields[\"Tick\"] = self.readVInt()\n fields[\"Checksum\"] = self.readVInt()\n fields[\"CommandsCount\"] = self.readVInt()\n super().decode(fields)\n fields[\"Commands\"] = []\n for i in range(fields[\"CommandsCount\"]):\n fields[\"Commands\"].append({\"ID\": self.readVInt()})\n if LogicCommandManager.commandExist(fields[\"Commands\"][i][\"ID\"]):\n command = LogicCommandManager.createCommand(fields[\"Commands\"][i][\"ID\"])\n print(\"Command\", LogicCommandManager.getCommandsName(fields[\"Commands\"][i][\"ID\"]))\n if command is not None:\n fields[\"Commands\"][i][\"Fields\"] = command.decode(self)\n fields[\"Commands\"][i][\"Instance\"] = command\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n for command in fields[\"Commands\"]:\n if \"Instance\" not in command.keys():\n return\n\n if hasattr(command[\"Instance\"], 
'execute'):\n command[\"Instance\"].execute(calling_instance, command[\"Fields\"], cryptoInit)\n if command[\"ID\"] == 519:\n Messaging.sendMessage(24104, {\"Socket\": calling_instance.client, \"ServerChecksum\": 0, \"ClientChecksum\": 0, \"Tick\": 0}, cryptoInit)\n\n def getMessageType(self):\n return 14102\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "GoHomeFromOfflinePractiseMessage", "path": "Heart/Packets/Client/Home/GoHomeFromOfflinePractiseMessage.py", "snippet": "class GoHomeFromOfflinePractiseMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n self.readBoolean()\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24101, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 14109\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "GoHomeMessage", "path": "Heart/Packets/Client/Home/GoHomeMessage.py", "snippet": "class GoHomeMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n self.readBoolean()\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24101, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 17750\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "GetPlayerProfileMessage", "path": "Heart/Packets/Client/Home/GetPlayerProfileMessage.py", "snippet": "class GetPlayerProfileMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"BattleInfoBoolean\"] = self.readBoolean()\n if fields[\"BattleInfoBoolean\"]:\n fields[\"unk1\"] = self.readVInt()\n fields[\"AnotherID\"] = self.readLong()\n fields[\"unk2\"] = self.readVInt()\n for i in self.readVInt():\n fields[\"CsvID\"] = self.readDataReference()\n fields[\"unk3\"] = self.readVInt()\n fields[\"unk4\"] = self.readVInt()\n fields[\"unk5\"] = self.readVInt()\n fields[\"unk6\"] = self.readVInt()\n fields[\"PlayerName\"] = self.readString()\n fields[\"unk7\"] = self.readVInt()\n fields[\"Thumbnail\"] = self.readVInt()\n fields[\"NameColor\"] = self.readVInt()\n fields[\"unk10\"] = self.readVInt()\n fields[\"unk11\"] = self.readVInt()\n fields[\"PlayerHighID\"] = self.readInt()\n fields[\"PlayerLowID\"] = self.readInt()\n super().decode(fields)\n\n\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24113, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 15081\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "AskForAllianceDataMessage", "path": "Heart/Packets/Client/Home/AskForAllianceDataMessage.py", "snippet": "class AskForAllianceDataMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"id\"] = self.readVLong()\n fields[\"isInAlliance\"] = 
self.readBoolean()\n if fields[\"isInAlliance\"] == True:\n fields[\"anotherIDHigh\"] = self.readVInt()\n fields[\"anotherIDLow\"] = self.readVInt()\n super().decode(fields)\n\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24301, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 14302\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "KeepAliveMessage", "path": "Heart/Packets/Client/Socket/KeepAliveMessage.py", "snippet": "class KeepAliveMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n return {}\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(20108, fields, cryptoInit)\n\n def getMessageType(self):\n return 10108\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LoginFailedMessage", "path": "Heart/Packets/Server/Authentification/LoginFailedMessage.py", "snippet": "class LoginFailedMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeInt(fields['ErrorID'])\n self.writeString(fields['FingerprintData'])\n self.writeString()\n self.writeString(fields['ContentURL'])\n self.writeString()\n self.writeString(fields['Message'])\n self.writeInt(0)\n self.writeBoolean(False)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeString()\n self.writeInt(0)\n self.writeBoolean(True)\n self.writeBoolean(True)\n self.writeString()\n self.writeVInt(0)\n self.writeString()\n self.writeBoolean(False)\n\n def decode(self):\n fields = {}\n fields[\"ErrorCode\"] = self.readInt()\n fields[\"ResourceFingerprintData\"] = self.readString()\n fields[\"RedirectDomain\"] = self.readString()\n fields[\"ContentURL\"] = self.readString()\n fields[\"UpdateURL\"] = self.readString()\n fields[\"Reason\"] = self.readString()\n fields[\"SecondsUntilMaintenanceEnd\"] = self.readInt()\n fields[\"ShowContactSupportForBan\"] = self.readBoolean()\n fields[\"CompressedFingerprintData\"] = self.readBytesWithoutLength()\n fields[\"ContentURLListCount\"] = self.readInt()\n fields[\"ContentURLList\"] = []\n for i in range(fields[\"ContentURLListCount\"]):\n fields[\"ContentURLList\"].append(self.readString())\n fields[\"KunlunAppStore\"] = self.readInt()\n fields[\"MaintenanceType\"] = self.readInt()\n fields[\"HelpshiftFaqId\"] = self.readString()\n fields[\"Tier\"] = self.readInt()\n fields[\"Unk1\"] = self.readBoolean()\n fields[\"Unk2\"] = self.readBoolean()\n fields[\"Unk3\"] = self.readString()\n fields[\"Unk4\"] = self.readVInt()\n fields[\"Unk5\"] = self.readString()\n fields[\"OptionalTargetedAccountIdState\"] = self.readBoolean()\n if fields[\"OptionalTargetedAccountIdState\"] == True:\n fields[\"OptionalTargetedAccountId\"] = self.readLong()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20103\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LoginOkMessage", "path": "Heart/Packets/Server/Authentification/LoginOkMessage.py", "snippet": "class LoginOkMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n 
self.messageVersion = 1\n\n def encode(self, fields, player):\n self.writeLong(player.ID[0], player.ID[1])\n self.writeLong(player.ID[0], player.ID[1])\n self.writeString(player.Token)\n self.writeString()\n self.writeString()\n self.writeInt(53)\n self.writeInt(176)\n self.writeInt(1)\n self.writeString(\"dev\")\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeString()\n self.writeString()\n self.writeString()\n self.writeInt(0)\n self.writeString()\n self.writeString(\"RU\")\n self.writeString()\n self.writeInt(0)\n self.writeString()\n self.writeInt(2)\n self.writeString('https://game-assets.brawlstarsgame.com')\n self.writeString('http://a678dbc1c015a893c9fd-4e8cc3b1ad3a3c940c504815caefa967.r87.cf2.rackcdn.com')\n self.writeInt(2)\n self.writeString('https://event-assets.brawlstars.com')\n self.writeString('https://24b999e6da07674e22b0-8209975788a0f2469e68e84405ae4fcf.ssl.cf2.rackcdn.com/event-assets')\n self.writeVInt(0)\n self.writeCompressedString(b'')\n self.writeBoolean(True)\n self.writeBoolean(False)\n self.writeString()\n self.writeString()\n self.writeString()\n self.writeString('https://play.google.com/store/apps/details?id=com.supercell.brawlstars')\n self.writeString()\n self.writeBoolean(False)\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n\n def decode(self):\n fields = {}\n fields[\"AccountID\"] = self.readLong()\n fields[\"HomeID\"] = self.readLong()\n fields[\"PassToken\"] = self.readString()\n fields[\"FacebookID\"] = self.readString()\n fields[\"GamecenterID\"] = self.readString()\n fields[\"ServerMajorVersion\"] = self.readInt()\n fields[\"ContentVersion\"] = self.readInt()\n fields[\"ServerBuild\"] = self.readInt()\n fields[\"ServerEnvironment\"] = self.readString()\n fields[\"SessionCount\"] = self.readInt()\n fields[\"PlayTimeSeconds\"] = self.readInt()\n fields[\"DaysSinceStartedPlaying\"] = self.readInt()\n fields[\"FacebookAppID\"] = self.readString()\n fields[\"ServerTime\"] = self.readString()\n fields[\"AccountCreatedDate\"] = self.readString()\n fields[\"StartupCooldownSeconds\"] = self.readInt()\n fields[\"GoogleServiceID\"] = self.readString()\n fields[\"LoginCountry\"] = self.readString()\n fields[\"KunlunID\"] = self.readString()\n fields[\"Tier\"] = self.readInt()\n fields[\"TencentID\"] = self.readString()\n\n ContentUrlCount = self.readInt()\n fields[\"GameAssetsUrls\"] = []\n for i in range(ContentUrlCount):\n fields[\"GameAssetsUrls\"].append(self.readString())\n\n EventUrlCount = self.readInt()\n fields[\"EventAssetsUrls\"] = []\n for i in range(EventUrlCount):\n fields[\"EventAssetsUrls\"].append(self.readString())\n\n fields[\"SecondsUntilAccountDeletion\"] = self.readVInt()\n fields[\"SupercellIDToken\"] = self.readCompressedString()\n fields[\"IsSupercellIDLogoutAllDevicesAllowed\"] = self.readBoolean()\n fields[\"isSupercellIDEligible\"] = self.readBoolean()\n fields[\"LineID\"] = self.readString()\n fields[\"SessionID\"] = self.readString()\n fields[\"KakaoID\"] = self.readString()\n fields[\"UpdateURL\"] = self.readString()\n fields[\"YoozooPayNotifyUrl\"] = self.readString()\n fields[\"UnbotifyEnabled\"] = self.readBoolean()\n\n Unknown1 = self.readBoolean()\n fields[\"Unknown1\"] = Unknown1\n if Unknown1:\n fields[\"Unknown2\"] = self.readString()\n\n Unknown3 = self.readBoolean()\n fields[\"Unknown3\"] = Unknown1\n 
if Unknown3:\n fields[\"Unknown4\"] = self.readString()\n\n Unknown5 = self.readBoolean()\n fields[\"Unknown5\"] = Unknown1\n if Unknown5:\n fields[\"Unknown6\"] = self.readString()\n\n Unknown7 = self.readBoolean()\n fields[\"Unknown7\"] = Unknown1\n if Unknown7:\n fields[\"Unknown8\"] = self.readString()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20104\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "OutOfSyncMessage", "path": "Heart/Packets/Server/Authentification/OutOfSyncMessage.py", "snippet": "class OutOfSyncMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeVInt(fields[\"ServerChecksum\"])\n self.writeVInt(fields[\"ClientChecksum\"])\n self.writeVInt(fields[\"Tick\"])\n\n def decode(self):\n fields = {}\n fields[\"ServerChecksum\"] = self.readVInt()\n fields[\"ClientChecksum\"] = self.readVInt()\n fields[\"Tick\"] = self.readVInt()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24104\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "ServerHelloMessage", "path": "Heart/Packets/Server/Authentification/ServerHelloMessage.py", "snippet": "class ServerHelloMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeBytes(urandom(24), 24)\n\n def decode(self):\n fields = {}\n fields[\"Random\"] = self.readBytesWithoutLength()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20100\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "BattleEndMessage", "path": "Heart/Packets/Server/Battle/BattleEndMessage.py", "snippet": "class BattleEndMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeLong(0, 0) # Battle UUID High\n self.writeLong(0, 0) # Battle UUID Low\n self.writeVInt(2) # Battle End Game Mode (gametype)\n self.writeVInt(fields[\"Rank\"]) # Result (Victory/Defeat/Draw/Rank Score)\n self.writeVInt(0) # Tokens Gained (Gained Keys)\n self.writeVInt(0) # Trophies Result (Metascore change)\n self.writeVInt(0) # Power Play Points Gained (Pro League Points)\n self.writeVInt(0) # Doubled Tokens (Double Keys)\n self.writeVInt(0) # Double Token Event (Double Event Keys)\n self.writeVInt(0) # Token Doubler Remaining (Double Keys Remaining)\n self.writeVInt(0) # game Lenght In Seconds\n self.writeVInt(0) # Epic Win Power Play Points Gained (op Win Points)\n self.writeVInt(0) # Championship Level Reached (CC Wins)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(True)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(-1)\n self.writeBoolean(False)\n\n self.writeVInt(fields[\"HeroesCount\"])\n for heroEntry in fields[\"Heroes\"]:\n self.writeBoolean(heroEntry[\"IsPlayer\"])\n 
self.writeBoolean(bool(heroEntry[\"Team\"]))\n self.writeBoolean(bool(heroEntry[\"Team\"]))\n self.writeByte(1)\n for i in range(1):\n self.writeDataReference(heroEntry[\"Brawler\"][\"ID\"][0], heroEntry[\"Brawler\"][\"ID\"][1])\n self.writeByte(1)\n for i in range(1):\n if (heroEntry[\"Brawler\"][\"SkinID\"] is None):\n self.writeVInt(0)\n else:\n self.writeDataReference(heroEntry[\"Brawler\"][\"SkinID\"][0], heroEntry[\"Brawler\"][\"SkinID\"][1])\n self.writeByte(1)\n for i in range(1):\n self.writeVInt(1250)\n self.writeByte(1)\n for i in range(1):\n self.writeVInt(11)\n self.writeByte(1)\n for i in range(1):\n self.writeVInt(0)\n\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeBoolean(heroEntry[\"IsPlayer\"])\n if heroEntry[\"IsPlayer\"]:\n self.writeLong(player.ID[0], player.ID[1])\n self.writeString(heroEntry[\"PlayerName\"])\n self.writeVInt(100)\n self.writeVInt(28000000)\n self.writeVInt(43000000)\n self.writeVInt(-2)\n if heroEntry[\"IsPlayer\"]:\n self.writeBoolean(True)\n self.writeVLong(5, 4181497)\n self.writeString('haccer club')\n self.writeDataReference(8, 16)\n else:\n self.writeBoolean(False)\n\n self.writeInt8(1)\n self.writeVInt(5978)\n self.writeInt8(1)\n self.writeVInt(0)\n\n self.writeInt16(5)\n self.writeInt16(3)\n self.writeInt(27328)\n self.writeInt(25659)\n\n self.writeDataReference(0)\n\n self.writeVInt(0)\n self.writeVInt(1)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n\n def decode(self):\n fields = {}\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 23456\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "AvailableServerCommandMessage", "path": "Heart/Packets/Server/Home/AvailableServerCommandMessage.py", "snippet": "class AvailableServerCommandMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(fields[\"Command\"][\"ID\"])\n command = LogicCommandManager.createCommand(fields[\"Command\"][\"ID\"], self.messagePayload)\n self.messagePayload = command.encode(fields)\n\n def decode(self):\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24111\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LobbyInfoMessage", "path": "Heart/Packets/Server/Home/LobbyInfoMessage.py", "snippet": "class LobbyInfoMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(ClientsManager.GetCount())\n self.writeString(f\"\"\"Version: 
{player.ClientVersion}\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\"\"\")\n self.writeVInt(0) # count event\n self.writeVInt(0) # new timer in v51\n\n def decode(self):\n fields = {}\n fields[\"PlayerCount\"] = self.readVInt()\n fields[\"Text\"] = self.readString()\n fields[\"Unk1\"] = self.readVInt()\n super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 23457\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "OwnHomeDataMessage", "path": "Heart/Packets/Server/Home/OwnHomeDataMessage.py", "snippet": "class OwnHomeDataMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(1688816070)\n self.writeVInt(1191532375)\n self.writeVInt(2023189)\n self.writeVInt(73530)\n\n self.writeVInt(player.Trophies)\n self.writeVInt(player.HighestTrophies)\n self.writeVInt(player.HighestTrophies) \n self.writeVInt(player.TrophyRoadTier)\n self.writeVInt(player.Experience)\n self.writeDataReference(28, player.Thumbnail)\n self.writeDataReference(43, player.Namecolor)\n\n self.writeVInt(26)\n for x in range(26):\n self.writeVInt(x)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n \n self.writeVInt(len(player.OwnedSkins))\n for x in player.OwnedSkins:\n self.writeDataReference(29, x)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n self.writeVInt(player.HighestTrophies)\n self.writeVInt(0)\n self.writeVInt(2)\n self.writeBoolean(True)\n self.writeVInt(0)\n self.writeVInt(115)\n self.writeVInt(335442)\n self.writeVInt(1001442)\n self.writeVInt(5778642) \n\n self.writeVInt(120)\n self.writeVInt(200)\n self.writeVInt(0)\n\n self.writeBoolean(True)\n self.writeVInt(2)\n self.writeVInt(2)\n self.writeVInt(2)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeVInt(1) # Shop Offers\n\n self.writeVInt(1) # RewardCount\n\n self.writeVInt(38) # ItemType\n self.writeVInt(1337) # Amount\n self.writeDataReference(0) # CsvID\n self.writeVInt(0) # SkinID\n\n self.writeVInt(0) # Currency(0-Gems, 1-Gold, 3-StarpoInts)\n self.writeVInt(0) # Cost\n self.writeVInt(0) # Time\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # Daily Offer\n self.writeVInt(0) # Old price\n self.writeString('Offer') # Text\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeString(\"offer_bgr_xmas23\") # Background\n self.writeVInt(0)\n self.writeBoolean(False) # This purchase is already being processed\n self.writeVInt(0) # Type Benefit\n self.writeVInt(0) # Benefit\n self.writeString()\n self.writeBoolean(False) # One time offer\n self.writeBoolean(False) # Claimed\n self.writeDataReference(0)\n self.writeDataReference(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n \n self.writeVInt(20)\n 
self.writeVInt(1428)\n\n self.writeVInt(0)\n\n self.writeVInt(1)\n self.writeVInt(30)\n\n self.writeByte(1) # count brawlers selected\n self.writeDataReference(16, player.SelectedBrawlers[0]) # selected brawler\n self.writeString(player.Region) # location\n self.writeString(player.ContentCreator) # supported creator\n\n self.writeVInt(6) \n self.writeVInt(1) \n self.writeVInt(9) \n self.writeVInt(1) \n self.writeVInt(22) \n self.writeVInt(3) \n self.writeVInt(25) \n self.writeVInt(1) \n self.writeVInt(24) \n self.writeVInt(0)\n self.writeVInt(15)\n self.writeVInt(32447)\n self.writeVInt(28)\n\n\n self.writeVInt(0)\n\n self.writeVInt(1)\n for season in range(1):\n self.writeVInt(22-1)\n self.writeVInt(40000)\n self.writeBoolean(True)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(True)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeBoolean(True)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeBoolean(True)\n self.writeBoolean(True)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n\n self.writeVInt(0)\n\n self.writeBoolean(True)\n self.writeVInt(0)\n self.writeVInt(1)\n self.writeVInt(2)\n self.writeVInt(0) \n\n self.writeBoolean(True) # Vanity items\n self.writeVInt(len(player.OwnedThumbnails)+len(player.OwnedPins))\n for x in player.OwnedThumbnails:\n self.writeVInt(28)\n self.writeVInt(x)\n self.writeVInt(0)\n for x in player.OwnedPins:\n self.writeVInt(52)\n self.writeVInt(x)\n self.writeVInt(0)\n\n\n self.writeBoolean(False) # Power league season data\n\n self.writeInt(0)\n self.writeVInt(0)\n self.writeVInt(16)\n self.writeVInt(76)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeVInt(2023189)\n\n self.writeVInt(35) # event slot id\n self.writeVInt(1)\n self.writeVInt(2)\n self.writeVInt(3)\n self.writeVInt(4)\n self.writeVInt(5)\n self.writeVInt(6)\n self.writeVInt(7)\n self.writeVInt(8)\n self.writeVInt(9)\n self.writeVInt(10)\n self.writeVInt(11)\n self.writeVInt(12)\n self.writeVInt(13) \n self.writeVInt(14)\n self.writeVInt(15)\n self.writeVInt(16)\n self.writeVInt(17)\n self.writeVInt(18) \n self.writeVInt(19)\n self.writeVInt(20)\n self.writeVInt(21) \n self.writeVInt(22)\n self.writeVInt(23)\n self.writeVInt(24)\n self.writeVInt(25)\n self.writeVInt(26)\n self.writeVInt(27)\n self.writeVInt(28)\n self.writeVInt(29)\n self.writeVInt(30)\n self.writeVInt(31)\n self.writeVInt(32)\n self.writeVInt(33)\n self.writeVInt(34)\n self.writeVInt(35)\n\n self.writeVInt(1)\n\n self.writeVInt(4)\n self.writeVInt(7)\n self.writeVInt(1)\n self.writeVInt(0)\n self.writeVInt(72292)\n self.writeVInt(10) \n self.writeDataReference(15, 21) # map id\n self.writeVInt(-1)\n self.writeVInt(2)\n self.writeString(\"\")\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # MapMaker map structure array\n self.writeVInt(0)\n self.writeBoolean(False) # Power League array entry\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(-1)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(-1)\n self.writeVInt(0) \n self.writeVInt(0) \n self.writeVInt(0) \n self.writeBoolean(False) \n\n self.writeVInt(0)\n \n ByteStreamHelper.encodeIntList(self, [20, 35, 75, 140, 290, 480, 800, 1250, 1875, 2800])\n ByteStreamHelper.encodeIntList(self, [30, 80, 170, 360]) # Shop 
Coins Price\n ByteStreamHelper.encodeIntList(self, [300, 880, 2040, 4680]) # Shop Coins Amount\n\n self.writeVInt(0) \n\n self.writeVInt(1)\n self.writeVInt(41000086) # theme\n self.writeVInt(1)\n\n self.writeVInt(0) \n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeVInt(2)\n self.writeVInt(1)\n self.writeVInt(2)\n self.writeVInt(2)\n self.writeVInt(1)\n self.writeVInt(-1)\n self.writeVInt(2)\n self.writeVInt(1)\n self.writeVInt(4)\n\n ByteStreamHelper.encodeIntList(self, [0, 29, 79, 169, 349, 699])\n ByteStreamHelper.encodeIntList(self, [0, 160, 450, 500, 1250, 2500])\n\n self.writeLong(0, 1) # Player ID\n\n self.writeVInt(0) # Notification factory\n \n self.writeVInt(1)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0) \n self.writeVInt(0)\n self.writeBoolean(False) # Login Calendar\n self.writeVInt(0)\n self.writeBoolean(True) # Starr Road\n for i in range(7):\n self.writeVInt(0)\n\n self.writeVInt(0) # Mastery\n\n #BattleCard\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n\n self.writeVInt(0) #Brawler's BattleCards\n\n self.writeVInt(5)\n for i in range(5):\n self.writeDataReference(80, i)\n self.writeVInt(-1)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeInt(0)\n self.writeVInt(0) \n self.writeVInt(0)\n self.writeVInt(86400*24)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeBoolean(False)\n\n # end LogicClientHome\n\n self.writeVLong(player.ID[0], player.ID[1])\n self.writeVLong(player.ID[0], player.ID[1])\n self.writeVLong(player.ID[0], player.ID[1])\n self.writeStringReference(player.Name)\n self.writeBoolean(player.Registered)\n self.writeInt(-1)\n\n self.writeVInt(17)\n unlocked_brawler = [i['CardID'] for x,i in player.OwnedBrawlers.items()]\n self.writeVInt(len(unlocked_brawler) + 2)\n for x in unlocked_brawler:\n self.writeDataReference(23, x)\n self.writeVInt(-1)\n self.writeVInt(1)\n\n self.writeDataReference(5, 8)\n self.writeVInt(-1)\n self.writeVInt(player.Coins)\n\n self.writeDataReference(5, 23)\n self.writeVInt(-1)\n self.writeVInt(player.Blings)\n\n self.writeVInt(len(player.OwnedBrawlers)) # HeroScore\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(i[\"Trophies\"])\n\n self.writeVInt(len(player.OwnedBrawlers)) # HeroHighScore\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(i[\"HighestTrophies\"])\n\n self.writeVInt(0) # Array\n\n self.writeVInt(0) # HeroPower\n \n self.writeVInt(len(player.OwnedBrawlers)) # HeroLevel\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(i[\"PowerLevel\"]-1)\n\n self.writeVInt(0) # hero star power gadget and hypercharge\n\n self.writeVInt(len(player.OwnedBrawlers)) # HeroSeenState\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(2)\n\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n\n self.writeVInt(player.Gems) # Diamonds\n self.writeVInt(player.Gems) # Free Diamonds\n self.writeVInt(10) # Player Level\n self.writeVInt(100)\n self.writeVInt(0) # 
CumulativePurchasedDiamonds or Avatar User Level Tier | 10000 < Level Tier = 3 | 1000 < Level Tier = 2 | 0 < Level Tier = 1\n self.writeVInt(100) # Battle Count\n self.writeVInt(10) # WinCount\n self.writeVInt(80) # LoseCount\n self.writeVInt(50) # WinLooseStreak\n self.writeVInt(20) # NpcWinCount\n self.writeVInt(0) # NpcLoseCount\n self.writeVInt(2) # TutorialState | shouldGoToFirstTutorialBattle = State == 0\n self.writeVInt(12)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeString()\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(1)\n\n def decode(self):\n fields = {}\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24101\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "KeepAliveServerMessage", "path": "Heart/Packets/Server/Socket/KeepAliveServerMessage.py", "snippet": "class KeepAliveServerMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20108\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "PlayerProfileMessage", "path": "Heart/Packets/Server/Home/PlayerProfileMessage.py", "snippet": "class PlayerProfileMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVLong(fields[\"PlayerHighID\"], fields[\"PlayerLowID\"])\n self.writeDataReference(16,11) # \n self.writeVInt(70)\n for i in range(70):\n self.writeDataReference(16, i)\n self.writeDataReference(0)\n self.writeVInt(500) # trophies\n self.writeVInt(1250) # highestTrophies\n self.writeVInt(11) #power level\n \n self.writeVInt(18)\n\n self.writeVInt(1) \n self.writeVInt(1) # 3v3 victories\n\n self.writeVInt(2)\n self.writeVInt(528859) # total exp\n\n self.writeVInt(3)\n self.writeVInt(3) # current trophies\n\n self.writeVInt(4)\n self.writeVInt(4) # highest trophies\n\n self.writeVInt(5) \n self.writeVInt(5) # unlocked brawler?\n\n self.writeVInt(8)\n self.writeVInt(6) # solo victories\n\n self.writeVInt(11) \n self.writeVInt(7) # duo victories\n\n self.writeVInt(9) \n self.writeVInt(8) # highest level robo rumble\n\n self.writeVInt(12) \n self.writeVInt(9) # highest level boss fight\n\n self.writeVInt(13)\n self.writeVInt(10) # highest power league points\n\n self.writeVInt(14)\n self.writeVInt(11) # some power league stuff\n\n self.writeVInt(15)\n self.writeVInt(12) # most challenge win\n\n self.writeVInt(16) #highest level city rampage\n self.writeVInt(13)\n\n self.writeVInt(18) #highest solo power league rank\n self.writeVInt(14)\n\n self.writeVInt(17) #highest team power league rank\n self.writeVInt(15)\n\n self.writeVInt(19) # highest Club league rank\n self.writeVInt(16)\n\n self.writeVInt(20) # number fame\n self.writeVInt(1000)\n\n self.writeVInt(21)\n self.writeVInt(502052) #v50\n\n self.writeString(player.Name) #PlayerInfo\n self.writeVInt(100)\n self.writeVInt(28000000 + player.Thumbnail)\n self.writeVInt(43000000 + player.Namecolor)\n self.writeVInt(14)\n\n self.writeBoolean(True)\n self.writeVInt(300)\n\n self.writeString(\"hello world\")\n self.writeVInt(100)\n self.writeVInt(200)\n self.writeDataReference(29, 558)\n self.writeDataReference(0)\n self.writeDataReference(0)\n self.writeDataReference(0)\n 
self.writeDataReference(0)\n\n self.writeBoolean(True) #alliance\n self.writeLong(0,1) #alliance ID\n self.writeString(\"haccers\") #alliance name\n self.writeDataReference(8,1) # alliance icon\n self.writeVInt(1) # type\n self.writeVInt(1) # member count\n self.writeVInt(10000) # total trophies\n self.writeVInt(1) # minimum trophies to enter\n self.writeDataReference(0)\n self.writeString(\"RU\") #location\n self.writeVInt(4) # unknown\n self.writeBoolean(True) #is Family friendly\n self.writeVInt(0)\n \n\n self.writeDataReference(25, 1) #alliance role\n self.writeVInt(16)\n\n def decode(self):\n pass\n # fields = {}\n # fields[\"PlayerCount\"] = self.readVInt()\n # fields[\"Text\"] = self.readString()\n # fields[\"Unk1\"] = self.readVInt()\n # super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24113\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "MyAllianceMessage", "path": "Heart/Packets/Server/Home/MyAllianceMessage.py", "snippet": "class MyAllianceMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(1) # Online people in alliance\n self.writeBoolean(True) # isInAlliance\n self.writeDataReference(25, 4)\n self.writeLong(0, 1) # alliance ID\n self.writeString(player.ContentCreator) # alliance name\n self.writeDataReference(8, 37) # alliance icon\n self.writeVInt(3) # type\n self.writeVInt(1) # member count\n self.writeVInt(9500) # total trophies\n self.writeVInt(1) # minimum trophies to enter\n self.writeVInt(0) # 0\n self.writeString('RU') # location\n self.writeVInt(3) # unknown\n self.writeBoolean(True) # isFamilyFriendly\n self.writeVInt(0)\n\n def decode(self):\n fields = {}\n super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24399\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "AllianceDataMessage", "path": "Heart/Packets/Server/Home/AllianceDataMessage.py", "snippet": "class AllianceDataMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeBoolean(True)\n\n self.writeLong(0, 1) # alliance ID\n self.writeString(player.ContentCreator) # alliance name\n self.writeDataReference(8, 37) # alliance icon\n self.writeVInt(1) # type\n self.writeVInt(1) # member count\n self.writeVInt(player.Trophies) # total trophies\n self.writeVInt(0) # minimum trophies to enter\n self.writeVInt(0) # 0\n self.writeString('RU') # location\n self.writeVInt(1) # people online\n self.writeBoolean(True) # isFamilyFriendly\n self.writeVInt(0)\n\n self.writeString(\"this is the hacciest club in the world\")\n\n self.writeVInt(1) # member count\n self.writeLong(player.ID[0], player.ID[1]) # player ID\n self.writeVInt(2) # role\n self.writeVInt(player.Trophies) # trophies\n self.writeVInt(0) # status: 0=offline 2=online\n self.writeVInt(1) # last connected time seconds ?\n highestPowerLeagueRank = 2\n self.writeVInt(highestPowerLeagueRank)\n if highestPowerLeagueRank != 0:\n self.writeVInt(2) #solo\n self.writeVInt(1) #duo\n self.writeBoolean(False) # boolean always false?\n\n self.writeString(player.Name) # player name\n self.writeVInt(100) # VInt always 100\n self.writeVInt(28000000 + player.Thumbnail) # thumbnail\n self.writeVInt(43000000 + 
player.Namecolor) # name color\n self.writeVInt(46000000 + player.Namecolor)\n\n self.writeVInt(-1) # most people have it -1 but some with something\n self.writeBoolean(False) # whats this ? only 2/30 people have it true in my club\n week = 58 # week 58 of club league as of 2023/07/05, this number is 0 if you just arrived in the club\n self.writeVInt(week)\n if week != 0: # club league week number?\n self.writeVInt(3) # day\n self.writeVInt(18) # total club trophies earned\n self.writeVInt(0) # event day club trophies earned\n self.writeVInt(8) # total tickets used\n self.writeVInt(0) # event day tickets used\n self.writeVInt(6) # event day max tickets\n self.writeVInt(6) # event day tickets left\n self.writeVInt(0) # event day player ranking\n self.writeBoolean(True) # everyone have it to true\n self.writeVInt(200) # player experience lvl but why tf it doesn't show for some people\n\n def decode(self):\n fields = {}\n super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24301\n\n def getMessageVersion(self):\n return self.messageVersion" } ]
from Heart.Packets.Client.Authentification.ClientHelloMessage import ClientHelloMessage from Heart.Packets.Client.Authentification.LoginMessage import LoginMessage from Heart.Packets.Client.Battle.AskForBattleEndMessage import AskForBattleEndMessage from Heart.Packets.Client.Home.ChangeAvatarNameMessage import ChangeAvatarNameMessage from Heart.Packets.Client.Home.EndClientTurnMessage import EndClientTurnMessage from Heart.Packets.Client.Home.GoHomeFromOfflinePractiseMessage import GoHomeFromOfflinePractiseMessage from Heart.Packets.Client.Home.GoHomeMessage import GoHomeMessage from Heart.Packets.Client.Home.GetPlayerProfileMessage import GetPlayerProfileMessage from Heart.Packets.Client.Home.AskForAllianceDataMessage import AskForAllianceDataMessage from Heart.Packets.Client.Socket.KeepAliveMessage import KeepAliveMessage from Heart.Packets.Server.Authentification.LoginFailedMessage import LoginFailedMessage from Heart.Packets.Server.Authentification.LoginOkMessage import LoginOkMessage from Heart.Packets.Server.Authentification.OutOfSyncMessage import OutOfSyncMessage from Heart.Packets.Server.Authentification.ServerHelloMessage import ServerHelloMessage from Heart.Packets.Server.Battle.BattleEndMessage import BattleEndMessage from Heart.Packets.Server.Home.AvailableServerCommandMessage import AvailableServerCommandMessage from Heart.Packets.Server.Home.LobbyInfoMessage import LobbyInfoMessage from Heart.Packets.Server.Home.OwnHomeDataMessage import OwnHomeDataMessage from Heart.Packets.Server.Socket.KeepAliveServerMessage import KeepAliveServerMessage from Heart.Packets.Server.Home.PlayerProfileMessage import PlayerProfileMessage from Heart.Packets.Server.Home.MyAllianceMessage import MyAllianceMessage from Heart.Packets.Server.Home.AllianceDataMessage import AllianceDataMessage
13,938
class LogicLaserMessageFactory: messagesList = { 10055: 'AskPlayerJWTokenMessage', 10099: 'ClientCryptoErrorMessage',
class LogicLaserMessageFactory: messagesList = { 10055: 'AskPlayerJWTokenMessage', 10099: 'ClientCryptoErrorMessage',
10100: ClientHelloMessage,
0
2023-12-14 18:57:56+00:00
16k
pan-x-c/EE-LLM
megatron/core/models/gpt/gpt_layer_specs.py
[ { "identifier": "get_bias_dropout_add", "path": "megatron/core/fusions/fused_bias_dropout.py", "snippet": "def get_bias_dropout_add(training, fused):\n if fused:\n # jit scripting for a nn.module (with dropout) is not\n # triggering the fusion kernel. For now, we use two\n # different nn.functional routines to account for varying\n # dropout semantics during training and inference phases.\n if training:\n return bias_dropout_add_fused_train\n else:\n return bias_dropout_add_fused_inference\n else:\n return bias_dropout_add_unfused(training)" }, { "identifier": "FusedLayerNorm", "path": "megatron/core/fusions/fused_layer_norm.py", "snippet": "class FusedLayerNorm(torch.nn.Module):\n def __init__(\n self,\n hidden_size,\n eps=1e-5,\n persist_layer_norm=True,\n sequence_parallel=False,\n zero_centered_gamma=False,\n normalization=\"LayerNorm\",\n ):\n super().__init__()\n\n self.zero_centered_gamma = zero_centered_gamma\n self.normalization = normalization\n assert normalization == \"LayerNorm\", '({}) is not supported in ' 'FusedLayerNorm'.format(\n normalization\n )\n\n # List of hiddens sizes supported in the persistent layer norm kernel\n # If the hidden size is not supported, fall back to the non-persistent\n # kernel.\n persist_ln_hidden_sizes = [\n 1024,\n 1536,\n 2048,\n 2304,\n 3072,\n 3840,\n 4096,\n 5120,\n 6144,\n 8192,\n 10240,\n 12288,\n 12800,\n 15360,\n 16384,\n 18432,\n 20480,\n 24576,\n 25600,\n 30720,\n 32768,\n 40960,\n 49152,\n 65536,\n ]\n if hidden_size not in persist_ln_hidden_sizes or not HAVE_PERSIST_LAYER_NORM:\n persist_layer_norm = False\n\n if not persist_layer_norm and not HAVE_FUSED_LAYER_NORM:\n # TODO: Add pytorch only layer norm\n raise ValueError(f'Apex must currently be installed to use megatron core.')\n\n if isinstance(hidden_size, numbers.Integral):\n hidden_size = (hidden_size,)\n self.hidden_size = torch.Size(hidden_size)\n self.eps = eps\n self.weight = Parameter(torch.Tensor(*hidden_size))\n self.bias = Parameter(torch.Tensor(*hidden_size))\n self.reset_parameters()\n self.persist_layer_norm = persist_layer_norm\n self.sequence_parallel = sequence_parallel\n\n # set sequence parallelism flag on weight and bias parameters\n setattr(self.weight, 'sequence_parallel', self.sequence_parallel)\n setattr(self.bias, 'sequence_parallel', self.sequence_parallel)\n\n def reset_parameters(self):\n\n if self.zero_centered_gamma:\n init.zeros_(self.weight)\n init.zeros_(self.bias)\n else:\n init.ones_(self.weight)\n init.zeros_(self.bias)\n\n def forward(self, input):\n\n weight = self.weight + 1 if self.zero_centered_gamma else self.weight\n\n if self.persist_layer_norm:\n output = FastLayerNormFN.apply(input, weight, self.bias, self.eps)\n\n # Apex's fast layer norm function outputs a 'view' tensor (i.e., has\n # a populated '_base' field). This will result in schedule.py's\n # deallocate_output_tensor() throwing an error, so a viewless tensor is\n # created to prevent this.\n output = make_viewless_tensor(\n inp=output, requires_grad=input.requires_grad, keep_graph=True\n )\n\n else:\n output = FusedLayerNormAffineFunction.apply(\n input, weight, self.bias, self.hidden_size, self.eps\n )\n\n return output" }, { "identifier": "ColumnParallelLinear", "path": "megatron/core/tensor_parallel/layers.py", "snippet": "class ColumnParallelLinear(torch.nn.Module):\n \"\"\"Linear layer with column parallelism.\n\n The linear layer is defined as Y = XA + b. 
A is parallelized along\n its second dimension as A = [A_1, ..., A_p].\n\n Arguments:\n input_size: first dimension of matrix A.\n output_size: second dimension of matrix A.\n\n Keyword Arguments\n bias: If true, add bias\n gather_output: If true, call all-gather on output and make Y available\n to all GPUs, otherwise, every GPU will have its output\n which is Y_i = XA_i\n init_method: method to initialize weights. Note that bias is always set\n to zero.\n stride: For the strided linear layers.\n keep_master_weight_for_test: This was added for testing and should be\n set to False. It returns the master weights\n used for initialization.\n skip_bias_add: If True, do not add the bias term, instead\n return it to be added by the caller. This\n enables performance optimations where bias can\n be fused with other elementwise operations.\n skip_weight_param_allocation: If True, weight parameter is not allocated and must be passed\n as a keyword argument `weight` during the forward pass. Note\n that this does not affect bias, which will be allocated if\n bias is True. Defaults to False.\n is_expert: If True, the layer is treated as an MoE expert layer.\n config: ModelParallelConfig object\n\n \"\"\"\n\n def __init__(\n self,\n input_size,\n output_size,\n *,\n config: ModelParallelConfig,\n init_method: Callable,\n bias=True,\n gather_output=False,\n stride=1,\n keep_master_weight_for_test=False,\n skip_bias_add=False,\n skip_weight_param_allocation: bool = False,\n is_expert: bool = False,\n ):\n super(ColumnParallelLinear, self).__init__()\n\n # Keep input parameters\n self.input_size = input_size\n self.output_size = output_size\n self.gather_output = gather_output\n # Divide the weight matrix along the last dimension.\n world_size = get_tensor_model_parallel_world_size()\n self.output_size_per_partition = divide(output_size, world_size)\n self.skip_bias_add = skip_bias_add\n self.is_expert = is_expert\n self.expert_parallel = config.expert_model_parallel_size > 1\n self.config = config\n\n # Parameters.\n # Note: torch.nn.functional.linear performs XA^T + b and as a result\n # we allocate the transpose.\n # Initialize weight.\n if not skip_weight_param_allocation:\n if config.use_cpu_initialization:\n self.weight = Parameter(\n torch.empty(\n self.output_size_per_partition, self.input_size, dtype=config.params_dtype\n )\n )\n if config.perform_initialization:\n self.master_weight = _initialize_affine_weight_cpu(\n self.weight,\n self.output_size,\n self.input_size,\n self.output_size_per_partition,\n 0,\n init_method,\n stride=stride,\n return_master_weight=keep_master_weight_for_test,\n )\n else:\n self.weight = Parameter(\n torch.empty(\n self.output_size_per_partition,\n self.input_size,\n device=torch.cuda.current_device(),\n dtype=config.params_dtype,\n )\n )\n if config.perform_initialization:\n _initialize_affine_weight_gpu(\n self.weight,\n init_method,\n partition_dim=0,\n stride=stride,\n expert_parallel=(self.is_expert and self.expert_parallel),\n )\n\n setattr(self.weight, 'allreduce', not (self.is_expert and self.expert_parallel))\n else:\n self.weight = None\n\n if bias:\n if config.use_cpu_initialization:\n self.bias = Parameter(\n torch.empty(self.output_size_per_partition, dtype=config.params_dtype)\n )\n else:\n self.bias = Parameter(\n torch.empty(\n self.output_size_per_partition,\n device=torch.cuda.current_device(),\n dtype=config.params_dtype,\n )\n )\n set_tensor_model_parallel_attributes(self.bias, True, 0, stride)\n if config.perform_initialization:\n # Always 
initialize bias to zero.\n with torch.no_grad():\n self.bias.zero_()\n setattr(self.bias, 'allreduce', not (self.is_expert and self.expert_parallel))\n else:\n self.register_parameter('bias', None)\n\n self.async_tensor_model_parallel_allreduce = (\n config.async_tensor_model_parallel_allreduce and world_size > 1\n )\n\n self.sequence_parallel = config.sequence_parallel\n if self.sequence_parallel and world_size <= 1:\n warnings.warn(\n f\"`sequence_parallel` is set to `True`, but tensor model parallel size is {world_size}. \"\n f\"Disabling sequence parallel.\"\n )\n self.sequence_parallel = False\n\n if config.gradient_accumulation_fusion and not _grad_accum_fusion_available:\n raise RuntimeError(\n \"ColumnParallelLinear was called with gradient_accumulation_fusion set \"\n \"to True but the custom CUDA extension fused_weight_gradient_mlp_cuda \"\n \"module is not found. To use gradient_accumulation_fusion you must \"\n \"install APEX with --cpp_ext and --cuda_ext. For example: \"\n \"pip install --global-option=\\\"--cpp_ext\\\" --global-option=\\\"--cuda_ext .\\\" \"\n \"Note that the extension requires CUDA>=11. Otherwise, you must turn off \"\n \"gradient accumulation fusion.\"\n )\n self.gradient_accumulation_fusion = config.gradient_accumulation_fusion\n\n if self.async_tensor_model_parallel_allreduce and self.sequence_parallel:\n raise RuntimeError(\n \"`async_tensor_model_parallel_allreduce` and `sequence_parallel` \"\n \"cannot be enabled at the same time.\"\n )\n\n self._forward_impl = linear_with_grad_accumulation_and_async_allreduce\n self.explicit_expert_comm = self.is_expert and (\n self.sequence_parallel or self.expert_parallel\n )\n\n def forward(self, input_: torch.Tensor, weight: Optional[torch.Tensor] = None):\n \"\"\"Forward of ColumnParallelLinear\n\n Args:\n input_: 3D tensor whose order of dimension is [sequence, batch, hidden]\n\n weight (optional): weight tensor to use, compulsory when\n skip_weight_param_allocation is True.\n\n Returns:\n - output\n - bias\n\n \"\"\"\n if weight is None:\n if self.weight is None:\n raise RuntimeError(\n \"weight was not supplied to ColumnParallelLinear forward pass \"\n \"and skip_weight_param_allocation is True.\"\n )\n weight = self.weight\n else:\n # Check the weight passed in is the correct shape\n expected_shape = (self.output_size_per_partition, self.input_size)\n if weight.shape != expected_shape:\n raise RuntimeError(\n f\"supplied weight's shape is {tuple(weight.shape)}, \"\n f\"not {expected_shape} as expected\"\n )\n\n bias = self.bias if not self.skip_bias_add else None\n\n if (\n self.async_tensor_model_parallel_allreduce\n or self.sequence_parallel\n or self.explicit_expert_comm\n ):\n input_parallel = input_\n else:\n input_parallel = copy_to_tensor_model_parallel_region(input_)\n\n # Matrix multiply.\n if not weight.requires_grad:\n self._forward_impl = linear_with_frozen_weight\n else:\n self._forward_impl = linear_with_grad_accumulation_and_async_allreduce\n output_parallel = self._forward_impl(\n input=input_parallel,\n weight=weight,\n bias=bias,\n gradient_accumulation_fusion=self.gradient_accumulation_fusion,\n async_grad_allreduce=False\n if self.explicit_expert_comm\n else self.async_tensor_model_parallel_allreduce,\n sequence_parallel=False if self.explicit_expert_comm else self.sequence_parallel,\n )\n if self.gather_output:\n # All-gather across the partitions.\n assert not self.sequence_parallel\n output = gather_from_tensor_model_parallel_region(output_parallel)\n else:\n output = output_parallel\n 
output_bias = self.bias if self.skip_bias_add else None\n return output, output_bias" }, { "identifier": "RowParallelLinear", "path": "megatron/core/tensor_parallel/layers.py", "snippet": "class RowParallelLinear(torch.nn.Module):\n \"\"\"Linear layer with row parallelism.\n\n The linear layer is defined as Y = XA + b. A is parallelized along\n its first dimension and X along its second dimension as:\n - -\n | A_1 |\n | . |\n A = | . | X = [X_1, ..., X_p]\n | . |\n | A_p |\n - -\n Arguments:\n input_size: first dimension of matrix A.\n output_size: second dimension of matrix A.\n\n Keyword Arguments:\n bias: If true, add bias. Note that bias is not parallelized.\n input_is_parallel: If true, we assume that the input is already\n split across the GPUs and we do not split\n again.\n init_method: method to initialize weights. Note that bias is always set\n to zero.\n stride: For the strided linear layers.\n keep_master_weight_for_test: This was added for testing and should be\n set to False. It returns the master weights\n used for initialization.\n skip_bias_add: If True, do not add the bias term, instead\n return it to be added by the caller. This\n enables performance optimations where bias can\n be fused with other elementwise operations.\n is_expert: If True, the layer is treated as an MoE expert layer\n config: ModelParallelConfig object\n\n \"\"\"\n\n def __init__(\n self,\n input_size: int,\n output_size: int,\n *,\n config: ModelParallelConfig,\n init_method: Callable,\n bias: bool = True,\n input_is_parallel: bool = False,\n stride: int = 1,\n keep_master_weight_for_test: bool = False,\n skip_bias_add: bool = False,\n is_expert: bool = False,\n ):\n super(RowParallelLinear, self).__init__()\n\n # Keep input parameters\n self.input_size = input_size\n self.output_size = output_size\n self.input_is_parallel = input_is_parallel\n # Divide the weight matrix along the last dimension.\n world_size = get_tensor_model_parallel_world_size()\n self.input_size_per_partition = divide(input_size, world_size)\n self.skip_bias_add = skip_bias_add\n self.config = config\n self.is_expert = is_expert\n self.expert_parallel = config.expert_model_parallel_size > 1\n self.gradient_accumulation_fusion = config.gradient_accumulation_fusion\n self.sequence_parallel = config.sequence_parallel\n if self.sequence_parallel and not self.input_is_parallel:\n raise RuntimeError(\"To enable `sequence_parallel`, `input_is_parallel` must be `True`\")\n\n # Parameters.\n # Note: torch.nn.functional.linear performs XA^T + b and as a result\n # we allocate the transpose.\n # Initialize weight.\n if config.use_cpu_initialization:\n self.weight = Parameter(\n torch.empty(\n self.output_size, self.input_size_per_partition, dtype=config.params_dtype\n )\n )\n if config.perform_initialization:\n self.master_weight = _initialize_affine_weight_cpu(\n self.weight,\n self.output_size,\n self.input_size,\n self.input_size_per_partition,\n 1,\n init_method,\n stride=stride,\n return_master_weight=keep_master_weight_for_test,\n params_dtype=config.params_dtype,\n )\n else:\n self.weight = Parameter(\n torch.empty(\n self.output_size,\n self.input_size_per_partition,\n device=torch.cuda.current_device(),\n dtype=config.params_dtype,\n )\n )\n if config.perform_initialization:\n _initialize_affine_weight_gpu(\n self.weight,\n init_method,\n partition_dim=1,\n stride=stride,\n expert_parallel=(self.is_expert and self.expert_parallel),\n )\n setattr(self.weight, 'allreduce', not (self.is_expert and self.expert_parallel))\n\n if 
bias:\n if config.use_cpu_initialization:\n self.bias = Parameter(torch.empty(self.output_size, dtype=config.params_dtype))\n else:\n self.bias = Parameter(\n torch.empty(\n self.output_size,\n device=torch.cuda.current_device(),\n dtype=config.params_dtype,\n )\n )\n\n if config.perform_initialization:\n # Always initialize bias to zero.\n with torch.no_grad():\n self.bias.zero_()\n setattr(self.bias, 'allreduce', not (self.is_expert and self.expert_parallel))\n setattr(self.bias, 'sequence_parallel', self.sequence_parallel)\n else:\n self.register_parameter('bias', None)\n\n self._forward_impl = linear_with_grad_accumulation_and_async_allreduce\n self.explicit_expert_comm = self.is_expert and (\n self.sequence_parallel or self.expert_parallel\n )\n\n def forward(self, input_):\n \"\"\"Forward of RowParallelLinear\n\n Args:\n input_: 3D tensor whose order of dimension is [sequence, batch, hidden]\n\n Returns:\n - output\n - bias\n \"\"\"\n # Set up backprop all-reduce.\n if self.input_is_parallel:\n input_parallel = input_\n else:\n assert not self.sequence_parallel\n input_parallel = scatter_to_tensor_model_parallel_region(input_)\n # Matrix multiply.\n if not self.weight.requires_grad:\n self._forward_impl = linear_with_frozen_weight\n else:\n self._forward_impl = linear_with_grad_accumulation_and_async_allreduce\n output_parallel = self._forward_impl(\n input=input_parallel,\n weight=self.weight,\n bias=None,\n gradient_accumulation_fusion=self.gradient_accumulation_fusion,\n async_grad_allreduce=False,\n sequence_parallel=False,\n )\n\n # All-reduce across all the partitions.\n if self.explicit_expert_comm:\n assert self.skip_bias_add\n output_ = output_parallel\n elif self.sequence_parallel:\n output_ = reduce_scatter_to_sequence_parallel_region(output_parallel)\n else:\n output_ = reduce_from_tensor_model_parallel_region(output_parallel)\n if not self.skip_bias_add:\n output = (output_ + self.bias) if self.bias is not None else output_\n output_bias = None\n else:\n output = output_\n output_bias = self.bias\n return output, output_bias" }, { "identifier": "SelfAttention", "path": "megatron/core/transformer/attention.py", "snippet": "class SelfAttention(Attention):\n \"\"\"Self-attention layer class\n\n Self-attention layer takes input with size [s, b, h]\n and returns output of the same size.\n \"\"\"\n\n def __init__(\n self,\n config: TransformerConfig,\n submodules: SelfAttentionSubmodules,\n layer_number: int = 1,\n attn_mask_type=AttnMaskType.padding,\n **kwargs,\n ):\n super().__init__(\n config=config,\n submodules=submodules,\n layer_number=layer_number,\n attn_mask_type=attn_mask_type,\n **kwargs,\n )\n\n self.linear_qkv = build_module(\n submodules.linear_qkv,\n self.config.hidden_size,\n self.query_projection_size + 2 * self.kv_projection_size,\n config=self.config,\n init_method=self.config.init_method,\n bias=self.config.add_bias_linear,\n skip_bias_add=False,\n )\n\n def get_query_key_value_tensors(self, hidden_states, key_value_states=None):\n \"\"\"\n Derives `query`, `key` and `value` tensors from `hidden_states`.\n \"\"\"\n # Attention heads [sq, b, h] --> [sq, b, ng * (np/ng + 2) * hn)]\n mixed_qkv, _ = self.linear_qkv(hidden_states)\n\n # [sq, b, hp] --> [sq, b, ng, (np/ng + 2) * hn]\n new_tensor_shape = mixed_qkv.size()[:-1] + (\n self.num_query_groups_per_partition,\n (\n (self.num_attention_heads_per_partition // self.num_query_groups_per_partition + 2)\n * self.hidden_size_per_attention_head\n ),\n )\n mixed_qkv = mixed_qkv.view(*new_tensor_shape)\n\n # [sq, 
b, ng, (np/ng + 2) * hn] --> [sq, b, ng, np/ng * hn], [sq, b, ng, hn], [sq, b, ng, hn]\n (query, key, value) = torch.split(\n mixed_qkv,\n [\n (\n self.num_attention_heads_per_partition\n // self.num_query_groups_per_partition\n * self.hidden_size_per_attention_head\n ),\n self.hidden_size_per_attention_head,\n self.hidden_size_per_attention_head,\n ],\n dim=3,\n )\n # [sq, b, ng, np/ng * hn] -> [sq, b, np, hn]\n query = query.reshape(query.size(0), query.size(1), -1, self.hidden_size_per_attention_head)\n\n return query, key, value" }, { "identifier": "SelfAttentionSubmodules", "path": "megatron/core/transformer/attention.py", "snippet": "class SelfAttentionSubmodules:\n linear_qkv: Union[ModuleSpec, type] = None\n dot_product_attention: Union[ModuleSpec, type] = None\n linear_proj: Union[ModuleSpec, type] = None" }, { "identifier": "TEDotProductAttention", "path": "megatron/core/transformer/custom_layers/transformer_engine.py", "snippet": "class TEDotProductAttention(te.pytorch.DotProductAttention):\n \"\"\"\n Wrapper for the Transformer-Engine's `DotProductAttention` layer that also\n has \"flash attention\" enabled.\n\n Note that if Megatron's parallel_state has not been initialized yet, the\n tp_group and cp_group passed to TE will be None and must be set later\n via set_tensor_parallel_group() and set_context_parallel_group().\n \"\"\"\n\n cp_stream: torch.cuda.Stream = None\n\n def __init__(\n self,\n config: TransformerConfig,\n layer_number: int = 1,\n attn_mask_type: AttnMaskType = AttnMaskType.padding,\n **kwargs\n ):\n self.config = config\n\n # Only Transformer-Engine version > 0.13.0 supports context parallelism\n te_version = packaging.version.Version(version(\"transformer-engine\"))\n if te_version > packaging.version.Version(\"0.13.0\"):\n if getattr(TEDotProductAttention, \"cp_stream\") is None:\n TEDotProductAttention.cp_stream = torch.cuda.Stream()\n kwargs[\"cp_group\"] = get_context_parallel_group(check_initialized=False)\n kwargs[\"cp_global_ranks\"] = get_context_parallel_global_ranks(check_initialized=False)\n kwargs[\"cp_stream\"] = TEDotProductAttention.cp_stream\n else:\n assert (\n self.config.context_parallel_size == 1\n ), \"Only Transformer-Engine version > 0.13.0 supports context parallelism\"\n\n super().__init__(\n num_attention_heads=self.config.num_attention_heads,\n kv_channels=self.config.kv_channels,\n attention_dropout=self.config.attention_dropout,\n layer_number=layer_number,\n attn_mask_type=attn_mask_type.name,\n sequence_parallel=self.config.sequence_parallel,\n tp_size=self.config.tensor_model_parallel_size,\n get_rng_state_tracker=get_cuda_rng_tracker,\n tp_group=get_tensor_model_parallel_group(check_initialized=False),\n **kwargs,\n )" }, { "identifier": "TELayerNormColumnParallelLinear", "path": "megatron/core/transformer/custom_layers/transformer_engine.py", "snippet": "class TELayerNormColumnParallelLinear(te.pytorch.LayerNormLinear):\n \"\"\"\n Wrapper for the Transformer-Engine's `LayerNormLinear` layer that combines\n layernorm and linear layers\n \"\"\"\n\n def __init__(\n self,\n input_size: int,\n output_size: int,\n config: TransformerConfig,\n init_method: Callable,\n bias: bool,\n skip_bias_add: bool,\n **kwargs\n ):\n self.config = config\n # TE returns a zero length Tensor when bias=False and\n # return_bias=True, but we prefer None. So in that case we\n # tell TE to not return the bias, and return None\n # ourselves. 
This way our forward always returns two values\n # and we don't have to deal with the zero length Tensor.\n self.te_return_bias = skip_bias_add and bias\n\n # Only Transformer-Engine version >= 0.11.0 supports `RMSNorm`\n te_version = packaging.version.Version(version(\"transformer-engine\"))\n if te_version >= packaging.version.Version(\"0.11.0\"):\n kwargs[\"normalization\"] = self.config.normalization\n\n super().__init__(\n in_features=input_size,\n out_features=output_size,\n bias=bias,\n sequence_parallel=self.config.sequence_parallel,\n fuse_wgrad_accumulation=self.config.gradient_accumulation_fusion,\n tp_group=get_tensor_model_parallel_group(check_initialized=False),\n tp_size=self.config.tensor_model_parallel_size,\n get_rng_state_tracker=get_cuda_rng_tracker,\n init_method=init_method,\n params_dtype=self.config.params_dtype,\n parallel_mode=\"column\",\n return_bias=self.te_return_bias,\n **_get_extra_te_kwargs(config),\n )\n\n def forward(self, x):\n out = super().forward(x)\n\n # TE only returns a tuple when return_bias is True, otherwise\n # it returns a single Tensor, we always want to return two\n # values regardless of the arguments.\n if self.te_return_bias:\n return out\n return out, None" }, { "identifier": "TERowParallelLinear", "path": "megatron/core/transformer/custom_layers/transformer_engine.py", "snippet": "class TERowParallelLinear(TELinear):\n \"\"\"\n Wrapper for the Transformer-Engine's `Linear` layer but specialized similar\n to megatron's `RowParallelLinear` layer.\n \"\"\"\n\n def __init__(self, input_size: int, output_size: int, config: TransformerConfig, **kwargs):\n self.config = config\n super().__init__(\n input_size=input_size,\n output_size=output_size,\n config=self.config,\n parallel_mode=\"row\",\n **kwargs,\n )" }, { "identifier": "DotProductAttention", "path": "megatron/core/transformer/dot_product_attention.py", "snippet": "class DotProductAttention(MegatronModule):\n \"\"\"\n Region where selective activation recomputation is applied.\n This region is memory intensive but less compute intensive which\n makes activation checkpointing more efficient for LLMs (20B+).\n See Reducing Activation Recomputation in Large Transformer Models: https://arxiv.org/abs/2205.05198 for more details.\n\n We use the following notation:\n h: hidden size\n n: number of attention heads\n p: number of tensor model parallel partitions\n b: batch size\n s: sequence length\n \"\"\"\n\n def __init__(\n self, config: TransformerConfig, layer_number: int = 1, attn_mask_type=AttnMaskType.padding\n ):\n super().__init__(config=config)\n\n self.config: TransformerConfig = config\n\n assert (\n self.config.context_parallel_size == 1\n ), \"Context parallelism is only supported by TEDotProductAttention!\"\n\n self.layer_number = max(1, layer_number)\n self.attn_mask_type = attn_mask_type\n\n projection_size = self.config.kv_channels * config.num_attention_heads\n\n # Per attention head and per partition values.\n world_size = parallel_state.get_tensor_model_parallel_world_size()\n self.hidden_size_per_partition = divide(projection_size, world_size)\n self.hidden_size_per_attention_head = divide(projection_size, config.num_attention_heads)\n self.num_attention_heads_per_partition = divide(config.num_attention_heads, world_size)\n\n coeff = None\n self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)\n if self.config.apply_query_key_layer_scaling:\n coeff = self.layer_number\n self.norm_factor *= coeff\n\n self.scale_mask_softmax = FusedScaleMaskSoftmax(\n 
input_in_fp16=self.config.fp16,\n input_in_bf16=self.config.bf16,\n attn_mask_type=self.attn_mask_type,\n scaled_masked_softmax_fusion=self.config.masked_softmax_fusion,\n mask_func=attention_mask_func,\n softmax_in_fp32=self.config.attention_softmax_in_fp32,\n scale=coeff,\n )\n\n # Dropout. Note that for a single iteration, this layer will generate\n # different outputs on different number of parallel partitions but\n # on average it should not be partition dependent.\n self.attention_dropout = torch.nn.Dropout(self.config.attention_dropout)\n\n def forward(\n self, query_layer: Tensor, key_layer: Tensor, value_layer: Tensor, attention_mask: Tensor\n ):\n\n # ===================================\n # Raw attention scores. [b, n/p, s, s]\n # ===================================\n\n # [b, np, sq, sk]\n output_size = (\n query_layer.size(1),\n query_layer.size(2),\n query_layer.size(0),\n key_layer.size(0),\n )\n\n # [sq, b, np, hn] -> [sq, b * np, hn]\n # This will be a simple view when doing normal attention, but in group query attention\n # the key and value tensors are repeated to match the queries so you can't use simple strides\n # to extract the queries.\n query_layer = query_layer.reshape(output_size[2], output_size[0] * output_size[1], -1)\n # [sk, b, np, hn] -> [sk, b * np, hn]\n key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)\n\n # preallocting input tensor: [b * np, sq, sk]\n matmul_input_buffer = parallel_state.get_global_memory_buffer().get_tensor(\n (output_size[0] * output_size[1], output_size[2], output_size[3]),\n query_layer.dtype,\n \"mpu\",\n )\n\n # Raw attention scores. [b * np, sq, sk]\n matmul_result = torch.baddbmm(\n matmul_input_buffer,\n query_layer.transpose(0, 1), # [b * np, sq, hn]\n key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]\n beta=0.0,\n alpha=(1.0 / self.norm_factor),\n )\n\n # change view to [b, np, sq, sk]\n attention_scores = matmul_result.view(*output_size)\n\n # ===========================\n # Attention probs and dropout\n # ===========================\n\n # attention scores and attention mask [b, np, sq, sk]\n attention_probs: Tensor = self.scale_mask_softmax(attention_scores, attention_mask)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n\n if not self.config.sequence_parallel:\n with tensor_parallel.get_cuda_rng_tracker().fork():\n attention_probs = self.attention_dropout(attention_probs)\n else:\n attention_probs = self.attention_dropout(attention_probs)\n\n # =========================\n # Context layer. 
[sq, b, hp]\n # =========================\n\n # value_layer -> context layer.\n # [sk, b, np, hn] --> [b, np, sq, hn]\n\n # context layer shape: [b, np, sq, hn]\n output_size = (\n value_layer.size(1),\n value_layer.size(2),\n query_layer.size(0),\n value_layer.size(3),\n )\n\n # change view [sk, b * np, hn]\n value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)\n\n # change view [b * np, sq, sk]\n attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)\n\n # matmul: [b * np, sq, hn]\n context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))\n\n # change view [b, np, sq, hn]\n context_layer = context_layer.view(*output_size)\n\n # [b, np, sq, hn] --> [sq, b, np, hn]\n context_layer = context_layer.permute(2, 0, 1, 3).contiguous()\n\n # [sq, b, np, hn] --> [sq, b, hp]\n new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n return context_layer" }, { "identifier": "AttnMaskType", "path": "megatron/core/transformer/enums.py", "snippet": "class AttnMaskType(enum.Enum):\n padding = 1\n causal = 2" }, { "identifier": "MLP", "path": "megatron/core/transformer/mlp.py", "snippet": "class MLP(MegatronModule):\n \"\"\"\n MLP will take the input with h hidden state, project it to 4*h\n hidden dimension, perform nonlinear transformation, and project the\n state back into h hidden dimension.\n\n\n Returns an output and a bias to be added to the output.\n If config.add_bias_linear is False, the bias returned is None.\n\n We use the following notation:\n h: hidden size\n p: number of tensor model parallel partitions\n b: batch size\n s: sequence length\n \"\"\"\n\n def __init__(\n self, config: TransformerConfig, submodules: MLPSubmodules, is_expert: bool = False\n ):\n super().__init__(config=config)\n\n self.config: TransformerConfig = config\n\n # If this is a gated linear unit we double the output width, see https://arxiv.org/pdf/2002.05202.pdf\n ffn_hidden_size = self.config.ffn_hidden_size\n if self.config.gated_linear_unit:\n ffn_hidden_size *= 2\n\n self.linear_fc1 = build_module(\n submodules.linear_fc1,\n self.config.hidden_size,\n ffn_hidden_size,\n config=self.config,\n init_method=self.config.init_method,\n gather_output=False,\n bias=self.config.add_bias_linear,\n skip_bias_add=True,\n is_expert=is_expert,\n )\n\n if self.config.gated_linear_unit:\n\n def glu(x):\n x = torch.chunk(x, 2, dim=-1)\n return self.config.activation_func(x[0]) * x[1]\n\n self.activation_func = glu\n else:\n self.activation_func = self.config.activation_func\n\n self.linear_fc2 = build_module(\n submodules.linear_fc2,\n self.config.ffn_hidden_size,\n self.config.hidden_size,\n config=self.config,\n init_method=self.config.output_layer_init_method,\n bias=self.config.add_bias_linear,\n input_is_parallel=True,\n skip_bias_add=True,\n is_expert=is_expert,\n )\n\n def forward(self, hidden_states):\n\n # [s, b, 4 * h/p]\n intermediate_parallel, bias_parallel = self.linear_fc1(hidden_states)\n\n if self.config.bias_gelu_fusion:\n assert self.config.add_bias_linear is True\n assert self.activation_func == F.gelu\n intermediate_parallel = bias_gelu_impl(intermediate_parallel, bias_parallel)\n else:\n if bias_parallel is not None:\n intermediate_parallel = intermediate_parallel + bias_parallel\n intermediate_parallel = self.activation_func(intermediate_parallel)\n\n # [s, b, h]\n output, output_bias = 
self.linear_fc2(intermediate_parallel)\n\n return output, output_bias" }, { "identifier": "MLPSubmodules", "path": "megatron/core/transformer/mlp.py", "snippet": "class MLPSubmodules:\n linear_fc1: Union[ModuleSpec, type] = None\n linear_fc2: Union[ModuleSpec, type] = None" }, { "identifier": "ModuleSpec", "path": "megatron/core/transformer/spec_utils.py", "snippet": "class ModuleSpec:\n \"\"\"This is a Module Specification dataclass.\n\n Specification defines the location of the module (to import dynamically)\n or the imported module itself. It also defines the params that need to be\n passed to initialize the module.\n\n Args:\n module (Union[Tuple, type]): A tuple describing the location of the\n module class e.g. `(module.location, ModuleClass)` or the imported\n module class itself e.g. `ModuleClass` (which is already imported\n using `from module.location import ModuleClass`).\n params (dict): A dictionary of params that need to be passed while init.\n\n \"\"\"\n\n module: Union[Tuple, type]\n params: dict = field(default_factory=lambda: {})\n submodules: type = None" }, { "identifier": "SwitchMLP", "path": "megatron/core/transformer/switch_mlp.py", "snippet": "class SwitchMLP(MegatronModule):\n \"\"\"\n Top-1 Mixture of Experts Layer. Routes input to one of N MLP \"experts\"\n Curently supports Sinkhorn based expert routing.\n \"\"\"\n\n def __init__(self, config: TransformerConfig, submodules: MLPSubmodules):\n super().__init__(config=config)\n\n self.config: TransformerConfig = config\n\n self.router = torch.nn.Linear(self.config.hidden_size, self.config.num_moe_experts)\n self.add_bias = config.add_bias_linear\n self.sequence_parallel = config.sequence_parallel\n self.route_algo = sinkhorn\n self.router_activation = torch.sigmoid\n self.expert_parallel_size = parallel_state.get_expert_model_parallel_world_size()\n\n assert self.config.num_moe_experts % self.expert_parallel_size == 0\n self.num_local_experts = self.config.num_moe_experts // self.expert_parallel_size\n local_expert_indices_offset = (\n parallel_state.get_expert_model_parallel_rank() * self.num_local_experts\n )\n self.local_expert_indices = [\n local_expert_indices_offset + i for i in range(self.num_local_experts)\n ]\n\n self.local_experts = torch.nn.ModuleList()\n for _ in range(self.num_local_experts):\n expert = MLP(self.config, submodules, is_expert=True)\n self.local_experts.append(expert)\n\n def gather_indices(self, local_indices):\n \"\"\" Gather tensors and concatenate along the first dimension.\"\"\"\n group = get_tensor_and_expert_parallel_group()\n world_size = torch.distributed.get_world_size(group=group)\n # Bypass the function if we are using only 1 GPU.\n if world_size == 1:\n return local_indices\n\n dim_size = list(local_indices.size())\n dim_size[0] = dim_size[0] * world_size\n\n # TODO pre allocate memory\n output = torch.empty(\n dim_size, dtype=local_indices.dtype, device=torch.cuda.current_device()\n )\n torch.distributed._all_gather_base(output, local_indices.contiguous(), group=group)\n return output\n\n def forward(self, hidden_states):\n hidden_shape = hidden_states.shape\n route = self.router(hidden_states)\n route = route.view(-1, self.config.num_moe_experts)\n\n if self.training:\n with torch.no_grad():\n norm_route = self.route_algo(\n route.detach().to(dtype=torch.float32)\n ) # explicit fp32 conversion for stability\n _, max_ind = torch.max(norm_route, dim=1)\n route = self.router_activation(route)\n max_prob = route[torch.arange(route.size(0)), max_ind]\n else:\n route = 
self.router_activation(route)\n max_prob, max_ind = torch.max(route, dim=1)\n\n max_prob = torch.unsqueeze(max_prob, 1)\n hidden_states = hidden_states.view(-1, hidden_shape[-1])\n\n if self.sequence_parallel or (self.expert_parallel_size > 1):\n global_hidden_states = tensor_parallel.gather_from_sequence_parallel_region_to_moe(\n hidden_states\n )\n global_indices = self.gather_indices(max_ind)\n else:\n global_hidden_states = hidden_states\n global_indices = max_ind\n\n output_total = torch.zeros_like(global_hidden_states)\n if self.add_bias:\n output_bias_total = torch.zeros_like(global_hidden_states)\n\n for expert_num, expert in enumerate(self.local_experts):\n local_expert_index = self.local_expert_indices[expert_num]\n local_indices = (global_indices == local_expert_index).nonzero()\n hidden = global_hidden_states[local_indices, :]\n output, output_bias = expert(hidden)\n\n output_total[local_indices, :] = output\n if self.add_bias:\n output_bias = output_bias.expand_as(output)\n output_bias_total[local_indices, :] = output_bias\n\n if self.sequence_parallel or (self.expert_parallel_size > 1):\n output_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe(\n output_total\n )\n if self.add_bias:\n output_bias_total = tensor_parallel.reduce_scatter_to_sequence_parallel_region_from_moe(\n output_bias_total\n )\n # bias is duplicated across tensor parallelism ranks;\n # reduce scatter reduces bias across tensor parallel_ranks\n output_bias_total = (\n output_bias_total / parallel_state.get_tensor_model_parallel_world_size()\n )\n\n output_total = output_total * max_prob\n output_total = output_total.view(hidden_shape)\n if self.add_bias:\n output_bias_total = output_bias_total * max_prob\n output_bias_total = output_bias_total.view(hidden_shape)\n else:\n output_bias_total = None\n\n return output_total, output_bias_total" }, { "identifier": "TransformerLayer", "path": "megatron/core/transformer/transformer_layer.py", "snippet": "class TransformerLayer(MegatronModule):\n \"\"\"A single transformer layer.\n\n Transformer layer takes input with size [s, b, h] and returns an\n output of the same size.\n \"\"\"\n\n def __init__(\n self,\n config: TransformerConfig,\n submodules: TransformerLayerSubmodules,\n layer_number: int = 1,\n self_attn_mask_type=AttnMaskType.padding,\n ):\n super().__init__(config=config)\n self.config: TransformerConfig = config\n\n self.layer_number = layer_number + self._get_layer_offset()\n\n self.self_attn_mask_type = self_attn_mask_type\n\n ## [Module 1: Input Layernorm] Optional Layernorm on the input data\n # TODO: add pytorch only layernorm\n self.input_layernorm = build_module(\n submodules.input_layernorm,\n hidden_size=self.config.hidden_size,\n eps=self.config.layernorm_epsilon,\n persist_layer_norm=self.config.persist_layer_norm,\n sequence_parallel=self.config.sequence_parallel,\n zero_centered_gamma=self.config.layernorm_zero_centered_gamma,\n normalization=self.config.normalization,\n )\n\n ## [Module 2: SelfAttention]\n self.self_attention = build_module(\n submodules.self_attention, config=self.config, layer_number=layer_number,\n )\n\n ## [Module 3: BiasDropoutFusion]\n self.self_attn_bda = build_module(submodules.self_attn_bda)\n\n ## [Module 4: Post SelfAttention] Optional Layernorm after self-attn\n self.pre_cross_attn_layernorm = build_module(\n submodules.pre_cross_attn_layernorm,\n hidden_size=self.config.hidden_size,\n eps=self.config.layernorm_epsilon,\n persist_layer_norm=self.config.persist_layer_norm,\n 
sequence_parallel=self.config.sequence_parallel,\n zero_centered_gamma=self.config.layernorm_zero_centered_gamma,\n normalization=self.config.normalization,\n )\n\n ## [Module 5: CrossAttention]\n self.cross_attention = build_module(\n submodules.cross_attention, config=self.config, layer_number=layer_number,\n )\n\n ## [Module 6: BiasDropoutFusion]\n self.cross_attn_bda = build_module(submodules.cross_attn_bda)\n\n ## [Module 7: Post Cross Attention] Optional Layernorm after cross-attn\n self.pre_mlp_layernorm = build_module(\n submodules.pre_mlp_layernorm,\n hidden_size=self.config.hidden_size,\n eps=self.config.layernorm_epsilon,\n persist_layer_norm=self.config.persist_layer_norm,\n sequence_parallel=self.config.sequence_parallel,\n zero_centered_gamma=self.config.layernorm_zero_centered_gamma,\n normalization=self.config.normalization,\n )\n\n ## [Module 8: MLP block]\n # TODO how to set the gpt_layer_spec.py when we have moe_frequency > 1,\n # where MLP and SwitchMLP both appear alternately?\n self.mlp = build_module(submodules.mlp, config=self.config)\n\n ## [Module 9: BiasDropoutFusion]\n self.mlp_bda = build_module(submodules.mlp_bda)\n\n # @jcasper how should we handle nvfuser?\n # Set bias+dropout+add fusion grad_enable execution handler.\n # TORCH_MAJOR = int(torch.__version__.split('.')[0])\n # TORCH_MINOR = int(torch.__version__.split('.')[1])\n # use_nvfuser = TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 10)\n # self.bias_dropout_add_exec_handler = nullcontext if use_nvfuser else torch.enable_grad\n self.bias_dropout_add_exec_handler = torch.enable_grad\n\n def _get_layer_offset(self):\n\n pipeline_rank = parallel_state.get_pipeline_model_parallel_rank()\n\n num_layers_per_pipeline_rank = (\n self.config.num_layers // parallel_state.get_pipeline_model_parallel_world_size()\n )\n\n if parallel_state.get_virtual_pipeline_model_parallel_world_size() is not None:\n vp_rank = parallel_state.get_virtual_pipeline_model_parallel_rank()\n vp_size = parallel_state.get_virtual_pipeline_model_parallel_world_size()\n\n total_num_layers = self.config.num_layers\n num_layers_per_virtual_rank = num_layers_per_pipeline_rank // vp_size\n total_virtual_chunks = total_num_layers // vp_size\n offset = vp_rank * total_virtual_chunks + (pipeline_rank * num_layers_per_virtual_rank)\n\n else:\n # Each stage gets a contiguous set of layers.\n if parallel_state.get_pipeline_model_parallel_world_size() > 1:\n offset = pipeline_rank * num_layers_per_pipeline_rank\n else:\n offset = 0\n\n return offset\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n context=None,\n context_mask=None,\n inference_params=None,\n rotary_pos_emb=None,\n ):\n # hidden_states: [s, b, h]\n\n # Residual connection.\n residual = hidden_states\n\n # Optional Input Layer norm\n input_layernorm_output = self.input_layernorm(hidden_states)\n\n # Self attention.\n attention_output_with_bias = self.self_attention(\n input_layernorm_output,\n attention_mask=attention_mask,\n inference_params=inference_params,\n rotary_pos_emb=rotary_pos_emb,\n )\n\n # TODO: could we move `bias_dropout_add_exec_handler` itself\n # inside the module provided in the `bias_dropout_add_spec` module?\n with self.bias_dropout_add_exec_handler():\n hidden_states = self.self_attn_bda(self.training, self.config.bias_dropout_fusion)(\n attention_output_with_bias, residual, self.config.hidden_dropout\n )\n\n # Residual connection.\n residual = hidden_states\n\n # Optional Layer norm after self-attention\n pre_cross_attn_layernorm_output = 
self.pre_cross_attn_layernorm(hidden_states)\n\n # Cross attention.\n attention_output_with_bias = self.cross_attention(\n pre_cross_attn_layernorm_output,\n attention_mask=attention_mask,\n context=context,\n inference_params=inference_params,\n )\n\n # TODO: could we move `bias_dropout_add_exec_handler` itself\n # inside the module provided in the `bias_dropout_add_spec` module?\n with self.bias_dropout_add_exec_handler():\n hidden_states = self.cross_attn_bda(self.training, self.config.bias_dropout_fusion)(\n attention_output_with_bias, residual, self.config.hidden_dropout\n )\n\n # Residual connection.\n residual = hidden_states\n\n # Optional Layer norm post the cross-attention.\n pre_mlp_layernorm_output = self.pre_mlp_layernorm(hidden_states)\n\n # MLP.\n mlp_output_with_bias = self.mlp(pre_mlp_layernorm_output)\n\n # TODO: could we move `bias_dropout_add_exec_handler` itself\n # inside the module provided in the `bias_dropout_add_spec` module?\n with self.bias_dropout_add_exec_handler():\n hidden_states = self.mlp_bda(self.training, self.config.bias_dropout_fusion)(\n mlp_output_with_bias, residual, self.config.hidden_dropout\n )\n\n # Jit compiled function creates 'view' tensor. This tensor\n # potentially gets saved in the MPU checkpoint function context,\n # which rejects view tensors. While making a viewless tensor here\n # won't result in memory savings (like the data loader, or\n # p2p_communication), it serves to document the origin of this\n # 'view' tensor.\n output = make_viewless_tensor(\n inp=hidden_states, requires_grad=hidden_states.requires_grad, keep_graph=True\n )\n\n return output\n\n def sharded_state_dict(self, prefix=''):\n\n # state_dict = self.state_dict(prefix=prefix, keep_vars=True)\n state_dict = self.state_dict(keep_vars=True)\n\n tensor_parallel_layers_axis_map = {\n 'self_attention.linear_qkv.weight': 0,\n 'self_attention.linear_qkv.bias': 0,\n 'self_attention.linear_proj.weight': 1,\n 'mlp.linear_fc1.weight': 0,\n 'mlp.linear_fc1.bias': 0,\n 'mlp.linear_fc2.weight': 1,\n }\n\n offset = self._get_layer_offset()\n num_layers = self.config.num_layers\n\n sharded_state_dict = {}\n\n for layer_name in state_dict.keys():\n tensor = state_dict[layer_name]\n global_layer_offset = self.layer_number - 1 # self.layer_number starts at 1\n layer_key = f'{prefix}{global_layer_offset - offset}.{layer_name}' # module list index in TransformerBlock\n sharded_offsets = [(0, global_layer_offset, num_layers)] # PP sharding\n\n if layer_name in tensor_parallel_layers_axis_map:\n tp_axis = tensor_parallel_layers_axis_map[layer_name]\n # TP sharding\n sharded_offsets.append(\n [\n tp_axis + 1, # +1 for PP dimension\n parallel_state.get_tensor_model_parallel_rank(),\n parallel_state.get_tensor_model_parallel_world_size(),\n ]\n )\n replica_id = parallel_state.get_data_parallel_rank()\n else:\n replica_id = (\n parallel_state.get_data_parallel_rank()\n * parallel_state.get_data_parallel_world_size()\n + parallel_state.get_tensor_model_parallel_rank()\n )\n\n if layer_name.endswith('._extra_state'):\n sharded_state_dict[layer_key] = ShardedObject(\n f'{prefix}{layer_name}',\n tensor,\n (num_layers,),\n (global_layer_offset,),\n replica_id,\n )\n\n else:\n sharded_state_dict[layer_key] = ShardedTensor.from_rank_offsets(\n f'{prefix}{layer_name}',\n tensor,\n *sharded_offsets,\n replica_id=replica_id,\n prepend_axis_num=1, # for PP sharding\n )\n\n return sharded_state_dict" }, { "identifier": "TransformerLayerSubmodules", "path": "megatron/core/transformer/transformer_layer.py", 
"snippet": "class TransformerLayerSubmodules:\n input_layernorm: Union[ModuleSpec, type] = IdentityOp\n self_attention: Union[ModuleSpec, type] = IdentityOp\n self_attn_bda: Union[ModuleSpec, type] = IdentityFuncOp\n\n pre_cross_attn_layernorm: Union[ModuleSpec, type] = IdentityOp\n cross_attention: Union[ModuleSpec, type] = IdentityOp\n cross_attn_bda: Union[ModuleSpec, type] = IdentityFuncOp\n\n pre_mlp_layernorm: Union[ModuleSpec, type] = IdentityOp\n mlp: Union[ModuleSpec, type] = IdentityOp\n mlp_bda: Union[ModuleSpec, type] = IdentityFuncOp" } ]
from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add from megatron.core.fusions.fused_layer_norm import FusedLayerNorm from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear from megatron.core.transformer.attention import SelfAttention, SelfAttentionSubmodules from megatron.core.transformer.custom_layers.transformer_engine import ( TEDotProductAttention, TELayerNormColumnParallelLinear, TERowParallelLinear, ) from megatron.core.transformer.dot_product_attention import DotProductAttention from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.mlp import MLP, MLPSubmodules from megatron.core.transformer.spec_utils import ModuleSpec from megatron.core.transformer.switch_mlp import SwitchMLP from megatron.core.transformer.transformer_layer import TransformerLayer, TransformerLayerSubmodules
12,884
# Use this spec to use lower level Transformer Engine modules (required for fp8 training) gpt_layer_with_transformer_engine_spec = ModuleSpec( module=TransformerLayer, submodules=TransformerLayerSubmodules( self_attention=ModuleSpec( module=SelfAttention,
# Use this spec to use lower level Transformer Engine modules (required for fp8 training) gpt_layer_with_transformer_engine_spec = ModuleSpec( module=TransformerLayer, submodules=TransformerLayerSubmodules( self_attention=ModuleSpec( module=SelfAttention,
params={"attn_mask_type": AttnMaskType.causal},
10
2023-12-07 08:29:38+00:00
16k
tommy-xq/SA2VP
vit_train_sa2vp.py
[ { "identifier": "create_optimizer", "path": "optim_factory.py", "snippet": "def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None):\n opt_lower = args.opt.lower()\n weight_decay = args.weight_decay\n if weight_decay and filter_bias_and_bn:\n skip = {}\n if skip_list is not None:\n skip = skip_list\n elif hasattr(model, 'no_weight_decay'):\n skip = model.no_weight_decay()\n parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale)\n weight_decay = 0.\n else:\n parameters = model.parameters()\n\n if 'fused' in opt_lower:\n assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'\n\n opt_args = dict(lr=args.lr, weight_decay=weight_decay)\n if hasattr(args, 'opt_eps') and args.opt_eps is not None:\n opt_args['eps'] = args.opt_eps\n if hasattr(args, 'opt_betas') and args.opt_betas is not None:\n opt_args['betas'] = args.opt_betas\n\n opt_split = opt_lower.split('_')\n opt_lower = opt_split[-1]\n if opt_lower == 'sgd' or opt_lower == 'nesterov':\n opt_args.pop('eps', None)\n optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'momentum':\n opt_args.pop('eps', None)\n optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)\n elif opt_lower == 'adam':\n optimizer = optim.Adam(parameters, **opt_args)\n elif opt_lower == 'adamw':\n optimizer = optim.AdamW(parameters, **opt_args)\n elif opt_lower == 'nadam':\n optimizer = Nadam(parameters, **opt_args)\n elif opt_lower == 'radam':\n optimizer = RAdam(parameters, **opt_args)\n elif opt_lower == 'adamp':\n optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)\n elif opt_lower == 'sgdp':\n optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'adadelta':\n optimizer = optim.Adadelta(parameters, **opt_args)\n elif opt_lower == 'adafactor':\n if not args.lr:\n opt_args['lr'] = None\n optimizer = Adafactor(parameters, **opt_args)\n elif opt_lower == 'adahessian':\n optimizer = Adahessian(parameters, **opt_args)\n elif opt_lower == 'rmsprop':\n optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)\n elif opt_lower == 'rmsproptf':\n optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)\n # elif opt_lower == 'novograd':\n # optimizer = NovoGrad(parameters, **opt_args)\n # elif opt_lower == 'nvnovograd':\n # optimizer = NvNovoGrad(parameters, **opt_args)\n elif opt_lower == 'fusedsgd':\n opt_args.pop('eps', None)\n optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'fusedmomentum':\n opt_args.pop('eps', None)\n optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)\n elif opt_lower == 'fusedadam':\n optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)\n elif opt_lower == 'fusedadamw':\n optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)\n elif opt_lower == 'fusedlamb':\n optimizer = FusedLAMB(parameters, **opt_args)\n elif opt_lower == 'fusednovograd':\n opt_args.setdefault('betas', (0.95, 0.98))\n optimizer = FusedNovoGrad(parameters, **opt_args)\n else:\n assert False and \"Invalid optimizer\"\n raise ValueError\n\n if len(opt_split) > 1:\n if opt_split[0] == 'lookahead':\n optimizer = Lookahead(optimizer)\n\n return optimizer" }, { "identifier": "get_parameter_groups", "path": "optim_factory.py", "snippet": "def 
get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None):\n parameter_group_names = {}\n parameter_group_vars = {}\n\n for name, param in model.named_parameters():\n if not param.requires_grad:\n continue # frozen weights\n if len(param.shape) == 1 or name.endswith(\".bias\") or name in skip_list:\n group_name = \"no_decay\"\n this_weight_decay = 0.\n else:\n group_name = \"decay\"\n this_weight_decay = weight_decay\n if get_num_layer is not None:\n layer_id = get_num_layer(name)\n group_name = \"layer_%d_%s\" % (layer_id, group_name)\n else:\n layer_id = None\n\n if group_name not in parameter_group_names:\n if get_layer_scale is not None:\n scale = get_layer_scale(layer_id)\n else:\n scale = 1.\n\n parameter_group_names[group_name] = {\n \"weight_decay\": this_weight_decay,\n \"params\": [],\n \"lr_scale\": scale\n }\n parameter_group_vars[group_name] = {\n \"weight_decay\": this_weight_decay,\n \"params\": [],\n \"lr_scale\": scale\n }\n\n parameter_group_vars[group_name][\"params\"].append(param)\n parameter_group_names[group_name][\"params\"].append(name)\n print(\"Param groups = %s\" % json.dumps(parameter_group_names, indent=2))\n return list(parameter_group_vars.values())" }, { "identifier": "LayerDecayValueAssigner", "path": "optim_factory.py", "snippet": "class LayerDecayValueAssigner(object):\n def __init__(self, values):\n self.values = values\n\n def get_scale(self, layer_id):\n return self.values[layer_id]\n\n def get_layer_id(self, var_name):\n return get_num_layer_for_vit(var_name, len(self.values))" }, { "identifier": "build_dataset", "path": "datasets.py", "snippet": "def build_dataset(is_train, args):\n # must choose one\n transform = build_transform_vtab(is_train, args)\n # transform = build_transform_fgvc(is_train, args)\n \n prefix_fgvc = './data/fgvc' # replace yours, sample:'./data/fgvc'\n prefix_vtab = './data/vtab-1k' # replace yours, sample:'./data/vtab-1k'\n \n if args.data_set == 'CIFAR_ori':\n dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)\n nb_classes = 100\n elif args.data_set == 'IMNET':\n root = os.path.join(args.data_path, 'train' if is_train else 'test')\n dataset = datasets.ImageFolder(root, transform=transform)\n nb_classes = 1000\n elif args.data_set == \"image_folder\":\n root = args.data_path if is_train else args.eval_data_path\n dataset = ImageFolder(root, transform=transform)\n nb_classes = args.nb_classes\n assert len(dataset.class_to_idx) == nb_classes\n elif args.data_set == 'CUB':\n if is_train:\n dataset = FGVC_cub(root=prefix_fgvc+'/CUB_200_2011', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = FGVC_cub(root=prefix_fgvc+'/CUB_200_2011', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 200\n elif args.data_set == 'DOG':\n if is_train:\n dataset = FGVC_dog(root=prefix_fgvc+'/dogs', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = FGVC_dog(root=prefix_fgvc+'/dogs', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 120\n elif args.data_set == 'FLOWER':\n if is_train:\n dataset = FGVC_flower(root=prefix_fgvc+'/OxfordFlower', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = FGVC_flower(root=prefix_fgvc+'/OxfordFlower', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 102\n elif args.data_set == 'CAR':\n if is_train:\n dataset = FGVC_car(root=prefix_fgvc+'/cars', my_mode=args.my_mode, train=True, transform=transform)\n else:\n 
dataset = FGVC_car(root=prefix_fgvc+'/cars', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 196\n elif args.data_set == 'BIRD':\n if is_train:\n dataset = FGVC_bird(root=prefix_fgvc+'/nabirds', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = FGVC_bird(root=prefix_fgvc+'/nabirds', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 555\n elif args.data_set == 'CAL101':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/caltech101', my_mode=args.my_mode, train=True, transform=transform) # VTAB_attnmap\n else:\n dataset = VTAB(root=prefix_vtab+'/caltech101', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 102\n elif args.data_set == 'CIFAR':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/cifar', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/cifar', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 100\n elif args.data_set == 'PATCH_CAMELYON':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/patch_camelyon', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/patch_camelyon', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 2\n elif args.data_set == 'EUROSAT':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/eurosat', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/eurosat', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 10\n elif args.data_set == 'DMLAB':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/dmlab', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/dmlab', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 6\n elif args.data_set == 'CLEVR_COUNT':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/clevr_count', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/clevr_count', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 8\n elif args.data_set == 'DTD':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/dtd', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/dtd', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 47\n elif args.data_set == 'FLOWER_S':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/oxford_flowers102', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/oxford_flowers102', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 102\n elif args.data_set == 'PET':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/oxford_iiit_pet', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/oxford_iiit_pet', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 37\n elif args.data_set == 'SVHN_S':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/svhn', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/svhn', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 10\n elif args.data_set == 'SUN':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/sun397', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/sun397', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 397\n elif args.data_set == 
'Resisc45':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/resisc45', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/resisc45', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 45\n elif args.data_set == 'Retinopathy':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/diabetic_retinopathy', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/diabetic_retinopathy', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 5\n elif args.data_set == 'CLEVR_DISTANCE':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/clevr_dist', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/clevr_dist', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 6\n elif args.data_set == 'KITTI_DISTANCE':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/kitti', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/kitti', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 4\n elif args.data_set == 'DS_LOC':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/dsprites_loc', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/dsprites_loc', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 16\n elif args.data_set == 'DS_ORI':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/dsprites_ori', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/dsprites_ori', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 16\n elif args.data_set == 'SN_AZI':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/smallnorb_azi', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/smallnorb_azi', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 18\n elif args.data_set == 'SN_ELE':\n if is_train:\n dataset = VTAB(root=prefix_vtab+'/smallnorb_ele', my_mode=args.my_mode, train=True, transform=transform)\n else:\n dataset = VTAB(root=prefix_vtab+'/smallnorb_ele', my_mode=args.my_mode, train=False, transform=transform)\n nb_classes = 9\n elif args.data_set == 'DTD_DAM':\n if is_train:\n dataset = DTD(root='/data/damvp_data/cal_all/dtd', split=\"train\", transform=transform) # note: remember to change data path.\n else:\n dataset = DTD(root='/data/damvp_data/cal_all/dtd', split=\"test\", transform=transform) # note: use 'val' to find best and then 'test'. 
when training, use 'val'.\n nb_classes = 47\n elif args.data_set == 'GTSRB_DAM':\n if is_train:\n dataset = GTSRB(root='/data/damvp_data/cal_all', split=\"train\", transform=transform)\n else:\n dataset = GTSRB(root='/data/damvp_data/cal_all', split=\"test\", transform=transform)\n nb_classes = 43\n elif args.data_set == 'FOOD_DAM':\n if is_train:\n dataset = Food101(root='/data/data', split=\"train\", transform=transform)\n else:\n dataset = Food101(root='/data/data', split=\"test\", transform=transform)\n nb_classes = 101\n elif args.data_set == 'CIFAR10_DAM':\n if is_train:\n dataset = CIFAR10(root='/data/damvp_data/cal_all', split=\"train\", transform=transform)\n else:\n dataset = CIFAR10(root='/data/damvp_data/cal_all', split=\"val\", transform=transform)\n nb_classes = 10\n elif args.data_set == 'CIFAR100_DAM':\n if is_train:\n dataset = CIFAR100(root='/data/damvp_data/cal_all', split=\"train\", transform=transform)\n else:\n dataset = CIFAR100(root='/data/damvp_data/cal_all', split=\"test\", transform=transform)\n nb_classes = 100\n elif args.data_set == 'SVHN_DAM':\n if is_train:\n dataset = SVHN(root='/data/damvp_data/cal_all/svhn', split=\"train\", transform=transform)\n else:\n dataset = SVHN(root='/data/damvp_data/cal_all/svhn', split=\"test\", transform=transform)\n nb_classes = 10\n else:\n raise NotImplementedError()\n assert nb_classes == args.nb_classes\n print(\"Number of the class = %d\" % args.nb_classes)\n\n return dataset, nb_classes" }, { "identifier": "build_beit_pretraining_dataset", "path": "datasets.py", "snippet": "def build_beit_pretraining_dataset(args):\n transform = DataAugmentationForBEiT(args)\n print(\"Data Aug = %s\" % str(transform))\n return ImageFolder(args.data_path, transform=transform)" }, { "identifier": "build_beit_pretraining_dataset_val", "path": "datasets.py", "snippet": "def build_beit_pretraining_dataset_val(args):\n transform = DataAugmentationForBEiT_val(args)\n return ImageFolder('/data/fgvc_deal/cub/test', transform=transform)" }, { "identifier": "train_one_epoch", "path": "engine_for_train.py", "snippet": "def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,\n data_loader: Iterable, optimizer: torch.optim.Optimizer,\n device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,\n model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None, log_writer=None,\n start_steps=None, lr_schedule_values=None, wd_schedule_values=None,\n num_training_steps_per_epoch=None, update_freq=None):\n model.train(True)\n metric_logger = utils.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n header = 'Epoch: [{}]'.format(epoch)\n print_freq = 10\n\n if loss_scaler is None:\n model.zero_grad()\n model.micro_steps = 0\n else:\n optimizer.zero_grad()\n\n for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):\n step = data_iter_step // update_freq\n if step >= num_training_steps_per_epoch:\n continue\n it = start_steps + step # global training iteration\n # Update LR & WD for the first acc\n if lr_schedule_values is not None or wd_schedule_values is not None and data_iter_step % update_freq == 0:\n for i, param_group in enumerate(optimizer.param_groups):\n if lr_schedule_values is not None:\n param_group[\"lr\"] = lr_schedule_values[it] * param_group[\"lr_scale\"]\n if wd_schedule_values is not None and 
param_group[\"weight_decay\"] > 0:\n param_group[\"weight_decay\"] = wd_schedule_values[it]\n # print(samples)\n samples = samples.to(device, non_blocking=True)\n # images = images.to(device, non_blocking=True)\n targets = targets.to(device, non_blocking=True)\n\n if mixup_fn is not None:\n samples, targets = mixup_fn(samples, targets)\n\n if loss_scaler is None:\n samples = samples.half()\n loss, output = train_class_batch(\n model, samples, targets, criterion)# criterion_2, device\n else:\n with torch.cuda.amp.autocast():\n loss, output = train_class_batch(\n model, samples, targets, criterion)# criterion_2\n\n loss_value = loss.item()\n\n if not math.isfinite(loss_value):\n print(\"Loss is {}, stopping training\".format(loss_value))\n sys.exit(1)\n\n if loss_scaler is None:\n loss /= update_freq\n model.backward(loss)\n model.step()\n\n if (data_iter_step + 1) % update_freq == 0:\n # model.zero_grad()\n # Deepspeed will call step() & model.zero_grad() automatic\n if model_ema is not None:\n model_ema.update(model)\n grad_norm = None\n loss_scale_value = get_loss_scale_for_deepspeed(model)\n else:\n # this attribute is added by timm on one optimizer (adahessian)\n is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order\n loss /= update_freq\n grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,\n parameters=model.parameters(), create_graph=is_second_order,\n update_grad=(data_iter_step + 1) % update_freq == 0)\n if (data_iter_step + 1) % update_freq == 0:\n optimizer.zero_grad()\n if model_ema is not None:\n model_ema.update(model)\n loss_scale_value = loss_scaler.state_dict()[\"scale\"]\n\n torch.cuda.synchronize()\n\n if mixup_fn is None:\n class_acc = (output.max(-1)[-1] == targets).float().mean()\n else:\n class_acc = None\n metric_logger.update(loss=loss_value)\n metric_logger.update(class_acc=class_acc)\n metric_logger.update(loss_scale=loss_scale_value)\n min_lr = 10.\n max_lr = 0.\n for group in optimizer.param_groups:\n min_lr = min(min_lr, group[\"lr\"])\n max_lr = max(max_lr, group[\"lr\"])\n\n metric_logger.update(lr=max_lr)\n metric_logger.update(min_lr=min_lr)\n weight_decay_value = None\n for group in optimizer.param_groups:\n if group[\"weight_decay\"] > 0:\n weight_decay_value = group[\"weight_decay\"]\n metric_logger.update(weight_decay=weight_decay_value)\n metric_logger.update(grad_norm=grad_norm)\n\n if log_writer is not None:\n log_writer.update(loss=loss_value, head=\"loss\")\n log_writer.update(class_acc=class_acc, head=\"loss\")\n log_writer.update(loss_scale=loss_scale_value, head=\"opt\")\n log_writer.update(lr=max_lr, head=\"opt\")\n log_writer.update(min_lr=min_lr, head=\"opt\")\n log_writer.update(weight_decay=weight_decay_value, head=\"opt\")\n log_writer.update(grad_norm=grad_norm, head=\"opt\")\n\n log_writer.set_step()\n\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}" }, { "identifier": "evaluate", "path": "engine_for_train.py", "snippet": "@torch.no_grad()\ndef evaluate(data_loader, model, device):\n criterion = torch.nn.CrossEntropyLoss()\n\n metric_logger = utils.MetricLogger(delimiter=\" \")\n header = 'Test:'\n\n # switch to evaluation mode\n model.eval()\n \n for batch in metric_logger.log_every(data_loader, 10, header):\n # samples, images = bs\n images = batch[0]\n target = batch[-1]\n images = images.to(device, non_blocking=True)\n # samples = 
samples.to(device, non_blocking=True)\n target = target.to(device, non_blocking=True)\n \n # compute output\n \n with torch.cuda.amp.autocast():\n output, prompt = model(images)\n loss = 0.8*criterion(output, target)+0.2*criterion(prompt, target)\n \n # acc1, acc5 = accuracy(output, target, topk=(1, 5))\n acc1 = accuracy(output, target, topk=(1, 5))[0]\n\n batch_size = target.shape[0]\n metric_logger.update(loss=loss.item())\n metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)\n \n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n \"\"\"\n print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'\n .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))\n \"\"\"\n print('* Acc@1 {top1.global_avg:.3f} loss {losses.global_avg:.3f}'\n .format(top1=metric_logger.acc1, losses=metric_logger.loss))\n\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}" }, { "identifier": "NativeScalerWithGradNormCount", "path": "utils.py", "snippet": "class NativeScalerWithGradNormCount:\n state_dict_key = \"amp_scaler\"\n\n def __init__(self):\n self._scaler = torch.cuda.amp.GradScaler()\n\n def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):\n self._scaler.scale(loss).backward(create_graph=create_graph)\n if update_grad:\n if clip_grad is not None:\n assert parameters is not None\n self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place\n norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)\n else:\n self._scaler.unscale_(optimizer)\n norm = get_grad_norm_(parameters)\n self._scaler.step(optimizer)\n self._scaler.update()\n else:\n norm = None\n return norm\n\n def state_dict(self):\n return self._scaler.state_dict()\n\n def load_state_dict(self, state_dict):\n self._scaler.load_state_dict(state_dict)" }, { "identifier": "VisionTransformer", "path": "vpt_main/src/models/vit_backbones/vit_tinypara.py", "snippet": "class VisionTransformer(nn.Module):\n def __init__(\n self, model_type,\n img_size=224, num_classes=21843, vis=False\n ):\n super(VisionTransformer, self).__init__()\n config = CONFIGS[model_type]\n self.num_classes = num_classes\n self.classifier = config.classifier\n\n self.transformer = Transformer(config, img_size, vis)\n self.head = Linear(config.hidden_size, num_classes) if num_classes > 0 else nn.Identity()\n\n def forward(self, x, vis=False):\n x, attn_weights, ppt = self.transformer(x)\n logits = self.head(x[:, 0])\n # logits = self.head(x[:, 1:].mean(dim=1))\n\n if not vis:\n return logits, ppt[:, 0]\n # return logits, ppt[:, 1:].mean(dim=1), global_ppt.mean(dim=1)\n \n print(\"return logits and attention --------------------\")\n return logits, attn_weights # attn_weights: num_layers, B, num_head, num_patches, num_patches\n \n def forward_cls_layerwise(self, x):\n print(\"do this vit part -----------------------\")\n cls_embeds = self.transformer.forward_cls_layerwise(x)\n return cls_embeds\n\n def load_from(self, weights):\n with torch.no_grad():\n self.transformer.embeddings.patch_embeddings.weight.copy_(np2th(weights[\"embedding/kernel\"], conv=True))\n self.transformer.embeddings.patch_embeddings.bias.copy_(np2th(weights[\"embedding/bias\"]))\n self.transformer.embeddings.cls_token.copy_(np2th(weights[\"cls\"]))\n self.transformer.encoder.encoder_norm.weight.copy_(np2th(weights[\"Transformer/encoder_norm/scale\"]))\n 
self.transformer.encoder.encoder_norm.bias.copy_(np2th(weights[\"Transformer/encoder_norm/bias\"]))\n\n posemb = np2th(weights[\"Transformer/posembed_input/pos_embedding\"])\n posemb_new = self.transformer.embeddings.position_embeddings\n if posemb.size() == posemb_new.size():\n self.transformer.embeddings.position_embeddings.copy_(posemb)\n else:\n logger.info(\"load_pretrained: resized variant: %s to %s\" % (posemb.size(), posemb_new.size()))\n ntok_new = posemb_new.size(1)\n\n if self.classifier == \"token\":\n posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]\n ntok_new -= 1\n else:\n posemb_tok, posemb_grid = posemb[:, :0], posemb[0]\n\n gs_old = int(np.sqrt(len(posemb_grid)))\n gs_new = int(np.sqrt(ntok_new))\n print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new))\n posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)\n\n zoom = (gs_new / gs_old, gs_new / gs_old, 1)\n posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1)\n posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1)\n posemb = np.concatenate([posemb_tok, posemb_grid], axis=1)\n self.transformer.embeddings.position_embeddings.copy_(np2th(posemb))\n\n for bname, block in self.transformer.encoder.named_children():\n if bname == 'cross_conv' or bname == 'encoder_norm_2' or bname == 'deep_ppt' or bname == 'deep_proj':\n pass\n else:\n for uname, unit in block.named_children():\n # print(uname, unit)\n unit.load_from(weights, n_block=uname)\n\n if self.transformer.embeddings.hybrid:\n self.transformer.embeddings.hybrid_model.root.conv.weight.copy_(np2th(weights[\"conv_root/kernel\"], conv=True))\n gn_weight = np2th(weights[\"gn_root/scale\"]).view(-1)\n gn_bias = np2th(weights[\"gn_root/bias\"]).view(-1)\n self.transformer.embeddings.hybrid_model.root.gn.weight.copy_(gn_weight)\n self.transformer.embeddings.hybrid_model.root.gn.bias.copy_(gn_bias)\n\n for bname, block in self.transformer.embeddings.hybrid_model.body.named_children():\n for uname, unit in block.named_children():\n unit.load_from(weights, n_block=bname, n_unit=uname)" } ]
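For orientation, the get_parameter_groups snippet in the context list above builds separate "decay" and "no_decay" parameter groups (frozen weights are skipped; 1-D tensors, biases, and skip-listed names get zero weight decay) and attaches an lr_scale to each group. The short sketch below shows how such groups are commonly handed to a standard PyTorch optimizer; it is only an illustration, and the toy model, learning rate, and weight-decay value are assumptions, not values taken from this record.

import torch
import torch.nn as nn

def simple_parameter_groups(model, weight_decay=0.05):
    """Split parameters into 'decay' / 'no_decay' buckets using the same rule as
    the snippet above: frozen weights are skipped, 1-D tensors and biases get
    zero weight decay."""
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights
        if param.ndim == 1 or name.endswith(".bias"):
            no_decay.append(param)
        else:
            decay.append(param)
    return [
        {"params": decay, "weight_decay": weight_decay, "lr_scale": 1.0},
        {"params": no_decay, "weight_decay": 0.0, "lr_scale": 1.0},
    ]

if __name__ == "__main__":
    toy = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))  # assumed toy model
    optimizer = torch.optim.AdamW(simple_parameter_groups(toy), lr=5e-4)
    # Extra keys such as 'lr_scale' survive on optimizer.param_groups, which is
    # what lets a per-step loop do: lr = lr_schedule_values[it] * lr_scale.
    print([{k: v for k, v in g.items() if k != "params"} for g in optimizer.param_groups])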
import argparse import datetime import numpy as np import time import torch import torch.nn as nn import torch.backends.cudnn as cudnn import json import os import utils import random import deepspeed from pathlib import Path from time import sleep from timm.data.mixup import Mixup from timm.models import create_model from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy from timm.utils import ModelEma from optim_factory import create_optimizer, get_parameter_groups, LayerDecayValueAssigner from datasets import build_dataset from datasets import build_beit_pretraining_dataset, build_beit_pretraining_dataset_val from engine_for_train import train_one_epoch, evaluate # engine for sa2vp from utils import NativeScalerWithGradNormCount as NativeScaler from scipy import interpolate from timm.models.layers import trunc_normal_ from functools import partial from vpt_main.src.models.vit_backbones.vit_tinypara import VisionTransformer # choose model to train from deepspeed import DeepSpeedConfig
11,119
dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True, ) if dataset_val is not None: data_loader_val = torch.utils.data.DataLoader( dataset_val, sampler=sampler_val, batch_size=int(4*args.batch_size), num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False ) else: data_loader_val = None mixup_fn = None mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None if mixup_active: print("Mixup is activated!") mixup_fn = Mixup( mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.nb_classes) model = Dual_model(args) n_parameters = sum(p.numel() for p in model.vit_base.parameters() if p.requires_grad) frozen_parameters = sum(p.numel() for p in model.vit_base.parameters() if not p.requires_grad) total_parameters = sum(p.numel() for p in model.vit_base.parameters()) print('------------------------------') # print to show parameters are frozen or learnable for name, param in model.named_parameters(): print(name, param.requires_grad) print('------------------------------') model.to(device) model_ema = None if args.model_ema: # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper model_ema = ModelEma( model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else '', resume='') print("Using EMA with decay = %.8f" % args.model_ema_decay) model_without_ddp = model # print("Model = %s" % str(model_without_ddp)) total_batch_size = args.batch_size * args.update_freq * utils.get_world_size() num_training_steps_per_epoch = len(dataset_train) // total_batch_size print("LR = %.8f" % args.lr) print("Batch size = %d" % total_batch_size) print("Update frequent = %d" % args.update_freq) print("Number of training examples = %d" % len(dataset_train)) print("Number of training training per epoch = %d" % num_training_steps_per_epoch) assigner = None if assigner is not None: print("Assigned values = %s" % str(assigner.values)) # skip_weight_decay_list = model.no_weight_decay() skip_weight_decay_list = None if args.enable_deepspeed: loss_scaler = None optimizer_params = get_parameter_groups( model, args.weight_decay, skip_weight_decay_list, assigner.get_layer_id if assigner is not None else None, assigner.get_scale if assigner is not None else None) model, optimizer, _, _ = ds_init( args=args, model=model, model_parameters=optimizer_params, dist_init_required=not args.distributed, ) print("model.gradient_accumulation_steps() = %d" % model.gradient_accumulation_steps()) assert model.gradient_accumulation_steps() == args.update_freq else: if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True) model_without_ddp = model.module optimizer = create_optimizer( args, model_without_ddp, skip_list=skip_weight_decay_list, get_num_layer=assigner.get_layer_id if assigner is not None else None, get_layer_scale=assigner.get_scale if assigner is not None else None) loss_scaler = NativeScaler() print("Use step level LR scheduler!") lr_schedule_values = utils.cosine_scheduler( args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch, warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps, ) if args.weight_decay_end is None: args.weight_decay_end = args.weight_decay wd_schedule_values = utils.cosine_scheduler( 
args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch) print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values))) if mixup_fn is not None: # smoothing is handled with mixup label transform criterion = SoftTargetCrossEntropy() elif args.smoothing > 0.: criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing) else: criterion = torch.nn.CrossEntropyLoss() print("criterion = %s" % str(criterion)) utils.auto_load_model( args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema) if args.eval:
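The cropped code above indexes lr_schedule_values and wd_schedule_values per optimizer step, both produced by utils.cosine_scheduler(...). That utility is not included in this record, so the function below is only a plausible re-implementation consistent with how it is called: linear warmup over warmup_epochs (or an explicit warmup_steps), then a cosine ramp from the base value down to the final value, one entry per global step. The example arguments at the bottom are placeholders.

import math
import numpy as np

def cosine_scheduler(base_value, final_value, epochs, niter_per_ep,
                     warmup_epochs=0, warmup_steps=-1, start_warmup_value=0.0):
    """Per-step schedule: linear warmup, then cosine decay to final_value.
    Matches the way the snippet indexes it (one entry per global step)."""
    warmup_iters = warmup_epochs * niter_per_ep
    if warmup_steps > 0:  # an explicit step count overrides warmup_epochs
        warmup_iters = warmup_steps
    warmup = np.linspace(start_warmup_value, base_value, warmup_iters)

    iters = np.arange(epochs * niter_per_ep - warmup_iters)
    cosine = final_value + 0.5 * (base_value - final_value) * (
        1 + np.cos(math.pi * iters / len(iters)))
    schedule = np.concatenate((warmup, cosine))
    assert len(schedule) == epochs * niter_per_ep
    return schedule

# Example: 30 epochs, 100 steps/epoch, 5 warmup epochs (placeholder values).
lr_schedule_values = cosine_scheduler(5e-4, 1e-6, 30, 100, warmup_epochs=5)
print(lr_schedule_values[0], lr_schedule_values.max(), lr_schedule_values[-1])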
# -------------------------------------------------------- # SA2VP: Spatially Aligned-and-Adapted Visual Prompt code # reference: # BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) # Github source: https://github.com/microsoft/unilm/tree/master/beit # Based on timm # https://github.com/rwightman/pytorch-image-models/tree/master/timm # --------------------------------------------------------' #os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID' # can be removed and set in bash #os.environ['CUDA_VISIBLE_DEVICES']='0' # can be removed and set in bash class Dual_model(nn.Module): def __init__(self, args): super(Dual_model, self).__init__() self.vit_base = VisionTransformer("sup_vitb16_imagenet21k", 224, num_classes=-1, vis=False) # choose ViT as backbone self.vit_base.load_from(np.load(os.path.join("./backbone_ckpt", "imagenet21k_ViT-B_16.npz"))) # where to save pre-trained model ./backbone_ckpt # show parameters is frozen or learnable for k, p in self.vit_base.named_parameters(): name_list = k.split('.') # print(name_list) if name_list[2] == 'ppt' or name_list[2] == 'cross_conv' or name_list[2] == 'deep_ppt' or name_list[2] == 'encoder_norm_2' or name_list[2] == 'deep_proj' or name_list[2] == 'ppt_proj': p.requires_grad = True elif name_list[2] == 'cross_attn': if name_list[4] == 'attn' or name_list[4] == 'attention_norm': p.requires_grad = False else: p.requires_grad = True elif name_list[2] == 'layer': if name_list[4] == 'down_proj' or name_list[4] == 'up_proj' or name_list[4] == 'before_norm': p.requires_grad = True else: p.requires_grad = False else: p.requires_grad = False self.class_head = nn.Linear(768, args.nb_classes, bias=True) trunc_normal_(self.class_head.weight, std=0.02) def forward(self, x): x, p = self.vit_base(x) # B*768 return self.class_head(x), self.class_head(p) def get_args(): parser = argparse.ArgumentParser('SA2VP script for image classification', add_help=False) parser.add_argument('--batch_size', default=64, type=int) parser.add_argument('--epochs', default=30, type=int) parser.add_argument('--update_freq', default=1, type=int) parser.add_argument('--save_ckpt_freq', default=20, type=int) # Model parameters parser.add_argument('--model', default='beit_base_patch16_224', type=str, metavar='MODEL', help='Name of model to train') parser.add_argument('--rel_pos_bias', action='store_true') parser.add_argument('--disable_rel_pos_bias', action='store_false', dest='rel_pos_bias') parser.set_defaults(rel_pos_bias=False) parser.add_argument('--abs_pos_emb', action='store_true') parser.set_defaults(abs_pos_emb=True) parser.add_argument('--layer_scale_init_value', default=0.1, type=float, help="0.1 for base, 1e-5 for large. 
set 0 to disable layer scale") parser.add_argument('--input_size', default=224, type=int, help='images input size') parser.add_argument('--second_input_size', default=112, type=int, help='images input size for discrete vae') parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', help='Dropout rate (default: 0.)') parser.add_argument('--attn_drop_rate', type=float, default=0.0, metavar='PCT', help='Attention dropout rate (default: 0.)') parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)') parser.add_argument('--disable_eval_during_finetuning', action='store_true', default=False) parser.add_argument('--model_ema', action='store_true', default=False) parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='') parser.add_argument('--model_ema_force_cpu', action='store_true', default=False, help='') # Optimizer parameters parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', help='Optimizer (default: "adamw"') parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: 1e-8)') parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)') parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)') parser.add_argument('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)') parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the weight decay. We use a cosine schedule for WD and using a larger decay by the end of training improves performance for ViTs.""") parser.add_argument('--lr', type=float, default=5e-4, metavar='LR', help='learning rate (default: 5e-4)') parser.add_argument('--layer_decay', type=float, default=0.9) parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR', help='warmup learning rate (default: 1e-6)') parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports') parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N', help='num of steps to warmup LR, will overload warmup_epochs if set > 0') # Augmentation parameters parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)') parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME', help='Use AutoAugment policy. "v0" or "original". 
" + "(default: rand-m9-mstd0.5-inc1)'), parser.add_argument('--smoothing', type=float, default=0, help='Label smoothing (default: 0)') parser.add_argument('--train_interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")') parser.add_argument('--second_interpolation', type=str, default='lanczos', help='Interpolation for discrete vae (random, bilinear, bicubic default: "lanczos")') # Evaluation parameters parser.add_argument('--crop_pct', type=float, default=None) # * Random Erase params parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', help='Random erase prob (default: 0.25)') parser.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")') parser.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)') parser.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split') # * Mixup params parser.add_argument('--mixup', type=float, default=0, help='mixup alpha, mixup enabled if > 0.') parser.add_argument('--cutmix', type=float, default=0, help='cutmix alpha, cutmix enabled if > 0.') parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') parser.add_argument('--mixup_prob', type=float, default=1.0, help='Probability of performing mixup or cutmix when either/both is enabled') parser.add_argument('--mixup_switch_prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled') parser.add_argument('--mixup_mode', type=str, default='batch', help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"') # * Finetuning params parser.add_argument('--finetune', default='', help='finetune from checkpoint') parser.add_argument('--model_key', default='model|module', type=str) parser.add_argument('--model_prefix', default='', type=str) parser.add_argument('--init_scale', default=0.001, type=float) parser.add_argument('--use_mean_pooling', action='store_true') parser.set_defaults(use_mean_pooling=True) parser.add_argument('--use_cls', action='store_false', dest='use_mean_pooling') parser.add_argument('--disable_weight_decay_on_rel_pos_bias', action='store_true', default=False) # Dataset parameters parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str, help='dataset path') parser.add_argument('--my_mode', default='train_val', type=str, help='my mode to train or test') parser.add_argument('--eval_data_path', default=None, type=str, help='dataset path for evaluation') parser.add_argument('--nb_classes', default=0, type=int, help='number of the classification types') parser.add_argument('--imagenet_default_mean_and_std', default=False, action='store_true') parser.add_argument('--data_set', default='CUB', choices=['CIFAR', 'IMNET', 'image_folder', 'CUB', 'DOG', 'FLOWER', 'CAR', 'BIRD', 'CAL101', 'DMLAB','EUROSAT','PATCH_CAMELYON','Resisc45','Retinopathy','CLEVR_COUNT','CIFAR100','FOOD101','SVHN','DTD','FLOWER_S','PET','SVHN_S','SUN','CLEVR_DISTANCE','KITTI_DISTANCE','DS_LOC','DS_ORI','SN_AZI','SN_ELE', 'DTD_DAM', 'GTSRB_DAM', 'FOOD_DAM', 'CIFAR10_DAM', 'CIFAR100_DAM', 'SVHN_DAM'], type=str, help='ImageNet dataset path') parser.add_argument('--output_dir', default='', help='path where to save, empty for no saving') parser.add_argument('--log_dir', default=None, help='path where to tensorboard log') 
parser.add_argument('--device', default='cuda', help='device to use for training / testing') parser.add_argument('--seed', default=0, type=int) parser.add_argument('--resume', default='', help='resume from checkpoint') parser.add_argument('--auto_resume', action='store_true') parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume') parser.set_defaults(auto_resume=True) parser.add_argument('--save_ckpt', action='store_true') parser.add_argument('--no_save_ckpt', action='store_false', dest='save_ckpt') parser.set_defaults(save_ckpt=True) parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch') parser.add_argument('--eval', action='store_true', help='Perform evaluation only') parser.add_argument('--dist_eval', action='store_true', default=False, help='Enabling distributed evaluation') parser.add_argument('--num_workers', default=10, type=int) parser.add_argument('--pin_mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem') parser.set_defaults(pin_mem=True) # distributed training parameters parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes') parser.add_argument('--local_rank', default=-1, type=int) parser.add_argument('--dist_on_itp', action='store_true') parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') parser.add_argument('--enable_deepspeed', action='store_true', default=False) known_args, _ = parser.parse_known_args() if known_args.enable_deepspeed: try: parser = deepspeed.add_config_arguments(parser) ds_init = deepspeed.initialize except: print("Please 'pip install deepspeed==0.4.0'") exit(0) else: ds_init = None return parser.parse_args(), ds_init def main(args, ds_init): utils.init_distributed_mode(args) if ds_init is not None: utils.create_ds_config(args) print(args) device = torch.device(args.device) seed = 42 torch.manual_seed(seed) np.random.seed(seed) cudnn.benchmark = True dataset_train, args.nb_classes = build_dataset(is_train=True, args=args) if args.disable_eval_during_finetuning: dataset_val = None else: dataset_val, _ = build_dataset(is_train=False, args=args) print("Calculation of training examples = %d" % len(dataset_train)) print("Calculation of other examples = %d" % len(dataset_val)) if True: # args.distributed: num_tasks = utils.get_world_size() global_rank = utils.get_rank() sampler_train = torch.utils.data.DistributedSampler( dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True ) print("Sampler_train = %s" % str(sampler_train)) if args.dist_eval: if len(dataset_val) % num_tasks != 0: print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. 
' 'This will slightly alter validation results as extra duplicate entries are added to achieve ' 'equal num of samples per-process.') sampler_val = torch.utils.data.DistributedSampler( dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False) else: sampler_val = torch.utils.data.SequentialSampler(dataset_val) else: sampler_train = torch.utils.data.RandomSampler(dataset_train) sampler_val = torch.utils.data.SequentialSampler(dataset_val) if global_rank == 0 and args.log_dir is not None: os.makedirs(args.log_dir, exist_ok=True) log_writer = utils.TensorboardLogger(log_dir=args.log_dir) else: log_writer = None data_loader_train = torch.utils.data.DataLoader( dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True, ) if dataset_val is not None: data_loader_val = torch.utils.data.DataLoader( dataset_val, sampler=sampler_val, batch_size=int(4*args.batch_size), num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False ) else: data_loader_val = None mixup_fn = None mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None if mixup_active: print("Mixup is activated!") mixup_fn = Mixup( mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.nb_classes) model = Dual_model(args) n_parameters = sum(p.numel() for p in model.vit_base.parameters() if p.requires_grad) frozen_parameters = sum(p.numel() for p in model.vit_base.parameters() if not p.requires_grad) total_parameters = sum(p.numel() for p in model.vit_base.parameters()) print('------------------------------') # print to show parameters are frozen or learnable for name, param in model.named_parameters(): print(name, param.requires_grad) print('------------------------------') model.to(device) model_ema = None if args.model_ema: # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper model_ema = ModelEma( model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else '', resume='') print("Using EMA with decay = %.8f" % args.model_ema_decay) model_without_ddp = model # print("Model = %s" % str(model_without_ddp)) total_batch_size = args.batch_size * args.update_freq * utils.get_world_size() num_training_steps_per_epoch = len(dataset_train) // total_batch_size print("LR = %.8f" % args.lr) print("Batch size = %d" % total_batch_size) print("Update frequent = %d" % args.update_freq) print("Number of training examples = %d" % len(dataset_train)) print("Number of training training per epoch = %d" % num_training_steps_per_epoch) assigner = None if assigner is not None: print("Assigned values = %s" % str(assigner.values)) # skip_weight_decay_list = model.no_weight_decay() skip_weight_decay_list = None if args.enable_deepspeed: loss_scaler = None optimizer_params = get_parameter_groups( model, args.weight_decay, skip_weight_decay_list, assigner.get_layer_id if assigner is not None else None, assigner.get_scale if assigner is not None else None) model, optimizer, _, _ = ds_init( args=args, model=model, model_parameters=optimizer_params, dist_init_required=not args.distributed, ) print("model.gradient_accumulation_steps() = %d" % model.gradient_accumulation_steps()) assert model.gradient_accumulation_steps() == args.update_freq else: if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, 
device_ids=[args.gpu], find_unused_parameters=True) model_without_ddp = model.module optimizer = create_optimizer( args, model_without_ddp, skip_list=skip_weight_decay_list, get_num_layer=assigner.get_layer_id if assigner is not None else None, get_layer_scale=assigner.get_scale if assigner is not None else None) loss_scaler = NativeScaler() print("Use step level LR scheduler!") lr_schedule_values = utils.cosine_scheduler( args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch, warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps, ) if args.weight_decay_end is None: args.weight_decay_end = args.weight_decay wd_schedule_values = utils.cosine_scheduler( args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch) print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values))) if mixup_fn is not None: # smoothing is handled with mixup label transform criterion = SoftTargetCrossEntropy() elif args.smoothing > 0.: criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing) else: criterion = torch.nn.CrossEntropyLoss() print("criterion = %s" % str(criterion)) utils.auto_load_model( args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema) if args.eval:
test_stats = evaluate(data_loader_val, model, device)
7
2023-12-12 13:19:17+00:00
16k
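To make the SA2VP record above easier to follow: its Dual_model returns two sets of logits (the CLS feature and a prompt feature passed through the same class_head), and its evaluate() combines their cross-entropy losses with fixed 0.8/0.2 weights. The sketch below isolates that weighted dual-head loss on a throwaway two-headed module; the 0.8/0.2 weights come from the snippet, while the toy backbone, feature dimension, and batch shapes are illustrative assumptions.

import torch
import torch.nn as nn

class TwoHeadedToy(nn.Module):
    """Stand-in for the record's dual-output model: one shared class_head is
    applied to a main feature and to a prompt feature."""
    def __init__(self, feat_dim=64, num_classes=10):
        super().__init__()
        self.backbone = nn.Linear(3 * 32 * 32, feat_dim)   # assumed toy backbone
        self.prompt_proj = nn.Linear(feat_dim, feat_dim)   # assumed prompt branch
        self.class_head = nn.Linear(feat_dim, num_classes)

    def forward(self, x):
        feats = self.backbone(x.flatten(1))
        prompt_feats = self.prompt_proj(feats)
        return self.class_head(feats), self.class_head(prompt_feats)

def dual_loss(output, prompt, target):
    criterion = nn.CrossEntropyLoss()
    # Same weighting as the evaluate() snippet above: 0.8 main head, 0.2 prompt head.
    return 0.8 * criterion(output, target) + 0.2 * criterion(prompt, target)

if __name__ == "__main__":
    model = TwoHeadedToy()
    images = torch.randn(4, 3, 32, 32)
    target = torch.randint(0, 10, (4,))
    output, prompt = model(images)
    print(float(dual_loss(output, prompt, target)))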
lumina-test/lumina
lumina/e2e_test/test_gbn.py
[ { "identifier": "get_qp_info_list", "path": "lumina/analyzer/main.py", "snippet": "def get_qp_info_list(switch_msg_snapshot):\n \"\"\" Get the list of QP info from the switch message snapshot\n\n Args:\n switch_msg_snapshot (str): The path to the switch message snapshot\n\n Returns:\n list of dict: The list of queue pair (QP) information if successful or None otherwise.\n The list of QP information is in the following format:\n [{'psn_rcv': initial packet sequence number from the receiver qp,\n 'psn_snd': initial packet sequence number from the sender qp,\n 'qpn_rcv': receiver qp number,\n 'qpn_snd': sender qp number,\n 'ip_rcv' : receiver IP\n 'ip_snd' : sender IP}]\n \"\"\"\n try:\n with open(switch_msg_snapshot, 'r') as stream:\n qp_info_list = yaml.safe_load(stream)\n except:\n logging.error(\"Read switch message snapshot %s error.\" % switch_msg_snapshot)\n return None\n\n logging.info(\"Read switch message snapshot %s.\" % switch_msg_snapshot)\n return qp_info_list" }, { "identifier": "Orchestrator", "path": "lumina/orchestrator/main.py", "snippet": "class Orchestrator:\n \"\"\" Class to manage the experiment \"\"\"\n def __init__(self, config_file):\n \"\"\" Constructor for Orchestrator class\n\n Args:\n config_file (str): path to the yaml (config) file.\n The file contains configs for switch, requester, responder, traffic, etc.\n\n Returns:\n N/A\n \"\"\"\n with open(config_file, \"r\") as stream:\n conf = yaml.safe_load(stream)\n try:\n local_workspace = conf['local-workspace']\n result_path = conf['result-path']\n switch_conf = conf['switch']\n requester_conf = conf['requester']\n responder_conf = conf['responder']\n requester_mirror_conf = conf['requester-mirror']\n responder_mirror_conf = conf['responder-mirror']\n traffic_conf = conf['traffic']\n rewrite_udp_dst_port = conf['rewrite-udp-dst-port']\n num_repeats = conf['num-repeats']\n agg_pcap_filename = conf['aggregate-pcap-filename']\n except KeyError as e:\n print(\"Config file %s has a bad yaml format (key error: %s)\" % (config_file, e))\n sys.exit(-1)\n\n switch_conf['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n requester_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n responder_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n\n self.local_workspace = local_workspace\n self.result_path = result_path\n self.traffic_conf = traffic_conf\n self.num_repeats = num_repeats\n self.switch = switch.Switch(switch_conf)\n self.requester = host.RDMAHost(requester_conf)\n self.responder = host.RDMAHost(responder_conf)\n self.requester_mirror = host.MirrorHost(requester_mirror_conf)\n self.responder_mirror = host.MirrorHost(responder_mirror_conf)\n self.aggregate_pcap_filename = agg_pcap_filename\n\n cmd = \"mkdir -p %s\" % self.result_path\n subprocess.call(cmd, shell = True)\n\n def rm_old_files(self):\n \"\"\" Remove result files left by previous experiments \"\"\"\n old_iter_id = 0\n old_iter_result_path = os.path.join(self.result_path, str(old_iter_id))\n\n while os.path.exists(old_iter_result_path) and not os.path.isfile(old_iter_result_path):\n cmd = \"rm -rf %s\" % (old_iter_result_path)\n subprocess.call(cmd, shell=True)\n\n old_iter_id += 1\n old_iter_result_path = os.path.join(self.result_path, str(old_iter_id))\n\n def get_requester_ip_list(self):\n \"\"\" Return the list of requester IP addresses (without prefix length info) \"\"\"\n return [x.split('/')[0] for x in self.requester.conf['nic']['ip-list']]\n\n def get_responder_ip_list(self):\n \"\"\" Return the 
list of responder IP addresses (without prefix length info) \"\"\"\n return [x.split('/')[0] for x in self.responder.conf['nic']['ip-list']]\n\n def get_num_repeats(self):\n \"\"\" Return the number of experiment repeats \"\"\"\n return self.num_repeats\n\n def sync_and_compile(self):\n \"\"\" Syncronize and compile the code on all the hosts\n\n Returns:\n bool: True if the code is synced and compiled successfully, False otherwise\n \"\"\"\n logging.info(\"Sync and compile the code\")\n\n ## Sync and compile the switch code\n ret = self.switch.sync_and_compile(self.local_workspace,\n switch.SWITCH_PROG_DIR_NAME,\n switch.SWITCH_PROG_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the switch code\")\n return False\n\n ## Sync and compile the traffic generator code\n rdma_verb = self.traffic_conf['rdma-verb'].strip().lower()\n if rdma_verb not in host.VALID_IB_VERB_LIST_LOWER:\n logging.error(\"Invalid RDMA verb: %s\" % rdma_verb)\n return False\n\n ret = self.requester.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=self.requester.traffic_gen_dir_name(),\n prog_file_name=self.requester.traffic_gen_client_name(rdma_verb))\n if ret == False:\n logging.error(\"Failed to sync and compile the traffic generator code on requester\")\n return False\n\n ret = self.responder.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=self.requester.traffic_gen_dir_name(),\n prog_file_name=self.requester.traffic_gen_server_name(rdma_verb))\n if ret == False:\n logging.error(\"Failed to sync and compile the traffic generator code on responder\")\n return False\n\n ret = self.requester.sync(local_workspace=self.local_workspace,\n prog_dir_name=host.DUMP_COUNTER_DIR_NAME)\n if ret == False:\n logging.error(\"Failed to sync the dump counter code on requester\")\n return False\n\n ret = self.responder.sync(local_workspace=self.local_workspace,\n prog_dir_name=host.DUMP_COUNTER_DIR_NAME)\n if ret == False:\n logging.error(\"Failed to sync the dump counter code on responder\")\n return False\n\n ## Sync and compile the packet capture code\n ret = self.requester_mirror.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=host.PKT_CAPTURE_DIR_NAME,\n prog_file_name=host.PKT_CAPTURE_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the packet capture code on requester_mirror\")\n return False\n\n ret = self.responder_mirror.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=host.PKT_CAPTURE_DIR_NAME,\n prog_file_name=host.PKT_CAPTURE_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the packet capture code on responder_mirror\")\n return False\n\n return True\n\n def generate_switch_table_config(self):\n \"\"\" Generate the switch configuration, including:\n 1. Forward table\n 2. Mirror table\n 3. ARP table\n 4. 
Traffic table, including the events to inject\n\n Returns:\n bool: True if the switch configuration is generated successfully, False otherwise\n \"\"\"\n requester_nic_conf = self.requester.conf['nic']\n responder_nic_conf = self.responder.conf['nic']\n requester_mirror_nic_conf = self.requester_mirror.conf['nic']\n responder_mirror_nic_conf = self.responder_mirror.conf['nic']\n\n ## Set up forward table entries\n self.switch.conf['forward-table'] = []\n try:\n for nic_conf, host_type in zip([requester_nic_conf, responder_nic_conf, \\\n requester_mirror_nic_conf, responder_mirror_nic_conf],\n ['requester', 'responder', 'requester_mirror', 'responder_mirror']):\n forward_table_entry = {'dst-mac': nic_conf['mac'],\n 'eg-port': nic_conf['switch-port'],\n 'host': host_type}\n self.switch.conf['forward-table'].append(forward_table_entry)\n except:\n logging.error(\"Failed to set forward table\")\n return False\n\n ## Set up mirror table entries, use ingress_to_egress\n try:\n requester_mirror_entry = {'direction': 'ingress_to_egress',\n 'src-port': requester_nic_conf['switch-port'],\n 'dst-port': requester_mirror_nic_conf['switch-port']}\n\n responder_mirror_entry = {'direction': 'ingress_to_egress',\n 'src-port': responder_nic_conf['switch-port'],\n 'dst-port': responder_mirror_nic_conf['switch-port']}\n self.switch.conf['mirror-table'] = [requester_mirror_entry, responder_mirror_entry]\n except:\n logging.error(\"Failed to set mirror table\")\n return False\n\n requester_mac = requester_nic_conf['mac']\n responder_mac = responder_nic_conf['mac']\n requester_ip_list = requester_nic_conf['ip-list']\n responder_ip_list = responder_nic_conf['ip-list']\n ## Set up arp table entries\n arp_entries = []\n try:\n for dst_ip_list, dst_mac in zip([requester_ip_list, responder_ip_list],\n [requester_mac, responder_mac]):\n for dst_ip_subnet in dst_ip_list:\n dst_ip = dst_ip_subnet.split('/')[0]\n arp_entries.append({'dst-ip': dst_ip, 'dst-mac': dst_mac})\n self.switch.conf['arp-table'] = arp_entries\n except:\n logging.error(\"Failed to set ARP table\")\n return False\n\n ## Generate the events of each iteration for switch config\n per_iter_event_list = self.traffic_conf['data-pkt-events']\n msg_size = self.traffic_conf['message-size']\n mtu = self.traffic_conf['mtu']\n num_msgs_per_qp = self.traffic_conf['num-msgs-per-qp']\n num_pkts_per_msg = int(math.ceil(msg_size / mtu))\n self.switch.conf['traffic'] = {}\n self.switch.conf['traffic']['num-msgs-per-qp'] = num_msgs_per_qp\n self.switch.conf['traffic']['num-pkts-per-msg'] = num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'] = []\n\n if per_iter_event_list is None or len(per_iter_event_list) == 0:\n ## No events at all\n return True\n\n for i in range(num_msgs_per_qp):\n for per_iter_event in per_iter_event_list:\n global_event = copy.deepcopy(per_iter_event)\n\n ## This event is applied to all the packets of the message. 
We need to expand it!\n if str(global_event['psn']).lower() == 'all':\n for psn in range(num_pkts_per_msg):\n global_event['psn'] = psn + i * num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event))\n else:\n global_event['psn'] += i * num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event))\n\n return True\n\n def ping_mesh(self):\n \"\"\" Ping all the IP addresses between requester and responder to check the connectivity\n\n Returns:\n bool: True if all the IP addresses can be pinged successfully, False otherwise\n \"\"\"\n for requester_ip_subnet in self.requester.conf['nic']['ip-list']:\n requester_ip = requester_ip_subnet.split('/')[0]\n command = \"ping \" + requester_ip + \" -c 5 -i 0.2\"\n ret_val, err_info, exit_status = self.responder.execute_command(command)\n if exit_status != 0:\n logging.error(\"Failed to ping ip \" + requester_ip)\n logging.error(\"[Command return info]: %s %s\" % (', '.join(ret_val), ', '.join(err_info)))\n return False\n\n for responder_ip_subnet in self.responder.conf['nic']['ip-list']:\n responder_ip = responder_ip_subnet.split('/')[0]\n command = \"ping \" + responder_ip + \" -c 5 -i 0.2\"\n ret_val, err_info, exit_status = self.requester.execute_command(command)\n if exit_status != 0:\n logging.error(\"Failed to ping ip \" + responder_ip)\n logging.error(\"[Command return info]: %s %s\" % (ret_val, err_info))\n return False\n\n logging.info(\"Successfully pinged all the IP addresses between requester and responder\")\n return True\n\n def generate_switch_config_file(self):\n \"\"\" Generate the switch configuration file and copy it to the switch\n\n Returns:\n bool: True if the switch configuration file is generated and copied successfully, False otherwise\n \"\"\"\n ## Get the mac address for all the hosts\n self.requester.get_mac_address()\n self.responder.get_mac_address()\n self.requester_mirror.get_mac_address()\n self.responder_mirror.get_mac_address()\n\n ## Generate config for Match-Action table in switch\n if self.generate_switch_table_config() == False:\n logging.error(\"Failed to generate switch table configuration\")\n return False\n\n ## Dump the switch configuration into a file, and copy it to the switch\n if self.switch.dump_controller_config(self.local_workspace) == False:\n logging.error(\"Failed to dump switch config\")\n return False\n\n return True\n\n def __is_valid_traffc(self):\n \"\"\" Check if the traffic configuration is valid, including:\n 1. The tx-depth should be 1 or > 1\n 2. 
If tx-depth > 1, then we can only inject ECN marking events\n\n Returns:\n bool: True if the traffic configuration is valid, False otherwise\n \"\"\"\n try:\n data_pkt_events = self.traffic_conf['data-pkt-events']\n tx_depth = self.traffic_conf['tx-depth']\n\n if tx_depth == 1:\n return True\n elif tx_depth <= 0:\n return False\n\n for event in data_pkt_events:\n if event['type'] != 'ecn':\n logging.error(\"Cannot inject %s event when tx depth = %d\" % (event['type'], tx_depth))\n return False\n except:\n logging.error(\"Failed to parse traffic configuration\")\n return False\n\n return True\n\n def run_experiment(self):\n \"\"\" Run the experiment\n\n Returns:\n bool: True if the experiment is completed successfully, False otherwise\n \"\"\"\n\n ## Check if traffic configuration is valid\n if self.__is_valid_traffc() == False:\n logging.error(\"Invalid traffic configuration\")\n return False\n\n ## Run switch program\n if self.switch.run_switch() == False:\n logging.error(\"Failed to run switch\")\n return False\n\n ## Sleep for 1 second to make sure control plane is listenning (for client message)\n time.sleep(1)\n\n ## Configure the servers\n if self.requester.config_traffic_gen() == False:\n logging.error(\"Failed to config RDMA requester\")\n return False\n\n if self.responder.config_traffic_gen() == False:\n logging.error(\"Failed to config RDMA responder\")\n return False\n\n if self.requester_mirror.config_packet_capture() == False:\n logging.error(\"Failed to config packet capture on requester mirror\")\n return False\n\n if self.responder_mirror.config_packet_capture() == False:\n logging.error(\"Failed to config packet capture on responder mirror\")\n return False\n\n ## Check the connectivity through pingmesh (try 5 rounds)\n num_tries = 0\n pingmesh_ret = False\n\n while num_tries < 5:\n pingmesh_ret = self.ping_mesh()\n if pingmesh_ret == True:\n break\n num_tries += 1\n time.sleep(1)\n\n if pingmesh_ret == False:\n logging.error(\"Failed to ping all the IP addresses between requester and responder\")\n return False\n\n ## Launch packet capture for both side\n ## Prerequisite: config hugepage and igb_uio if needed\n if self.requester_mirror.run_packet_capture() == False:\n logging.error(\"Failed to run packet capture on requester mirror\")\n return False\n\n if self.responder_mirror.run_packet_capture() == False:\n logging.error(\"Failed to run packet capture on responder mirror\")\n return False\n\n time.sleep(3)\n\n ## Dump the counters before running\n if self.requester.dump_counters(host.REQ_START_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on requester before running\")\n return False\n\n if self.responder.dump_counters(host.RSP_START_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on responder before running\")\n return False\n\n ## Launch RDMA server first\n run_server_ret = self.responder.run_traffic_gen_server(self.traffic_conf)\n if run_server_ret == False:\n logging.error(\"Failed to run RDMA server\")\n return False\n\n time.sleep(2)\n\n ## Launch RDMA client\n try:\n destination_ip_subnet = self.responder.conf['nic']['ip-list'][0]\n destination_ip = destination_ip_subnet.split('/')[0]\n except:\n logging.error(\"Failed to get destination IP\")\n return False\n\n run_client_ret = self.requester.run_traffic_gen_client(traffic_conf=self.traffic_conf,\n destination_ip=destination_ip,\n controller_ip=self.switch.conf['control-ip'],\n controller_listen_port=self.switch.conf['listen-port'])\n if run_client_ret == 
False:\n logging.error(\"Failed to run RDMA client\")\n return False\n\n if self.switch.dump_results() == False:\n logging.error(\"Failed to dump results from switch\")\n return False\n\n if self.requester.dump_counters(host.REQ_FINISH_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on requester after running\")\n return False\n\n if self.responder.dump_counters(host.RSP_FINISH_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on responder after running\")\n return False\n\n logging.info(\"Experiment completed successfully\")\n return True\n\n def clean_up(self):\n \"\"\" Clean up the environment after the experiment\n\n Returns:\n bool: True if the clean up is completed successfully, False otherwise\n \"\"\"\n logging.info(\"Start cleaning up the environment\")\n\n if self.switch.clean_up() == False:\n logging.error(\"Failed to clean up switch\")\n return False\n\n if self.requester.clean_up() == False:\n logging.error(\"Failed to clean up requester\")\n return False\n\n if self.responder.clean_up() == False:\n logging.error(\"Failed to clean up responder\")\n return False\n\n if self.requester_mirror.clean_up() == False:\n logging.error(\"Failed to clean up requester mirror\")\n return False\n\n if self.responder_mirror.clean_up() == False:\n logging.error(\"Failed to clean up responder mirror\")\n return False\n\n return True\n\n def fetch_results(self, iter_id=0):\n \"\"\" Fetch the results of iteration 'iter_id', including:\n 1. Switch table entries and counters\n 2. Packet trace (pcap file)\n 3. Configs and end-to-end results from RDMA hosts\n\n Args:\n iter_id (int, optional): iteration ID, defaults to 0\n\n Returns:\n bool: True if the result collection is completed successfully, False otherwise\n \"\"\"\n ## Make the results dir if it does not exist\n iter_result_path = os.path.join(self.result_path, str(iter_id))\n cmd = \"mkdir -p %s\" % iter_result_path\n try:\n subprocess.call(cmd, shell=True)\n except:\n logging.error(\"Failed to create result directory %s\" % iter_result_path)\n return False\n\n if self.switch.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from switch\")\n return False\n\n if self.requester_mirror.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from requester mirror\")\n return False\n\n if self.responder_mirror.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from responder mirror\")\n return False\n\n if self.requester.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from requester\")\n return False\n\n if self.responder.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from responder\")\n return False\n\n logging.info(\"Finished fetching results for iteration %d\" % iter_id)\n return True\n\n def merge_traces(self, iter_id=0):\n iter_pcap_dir_path = os.path.join(self.result_path, str(iter_id), host.PCAP_RESULT_DIR)\n src_pcap_file_list = [os.path.join(iter_pcap_dir_path,\n self.requester_mirror.conf['pkt-dump-conf']['dump-filename']),\n os.path.join(iter_pcap_dir_path,\n self.responder_mirror.conf['pkt-dump-conf']['dump-filename'])]\n target_pcap_path = os.path.join(self.result_path,\n str(iter_id),\n host.PCAP_RESULT_DIR,\n self.aggregate_pcap_filename)\n packet_list = pcap_process.merge_pcaps(src_pcap_file_list)\n if packet_list is None:\n logging.error(\"Failed to merge pcap files for iteration %d\" % iter_id)\n return False\n\n if 
pcap_process.dump_pkts_to_pcap(target_pcap_path, packet_list) == False:\n logging.error(\"Failed to dump packets to pcap file %s\" % target_pcap_path)\n return False\n\n logging.info(\"Successfully merged pcap files for iteration %d\" % iter_id)\n\n def check_integrity(self, iter_id=0):\n ## Check if the collected packet trace passes integrity check\n pcap_path = os.path.join(self.result_path,\n str(iter_id),\n host.PCAP_RESULT_DIR,\n self.aggregate_pcap_filename)\n packet_list = get_packet_list(pcap_path)\n packet_list.sort(key=lambda x:x.get_switch_seqnum())\n logging.info(\"Packet trace sorted by switch sequence number.\")\n\n switch_state_snapshot = os.path.join(self.result_path,\n str(iter_id),\n switch.SWITCH_RESULT_DIR,\n switch.SWITCH_STATE_SNAPSHOT)\n port_map = {'requester': self.requester.conf['nic']['switch-port'],\n 'responder': self.responder.conf['nic']['switch-port'],\n 'requester-mirror': self.requester_mirror.conf['nic']['switch-port'],\n 'responder-mirror': self.responder_mirror.conf['nic']['switch-port']}\n switch_counter = SwitchCounter(switch_state_snapshot, port_map)\n\n integrity_checker = IntegrityCheck(packet_list=packet_list,\n switch_counter=switch_counter,\n requester_ip_list=self.get_requester_ip_list(),\n responder_ip_list=self.get_responder_ip_list())\n\n if integrity_checker.check() == True:\n logging.info(\"Integrity check passed\")\n return True\n else:\n logging.info(\"Integrity check failed\")\n return False" }, { "identifier": "SwitchCounter", "path": "lumina/analyzer/counter/switch_counter.py", "snippet": "class SwitchCounter:\n \"\"\" Class to parse switch counter files\n\n Attributes:\n _counter (dict of dict): the switch counters with the following format:\n {'requester': {'ingress': counter_value, 'egress': counter_value},\n 'responder': {'ingress': counter_value, 'egress': counter_value},\n 'requester-mirror': {'ingress': counter_value, 'egress': counter_value},\n 'responder-mirror': {'ingress': counter_value, 'egress': counter_value}}\n \"\"\"\n def __init__(self, snapshot_filename, port_map):\n \"\"\" Constructor\n\n Args:\n snapshot_filename (str): the file where switch dumps its counters\n port_map (dict): the mapping between port name and port number\n\n Returns:\n N/A\n \"\"\"\n with open(snapshot_filename, \"r\") as stream:\n conf = yaml.safe_load(stream)\n try:\n ingress_counters = conf['counter']['ingress']\n egress_counters = conf['counter']['egress']\n except:\n print(\"Bad yaml format in %s\" % snapshot_filename)\n sys.exit(-1)\n\n requester_port = port_map['requester']\n responder_port = port_map['responder']\n requester_mirror_port = port_map['requester-mirror']\n responder_mirror_port = port_map['responder-mirror']\n\n self._counter = {'requester' : {'ingress':0, 'egress': 0},\n 'responder' : {'ingress':0, 'egress': 0},\n 'requester-mirror' : {'ingress':0, 'egress': 0},\n 'responder-mirror' : {'ingress':0, 'egress': 0}}\n try:\n self._counter['requester']['ingress'] = ingress_counters[requester_port]\n self._counter['responder']['ingress'] = ingress_counters[responder_port]\n self._counter['requester-mirror']['ingress'] = ingress_counters[requester_mirror_port]\n self._counter['responder-mirror']['ingress'] = ingress_counters[responder_mirror_port]\n\n self._counter['requester']['egress'] = egress_counters[requester_port]\n self._counter['responder']['egress'] = egress_counters[responder_port]\n self._counter['requester-mirror']['egress'] = egress_counters[requester_mirror_port]\n self._counter['responder-mirror']['egress'] = 
egress_counters[responder_mirror_port]\n\n except:\n print(\"Port number not exist in the switch snapshot\")\n sys.exit(-1)\n\n def get_counter(self):\n \"\"\" Return the switch counters (dict of dict) \"\"\"\n return self._counter" }, { "identifier": "MLNXHostCounter", "path": "lumina/analyzer/counter/host_counter.py", "snippet": "class MLNXHostCounter(HostCounter):\n \"\"\" Class to parse MLNX host counter files \"\"\"\n def __init__(self, counter_start_filename, counter_finish_filename):\n \"\"\" Constructor\n\n Args:\n counter_start_filename (str): the file where host dumps its counters at the start phase\n counter_finish_filename (str): the file where host dumps its counters at the finish phase\n\n Returns:\n N/A\n \"\"\"\n super().__init__(counter_start_filename, counter_finish_filename)\n\n def get_port_rcv_packets(self):\n \"\"\" Return the number of received packets \"\"\"\n return self._counter['port-counters']['port_rcv_packets']\n\n def get_port_xmit_packets(self):\n \"\"\" Return the number of transmitted packets \"\"\"\n return self._counter['port-counters']['port_xmit_packets']\n\n def get_num_packet_seq_err(self):\n \"\"\" Return the number of received NAK sequence error packets \"\"\"\n return self._counter['hw-counters']['packet_seq_err']\n\n def get_num_out_of_sequence(self):\n \"\"\" Return the number of out-of-sequence packets received \"\"\"\n return self._counter['hw-counters']['out_of_sequence']\n\n def get_num_dup_requests(self):\n \"\"\" Return the number of duplicate requests \"\"\"\n return self._counter['hw-counters']['duplicate_request']\n\n def implied_nak_seq_err(self):\n \"\"\" Return the number of READ requests implying sequence errors \"\"\"\n return self._counter['hw-counters']['implied_nak_seq_err']\n\n def get_num_cnp_sent(self):\n \"\"\" Return the number of congestion notification packets sent by notification point \"\"\"\n return self._counter['hw-counters']['np_cnp_sent']\n\n def get_num_ecn_marked_packets(self):\n \"\"\" Return the number of ECN marked RoCEv2 packets received by notification point \"\"\"\n return self._counter['hw-counters']['np_ecn_marked_roce_packets']\n\n def get_num_cnp_handled(self):\n \"\"\" Return the number of congestion notification packets handled by reaction point \"\"\"\n return self._counter['hw-counters']['rp_cnp_handled']\n\n def get_num_icrc_errors(self):\n \"\"\" Return the number of RoCE packets with ICRC errors received \"\"\"\n return self._counter['hw-counters']['rx_icrc_encapsulated']\n\n def get_num_timeout_err(self):\n \"\"\" Return the number of times QP's ack timer expired for RC, XRC, DCT QPs at the sender side \"\"\"\n return self._counter['hw-counters']['local_ack_timeout_err']\n\n def get_num_discards_dict_tx(self):\n \"\"\" Return the number of TX discarded packets (dict)\"\"\"\n discards_dict_tx = {}\n for x in self._counter['ethtool-counters'].keys():\n if 'discard' in x and 'tx' in x:\n discards_dict_tx[x] = self._counter['ethtool-counters'][x]\n return discards_dict_tx\n\n def get_num_discards_dict_rx(self):\n \"\"\" Return the number of RX discarded packets (dict) \"\"\"\n discards_dict_rx = {}\n for x in self._counter['ethtool-counters'].keys():\n if 'discard' in x and 'rx' in x:\n discards_dict_rx[x] = self._counter['ethtool-counters'][x]\n return discards_dict_rx" }, { "identifier": "IntelHostCounter", "path": "lumina/analyzer/counter/host_counter.py", "snippet": "class IntelHostCounter(HostCounter):\n \"\"\" Class to parse Intel host counter files \"\"\"\n def __init__(self, 
counter_start_filename, counter_finish_filename):\n \"\"\" Constructor\n\n Args:\n counter_start_filename (str): the file where host dumps its counters at the start phase\n counter_finish_filename (str): the file where host dumps its counters at the finish phase\n\n Returns:\n N/A\n \"\"\"\n super().__init__(counter_start_filename, counter_finish_filename)\n\n def get_num_cnp_sent(self):\n \"\"\" Return the number of congestion notification packets sent by notification point \"\"\"\n return self._counter['hw-counters']['cnpSent']\n\n def get_num_ecn_marked_packets(self):\n \"\"\" Return the number of ECN marked RoCEv2 packets received by notification point \"\"\"\n return self._counter['hw-counters']['RxECNMrkd']\n\n def get_num_cnp_handled(self):\n \"\"\" Return the number of congestion notification packets handled by reaction point \"\"\"\n return self._counter['hw-counters']['cnpHandled']\n\n def get_num_discards_dict(self):\n \"\"\" Return the number of discarded packets (dict) \"\"\"\n discards_dict= {}\n for x in self._counter['hw-counters'].keys():\n if 'discard' in x:\n discards_dict[x] = self._counter['hw-counters'][x]\n return discards_dict" }, { "identifier": "get_packet_list", "path": "lumina/analyzer/pcap_processor/pcap_process.py", "snippet": "def get_packet_list(pcap_file):\n \"\"\" Read a pcap file and return a list of packets\n\n Args:\n pcap_file (str): The pcap file to read\n\n Returns:\n list: The list of packets if successful, empty list otherwise\n\n Raises:\n IOError: If the pcap file cannot be opened for reading\n Exception: If the pcap file cannot be read\n \"\"\"\n packet_list = []\n try:\n with open(pcap_file, 'rb') as file_read:\n pcap = dpkt.pcap.Reader(file_read)\n for packet in pcap:\n packet_list.append(roce_packet.RRoCEPacket(packet))\n except IOError:\n logging.error(\"Unable to open pcap file %s. 
Please check your filename.\" % pcap_file)\n raise IOError\n\n except:\n logging.error(\"Failed to read pcap file %s.\" % pcap_file)\n raise Exception\n\n logging.info(\"Successfully read %d packets from %s.\" % (len(packet_list), pcap_file))\n return packet_list" }, { "identifier": "LatencyMeasure", "path": "lumina/analyzer/measurer/latency_measure.py", "snippet": "class LatencyMeasure:\n \"\"\" Class to measure the latency between packets for some events,\n e.g., NACK latency, Retransmission latency, CNP latency\n\n Attributes:\n packet_list (list of RRoCEPacket objects): list of packets\n qp_info_list (list of dict): list of QP info with the following format:\n [{'psn_rcv': initial packet sequence number from the receiver qp,\n 'psn_snd': initial packet sequence number from the sender qp,\n 'qpn_rcv': receiver qp number,\n 'qpn_snd': sender qp number,\n 'ip_rcv' : receiver IP\n 'ip_snd' : sender IP}]\n is_read (bool): if the QPs use RDMA read verb\n \"\"\"\n def __init__(self, packet_list, qp_info_list, is_read=False):\n \"\"\" Constructor\n\n Args:\n packet_list (list of RRoCEPacket objects): list of packets\n qp_info_list (list of dict): list of QP info with the following format:\n [{'psn_rcv': initial packet sequence number from the receiver qp,\n 'psn_snd': initial packet sequence number from the sender qp,\n 'qpn_rcv': receiver qp number,\n 'qpn_snd': sender qp number,\n 'ip_rcv' : receiver IP\n 'ip_snd' : sender IP}]\n is_read (bool): if the QPs use RDMA read verb (default: False)\n\n Returns:\n N/A\n \"\"\"\n self.packet_list = packet_list\n self.qp_info_list = qp_info_list\n self.is_read = is_read\n\n def get_peer_qp_info(self, dest_qpn, dest_ip):\n \"\"\" Get the info of the peer QP (qpn, ip) of a given qp (qpn, ip)\n\n Args:\n dest_qpn (int): destination QP number\n dest_ip (str): destination IP\n\n Returns:\n int: peer QP number (None if not found)\n str: peer IP (None if not found)\n \"\"\"\n for qp_info in self.qp_info_list:\n if qp_info['qpn_snd'] == dest_qpn and qp_info['ip_snd'] == dest_ip:\n return qp_info['qpn_rcv'], qp_info['ip_rcv']\n elif qp_info['qpn_rcv'] == dest_qpn and qp_info['ip_rcv'] == dest_ip:\n return qp_info['qpn_snd'], qp_info['ip_snd']\n\n return None, None\n\n def get_bit_error_pkts(self, relative_dest_qpn=None):\n \"\"\" Get the packets marked with bit error flag\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of RRoCEPacket objects: the list of packets marked with bit error flag\n \"\"\"\n error_pkt_list = []\n\n if relative_dest_qpn != None:\n dest_qpn = self.qp_info_list[relative_dest_qpn]['qpn_rcv']\n dest_ip = self.qp_info_list[relative_dest_qpn]['ip_rcv']\n\n for packet in self.packet_list:\n if packet.is_bit_error() == False:\n continue\n\n if relative_dest_qpn == None or \\\n (packet.get_roce_dest_qp() == dest_qpn and packet.get_dst_ip() == dest_ip):\n error_pkt_list.append(packet)\n\n return error_pkt_list\n\n def get_dropped_pkts(self, relative_dest_qpn=None):\n \"\"\" Get the packets marked with drop flag\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of RRoCEPacket objects: the list of packets marked with drop flag\n \"\"\"\n dropped_pkt_list = []\n\n if relative_dest_qpn != None:\n dest_qpn = self.qp_info_list[relative_dest_qpn]['qpn_rcv']\n dest_ip = self.qp_info_list[relative_dest_qpn]['ip_rcv']\n\n for packet in self.packet_list:\n if packet.is_dropped() == False:\n continue\n\n if relative_dest_qpn 
== None or \\\n (packet.get_roce_dest_qp() == dest_qpn and packet.get_dst_ip() == dest_ip):\n dropped_pkt_list.append(packet)\n\n return dropped_pkt_list\n\n def get_ecn_pkts(self):\n \"\"\" Get the packets marked with ECN\n\n Returns:\n list of RRoCEPacket objects: the list of packets marked with ECN\n \"\"\"\n ecn_pkt_list = []\n\n for packet in self.packet_list:\n if packet.is_ecn():\n ecn_pkt_list.append(packet)\n\n return ecn_pkt_list\n\n def get_cnp_pkts(self):\n \"\"\" Get the congestion notification packets\n\n Returns:\n list of RRoCEPacket objects: the list of congestion notification packets\n \"\"\"\n cnp_pkt_list = []\n\n for packet in self.packet_list:\n if packet.is_cnp():\n cnp_pkt_list.append(packet)\n\n return cnp_pkt_list\n\n def get_undelivered_pkts(self, relative_dest_qpn = None):\n \"\"\" Get the undelivered packets (dropped or marked with bit error)\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of RRoCEPacket objects: the list of undelivered packets\n \"\"\"\n undelivered_pkt_list = []\n\n if relative_dest_qpn != None:\n dest_qpn = self.qp_info_list[relative_dest_qpn]['qpn_rcv']\n dest_ip = self.qp_info_list[relative_dest_qpn]['ip_rcv']\n\n for packet in self.packet_list:\n if packet.is_delivered() == True:\n continue\n\n if relative_dest_qpn == None or \\\n (packet.get_roce_dest_qp() == dest_qpn and packet.get_dst_ip() == dest_ip):\n undelivered_pkt_list.append(packet)\n\n return undelivered_pkt_list\n\n def get_nack(self, undelivered_pkt):\n \"\"\" Given an undelivered packet, return the NACK packet that triggers its retransmission.\n If there's no NACK packet found for the undelivered packet, return None.\n Note that for RDMA READ, NACK is essentially a READ request packet that triggers retransmission\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n RRoCEPacket object: the NACK packet that triggers the retransmission of the undelivered packet\n (None if not found)\n \"\"\"\n undelivered_pkt_dest_qpn = undelivered_pkt.get_roce_dest_qp()\n undelivered_pkt_dst_ip = undelivered_pkt.get_dst_ip()\n undelivered_pkt_psn = undelivered_pkt.get_roce_pkt_seq()\n undelivered_pkt_switch_seqnum = undelivered_pkt.get_switch_seqnum()\n matched_dest_qpn, matched_dst_ip = self.get_peer_qp_info(undelivered_pkt_dest_qpn, undelivered_pkt_dst_ip)\n\n if matched_dest_qpn == None or matched_dst_ip == None:\n logging.error(\"QP info of the undelivered packet not found in qp_info_list dumped by switch\")\n return None\n\n for packet in self.packet_list:\n if self.is_same_roce_data_pkt(packet, undelivered_pkt) and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n return None\n\n if ((self.is_read and packet.is_roce_read_req()) or packet.is_roce_nack()) and \\\n packet.get_dst_ip() == matched_dst_ip and \\\n packet.get_roce_dest_qp() == matched_dest_qpn and \\\n packet.get_roce_pkt_seq() == undelivered_pkt_psn and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n ## We return the first packet appears after the undelivered packet and matches the undelivered packet\n return packet\n\n return None\n\n def get_qp_first_nack_before_retrans(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the first NACK packet on its QP between it and its retransmission.\n If there's no NACK packet found before the retransmission, return None.\n Note that for RDMA READ, NACK is essentially a READ request packet\n\n Args:\n undelivered_pkt (RRoCEPacket 
object): the undelivered packet\n\n Returns:\n RRoCEPacket object: the first NACK packet on the QP between the undelivered packet and its retransmission\n (None if not found)\n \"\"\"\n undelivered_pkt_dest_qpn = undelivered_pkt.get_roce_dest_qp()\n undelivered_pkt_dst_ip = undelivered_pkt.get_dst_ip()\n undelivered_pkt_psn = undelivered_pkt.get_roce_pkt_seq()\n undelivered_pkt_switch_seqnum = undelivered_pkt.get_switch_seqnum()\n matched_dest_qpn, matched_dst_ip = self.get_peer_qp_info(undelivered_pkt_dest_qpn, undelivered_pkt_dst_ip)\n\n if matched_dest_qpn == None or matched_dst_ip == None:\n logging.error(\"QP info of the undelivered packet not found in qp_info_list dumped by switch\")\n return None\n\n for packet in self.packet_list:\n if self.is_same_roce_data_pkt(packet, undelivered_pkt) and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n return None\n\n if ((self.is_read and packet.is_roce_read_req()) or packet.is_roce_nack()) and \\\n packet.get_dst_ip() == matched_dst_ip and \\\n packet.get_roce_dest_qp() == matched_dest_qpn and \\\n packet.get_roce_pkt_seq() <= undelivered_pkt_psn and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n return packet\n\n return None\n\n def get_qp_next_delivered_pkt(self, current_pkt):\n \"\"\" For a packet, return the next delivered packet on the same QP.\n\n Args:\n current_pkt (RRoCEPacket object): the current packet\n\n Returns:\n RRoCEPacket object: the next delivered packet on the same QP (None if not found)\n \"\"\"\n switch_seqnum = current_pkt.get_switch_seqnum()\n\n for packet in self.packet_list:\n if self.is_same_qp_roce_data_pkt(packet, current_pkt) and \\\n packet.get_switch_seqnum() > switch_seqnum and \\\n packet.is_delivered():\n return packet\n\n return None\n\n def get_retransmit_pkt(self, undelivered_pkt):\n \"\"\" Given an undelivered packet, return its retransmission packet.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n RRoCEPacket object: the retransmission packet of the undelivered packet (None if not found)\n \"\"\"\n undelivered_pkt_switch_seqnum = undelivered_pkt.get_switch_seqnum()\n\n for packet in self.packet_list:\n if self.is_same_roce_data_pkt(packet, undelivered_pkt) and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n ## We return the first packet appears after the undelivered packet and matches the undelivered packet\n return packet\n\n return None\n\n def get_latency_between_pkts(self, packet_alpha, packet_beta):\n \"\"\" Return the time of packet_beta - time of packet_alpha in seconds\n\n Args:\n packet_alpha (RRoCEPacket object): the first packet\n packet_beta (RRoCEPacket object): the second packet\n\n Returns:\n float: the time difference between two packets in seconds\n \"\"\"\n return packet_beta.get_switch_timestamp() - packet_alpha.get_switch_timestamp()\n\n def is_same_roce_data_pkt(self, packet_alpha, packet_beta):\n \"\"\" Return if two packets are the same RoCE data packet (same src ip, dst ip, dest qp, and psn)\n\n Args:\n packet_alpha (RRoCEPacket object): the first packet\n packet_beta (RRoCEPacket object): the second packet\n\n Returns:\n bool: True if two packets are the same RoCE data packet, False otherwise\n \"\"\"\n return packet_alpha.get_src_ip() == packet_beta.get_src_ip() and \\\n packet_alpha.get_dst_ip() == packet_beta.get_dst_ip() and \\\n packet_alpha.get_roce_dest_qp() == packet_beta.get_roce_dest_qp() and \\\n packet_alpha.get_roce_pkt_seq() == packet_beta.get_roce_pkt_seq()\n\n def 
is_same_qp_roce_data_pkt(self, packet_alpha, packet_beta):\n \"\"\" Return if two packets are RoCE data packets on the same QP (same src ip, dst ip, and dest qp)\n\n Args:\n packet_alpha (RRoCEPacket object): the first packet\n packet_beta (RRoCEPacket object): the second packet\n\n Returns:\n bool: True if two packets are RoCE data packets on the same QP, False otherwise\n \"\"\"\n return packet_alpha.get_src_ip() == packet_beta.get_src_ip() and \\\n packet_alpha.get_dst_ip() == packet_beta.get_dst_ip() and \\\n packet_alpha.get_roce_dest_qp() == packet_beta.get_roce_dest_qp()\n\n def get_qp_next_delivered_pkt_latency(self, pkt):\n \"\"\" Get the latency between 'pkt' and next 'delivered' packet on the same QP\n\n Args:\n pkt (RRoCEPacket object): the packet\n\n Returns:\n float: the latency between 'pkt' and next 'delivered' packet on the same QP\n (None if not found)\n \"\"\"\n\n next_pkt = self.get_qp_next_delivered_pkt(pkt)\n if next_pkt is None:\n return None\n\n return self.get_latency_between_pkts(pkt, next_pkt)\n\n def get_nack_gen_latency(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the NACK generation latency, i.e., the duration from the detection of\n the undelivered packet to the generation of the NACK packet that triggers its retransmission.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n float: the NACK generation latency for the undelivered packet (None if not found)\n \"\"\"\n nack_pkt = self.get_nack(undelivered_pkt)\n if nack_pkt == None:\n return None\n\n # NACK should be triggered by the next delivered packet on the same QP\n next_delivered_pkt = self.get_qp_next_delivered_pkt(undelivered_pkt)\n if self.is_same_roce_data_pkt(next_delivered_pkt, undelivered_pkt):\n # We should never reach here\n return None\n\n nack_gen_latency = self.get_latency_between_pkts(next_delivered_pkt, nack_pkt)\n return nack_gen_latency\n\n def get_nack_resp_latency(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the NACK response latency, i.e., the duration from the generation of\n the NACK packet to the retransmission of this undelivered packet.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n float: the NACK response latency for the undelivered packet (None if not found)\n \"\"\"\n nack_pkt = self.get_nack(undelivered_pkt)\n if nack_pkt == None:\n return None\n\n retransmit_pkt = self.get_retransmit_pkt(undelivered_pkt)\n if retransmit_pkt == None:\n return None\n\n nack_resp_latency = self.get_latency_between_pkts(nack_pkt, retransmit_pkt)\n return nack_resp_latency\n\n def get_retransmit_latency(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the retransmission latency, i.e., the duration from the packet\n to its retransmission.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n float: the retransmission latency for the undelivered packet (None if not found)\n \"\"\"\n retransmit_pkt = self.get_retransmit_pkt(undelivered_pkt)\n if retransmit_pkt == None:\n return None\n\n retransmit_latency = self.get_latency_between_pkts(undelivered_pkt, retransmit_pkt)\n return retransmit_latency\n\n def get_nack_gen_latency_list(self, relative_dest_qpn=None):\n \"\"\" Return a list of NACK generation latency for all undelivered packets with relative_dest_qpn\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of float: a list of NACK generation latency for all 
undelivered packets with relative_dest_qpn\n \"\"\"\n undelivered_pkts = self.get_undelivered_pkts(relative_dest_qpn)\n nack_latency_list = []\n\n for undelivered_pkt in undelivered_pkts:\n nack_pkt = self.get_nack(undelivered_pkt)\n if nack_pkt == None:\n nack_latency_list.append(None)\n else:\n nack_latency = self.get_latency_between_pkts(undelivered_pkt, nack_pkt)\n nack_latency_list.append(nack_latency)\n\n return nack_latency_list\n\n def get_retransmit_latency_list(self, relative_dest_qpn):\n \"\"\" Return a list of retransmission latency for all undelivered packets with relative_dest_qpn\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of float: a list of retransmission latency for all undelivered packets with relative_dest_qpn\n \"\"\"\n undelivered_pkts = self.get_undelivered_pkts(relative_dest_qpn)\n retransmit_latency_list = []\n\n for undelivered_pkt in undelivered_pkts:\n retransmit_pkt = self.get_retransmit_pkt(undelivered_pkt)\n if retransmit_pkt == None:\n retransmit_latency_list.append(None)\n else:\n retransmit_latency = self.get_latency_between_pkts(undelivered_pkt, retransmit_pkt)\n retransmit_latency_list.append(retransmit_latency)\n\n return retransmit_latency_list" }, { "identifier": "config_stream_handler", "path": "lumina/utils/config_loggers.py", "snippet": "def config_stream_handler(logger):\n \"\"\" Configure stream handler\n\n Args:\n logger (logging.Logger): Logger object\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n console.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n logger.addHandler(console)" }, { "identifier": "config_file_handler", "path": "lumina/utils/config_loggers.py", "snippet": "def config_file_handler(logger, log_file, no_format=False):\n \"\"\" Configure file handler\n\n Args:\n logger (logging.Logger): Logger object\n log_file (str): Log file path\n no_format (bool): If True, do not format log messages (default: False)\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n file_handler = logging.FileHandler(log_file, mode=\"w\")\n if no_format == False:\n file_handler.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n file_handler.setLevel(logging.INFO)\n logger.addHandler(file_handler)" }, { "identifier": "TRIGGER_OOS", "path": "lumina/analyzer/packet_parser/roce_packet.py", "snippet": "TRIGGER_OOS = 1" }, { "identifier": "TRIGGER_TIMEOUT", "path": "lumina/analyzer/packet_parser/roce_packet.py", "snippet": "TRIGGER_TIMEOUT = 2" } ]
import argparse, os, math, glob, logging, time
import lumina.analyzer.checker.integrity_check as integrity_check
import lumina.analyzer.checker.host_check as host_check
import lumina.analyzer.checker.gbn_check as gbn_check
import lumina.analyzer.checker.read_gbn_check as read_gbn_check
import lumina.orchestrator.host as host
import lumina.orchestrator.switch as switch
from lumina.analyzer.main import get_qp_info_list
from lumina.orchestrator.main import Orchestrator
from lumina.analyzer.counter.switch_counter import SwitchCounter
from lumina.analyzer.counter.host_counter import MLNXHostCounter, IntelHostCounter
from lumina.analyzer.pcap_processor.pcap_process import get_packet_list
from lumina.analyzer.measurer.latency_measure import LatencyMeasure
from lumina.utils.config_loggers import config_stream_handler, config_file_handler
from lumina.analyzer.packet_parser.roce_packet import TRIGGER_OOS, TRIGGER_TIMEOUT
13821
RESULT_FILENAME = "result.log" ## Max # of retries for each experiment iteration MAX_NB_EXP_RETRIES = 3 def setup_root_logger(orchestrator): """ Setup the root logger for the test Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: N/A """ root_logger = logging.getLogger() root_logger.handlers.clear() config_stream_handler(root_logger) config_file_handler(logger=root_logger, log_file=os.path.join(orchestrator.result_path, LOG_FILENAME), no_format=False) def run_traffic(orchestrator): """ Run the traffic and collect the results Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: bool: True if the experiment is successful, False otherwise """ orchestrator.rm_old_files() if orchestrator.sync_and_compile() == False: logging.error("Failed to sync and compile the code") sys.exit(-1) logging.info("Sync and compile completed") if orchestrator.generate_switch_config_file() == False: logging.error("Failed to generate switch configuration file") sys.exit(-1) num_repeats = orchestrator.get_num_repeats() for i in range(num_repeats): logging.info("=" * 100) nb_retry = 0 iter_result = False while nb_retry < MAX_NB_EXP_RETRIES: if orchestrator.run_experiment() == False: logging.error("Iteration %d: Failed to complete experiment" % i) logging.error("Iteration %d: Rerun experiment (retry: %d)" % i, nb_retry) nb_retry += 1 orchestrator.clean_up() time.sleep(5) continue logging.info("Iteration %d: Completed experiment" % i) try: orchestrator.clean_up() orchestrator.fetch_results(i) logging.info("Iteration %d: Fetch experiment results" % i) orchestrator.merge_traces(i) logging.info("Iteration %d: Merge the pcap files" % i) except: logging.error("Iteration %d: Result collection failed" % (i)) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 time.sleep(5) continue if orchestrator.check_integrity(i) == False: logging.error("Iteration %d: Integrity check failed" % (i)) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 time.sleep(5) continue iter_result = True break if iter_result is False: logging.error("Iteration %d: Still failed after %d retries" % (i, nb_retry)) return False return True def analyze_retrans_latency(pkt, latency_measurement, is_read, logger): """ Analyze the retransmission latency breakdown for an undelivered packet Args: pkt (Packet object): The undelivered packet latency_measurement (LatencyMeasure object): A LatencyMeasure object that can compute latency breakdown is_read (bool): If we use RDMA READ in this experiment logger (logging.Logger): A logger object Returns: N/A """ # All the undelivered packets should be retransmitted in our test cases if latency_measurement.get_retransmit_pkt(pkt) == None: logger.error("\t\t No retransmit packet found for this packet") logger.error("\t\t It is possible that this undelivered packet is a redundant transmission") return retrans_latency = latency_measurement.get_retransmit_latency(pkt) if is_read == True: # For RDMA READ, we should always find a NACK READ request that triggers retransmission nack = latency_measurement.get_nack(pkt) if nack is not None: trigger = nack.get_trigger() if trigger == TRIGGER_OOS: next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt) nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt) nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Out of sequence (OOS) 
triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6)) logger.info("\t\t NACK READ request generation latency: %fus" % (nack_gen_latency * 1e6)) logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6))
## All logs will be logged into file LOG_FILENAME
LOG_FILENAME = "test_gbn.log"
## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME
RESULT_FILENAME = "result.log"
## Max # of retries for each experiment iteration
MAX_NB_EXP_RETRIES = 3


def setup_root_logger(orchestrator):
    """ Setup the root logger for the test

    Args:
        orchestrator (Orchestrator object): Orchestrator object that contains all the configurations

    Returns:
        N/A
    """
    root_logger = logging.getLogger()
    root_logger.handlers.clear()
    config_stream_handler(root_logger)
    config_file_handler(logger=root_logger,
                        log_file=os.path.join(orchestrator.result_path, LOG_FILENAME),
                        no_format=False)


def run_traffic(orchestrator):
    """ Run the traffic and collect the results

    Args:
        orchestrator (Orchestrator object): Orchestrator object that contains all the configurations

    Returns:
        bool: True if the experiment is successful, False otherwise
    """
    orchestrator.rm_old_files()

    if orchestrator.sync_and_compile() == False:
        logging.error("Failed to sync and compile the code")
        sys.exit(-1)

    logging.info("Sync and compile completed")

    if orchestrator.generate_switch_config_file() == False:
        logging.error("Failed to generate switch configuration file")
        sys.exit(-1)

    num_repeats = orchestrator.get_num_repeats()

    for i in range(num_repeats):
        logging.info("=" * 100)
        nb_retry = 0
        iter_result = False

        while nb_retry < MAX_NB_EXP_RETRIES:
            if orchestrator.run_experiment() == False:
                logging.error("Iteration %d: Failed to complete experiment" % i)
                logging.error("Iteration %d: Rerun experiment (retry: %d)" % i, nb_retry)
                nb_retry += 1
                orchestrator.clean_up()
                time.sleep(5)
                continue

            logging.info("Iteration %d: Completed experiment" % i)

            try:
                orchestrator.clean_up()
                orchestrator.fetch_results(i)
                logging.info("Iteration %d: Fetch experiment results" % i)
                orchestrator.merge_traces(i)
                logging.info("Iteration %d: Merge the pcap files" % i)
            except:
                logging.error("Iteration %d: Result collection failed" % (i))
                logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry))
                nb_retry += 1
                time.sleep(5)
                continue

            if orchestrator.check_integrity(i) == False:
                logging.error("Iteration %d: Integrity check failed" % (i))
                logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry))
                nb_retry += 1
                time.sleep(5)
                continue

            iter_result = True
            break

        if iter_result is False:
            logging.error("Iteration %d: Still failed after %d retries" % (i, nb_retry))
            return False

    return True


def analyze_retrans_latency(pkt, latency_measurement, is_read, logger):
    """ Analyze the retransmission latency breakdown for an undelivered packet

    Args:
        pkt (Packet object): The undelivered packet
        latency_measurement (LatencyMeasure object): A LatencyMeasure object that can compute latency breakdown
        is_read (bool): If we use RDMA READ in this experiment
        logger (logging.Logger): A logger object

    Returns:
        N/A
    """
    # All the undelivered packets should be retransmitted in our test cases
    if latency_measurement.get_retransmit_pkt(pkt) == None:
        logger.error("\t\t No retransmit packet found for this packet")
        logger.error("\t\t It is possible that this undelivered packet is a redundant transmission")
        return

    retrans_latency = latency_measurement.get_retransmit_latency(pkt)
    if is_read == True:
        # For RDMA READ, we should always find a NACK READ request that triggers retransmission
        nack = latency_measurement.get_nack(pkt)
        if nack is not None:
            trigger = nack.get_trigger()
            if trigger == TRIGGER_OOS:
                next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt)
                nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt)
                nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt)
                logger.info("\t\t Out of sequence (OOS) triggered retransmission")
                logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6))
                logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6))
                logger.info("\t\t NACK READ request generation latency: %fus" % (nack_gen_latency * 1e6))
                logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6))
elif trigger == TRIGGER_TIMEOUT:
10
2023-12-09 08:21:14+00:00
16k
boweniac/autogan
autogan/agents/tool_agent_search.py
[ { "identifier": "CodeExecution", "path": "autogan/tools/code_execution_tool.py", "snippet": "class CodeExecution:\n def __init__(self, work_dir: Optional[str] = None):\n \"\"\"A class for code execution\n 用于代码执行的类\n\n Supports python, bash, shell, powershell code\n 支持 python, bash, shell, powershell 代码\n\n Please note when using:\n 使用时请注意:\n\n 1.Code must be encapsulated with ``` symbol\n 1.代码必须使用 ``` 符号封装\n\n 2.Must be run in a docker environment\n 2.须在 docker 环境中运行\n\n :param work_dir: The relative path for code execution, default is extensions\n 执行代码的相对路径,默认为 extensions\n \"\"\"\n if work_dir is None:\n work_dir = \"extensions\"\n self._work_dir = os.getcwd() + \"/\" + work_dir\n self._win32 = sys.platform == \"win32\"\n self._path_separator = self._win32 and \"\\\\\" or \"/\"\n\n def code_execution_reply(self, text: str) -> Tuple[str, int]:\n \"\"\"Execute code and return result\n 执行代码并返回结果\n\n :param text: Code must be encapsulated with ``` symbol\n 代码必须使用 ``` 符号封装\n\n :return:\n --execution_result: Execution result\n 执行结果\n --tokens: Tokens of the execution result\n 执行结果的 tokens\n \"\"\"\n\n # Determine whether it is running in docker\n if os.path.exists(\"/.dockerenv\"):\n lang, code = self.extract_code(text)\n if code is None:\n exitcode = 1\n output = \"Submit your Python code to me and I can tell you the execution result. But I can't write code or talk to you. So please just submit the completed code to me encapsulated with ``` symbols. And you should always use the 'print' function for the output\"\n else:\n exitcode, output = self.execute(code, lang=lang)\n else:\n exitcode = 1\n output = \"executing code needs to run in a docker environment\"\n\n if not output:\n exitcode = 1\n output = \"You should always use the 'print' function for the output\"\n\n result = \"execution succeeded\" if exitcode == 0 else \"execution failed\"\n if exitcode != 0:\n output += \"\\nIf you need to install dependencies, you can send me the code for installing dependencies. 
Like ```pip install openai```\"\n execution_result = f\"exitcode: {exitcode} ({result})\\n{output}\"\n else:\n execution_result = f\"exitcode: {exitcode} ({result})\\nCode output: \\n{output}\"\n tokens = count_text_tokens(execution_result)\n\n return execution_result, tokens\n\n def execute(\n self,\n code: str,\n lang: Optional[str] = None,\n timeout: Optional[int] = 600,\n ) -> Tuple[int, str]:\n \"\"\"Execute code\n 执行代码\n\n :param code: Code to be executed\n :param lang: Code language, if empty, will try to infer the language from the code\n :param timeout: Maximum code execution time (seconds)\n\n :return:\n --exitcode: exitcode\n --output: Execution result\n \"\"\"\n try:\n if not lang:\n lang = self.infer_lang(code)\n\n if lang not in [\"bash\", \"shell\", \"sh\", \"python\", \"Python\"]:\n return 1, \"unknown language\"\n\n print(\n colored(\n f\"\\n\\n>>>>>>>> EXECUTING CODE BLOCK (language is {lang})...\",\n \"red\",\n ),\n flush=True,\n )\n\n if self._win32 and lang in [\"sh\", \"shell\"]:\n lang = \"ps1\"\n\n # Create a temporary file\n code_hash = md5(code.encode()).hexdigest()\n filename = f\"tmp_code_{code_hash}.{'py' if lang.startswith('python') else lang}\"\n filepath = os.path.join(self._work_dir, filename)\n file_dir = os.path.dirname(filepath)\n os.makedirs(file_dir, exist_ok=True)\n\n # Write the code into a temporary file\n with open(filepath, \"w\", encoding=\"utf-8\") as tmp_code:\n tmp_code.write(code)\n\n # Execute code\n cmd = [\n sys.executable if lang.startswith(\"python\") or lang.startswith(\"Python\") else self._cmd(lang),\n f\".\\\\{filename}\" if self._win32 else filename,\n ]\n if self._win32:\n result = subprocess.run(\n cmd,\n cwd=self._work_dir,\n capture_output=True,\n text=True,\n )\n else:\n signal.signal(signal.SIGALRM, self._timeout_handler)\n try:\n signal.alarm(timeout)\n # run the code in a subprocess in the current docker container in the working directory\n result = subprocess.run(\n cmd,\n cwd=self._work_dir,\n capture_output=True,\n text=True,\n )\n signal.alarm(0)\n except TimeoutError:\n os.remove(filepath)\n return 1, \"Timeout\"\n\n os.remove(filepath)\n if result.returncode:\n logs = result.stderr\n abs_path = str(pathlib.Path(filepath).absolute())\n logs = logs.replace(str(abs_path), \"\").replace(filename, \"\")\n else:\n logs = result.stdout\n\n return result.returncode, logs\n except Exception as e:\n return 1, f\"execution error: {e}\"\n\n @staticmethod\n def extract_code(text: str) -> Tuple[Optional[str], Optional[str]]:\n \"\"\"Extract code from text\n\n :param text: 包含代码的文本,代码必须以```符号封装\n\n :return:\n --lang: Code must be encapsulated with ``` symbol\n --code: Code to be executed\n \"\"\"\n match = re.findall(r\"```(\\w*)\\n(.*?)\\n```\", text, flags=re.DOTALL)\n return match[0] if match else (None, None)\n\n @staticmethod\n def infer_lang(code) -> str:\n \"\"\"Infer code language\n\n :param code: Code to be executed\n\n :return: The inferred code language, if the inference fails, it will return unknown\n \"\"\"\n if (code.startswith(\"python \") or code.startswith(\"pip\") or code.startswith(\"python3 \")\n or code.startswith(\"pip3\")):\n return \"sh\"\n\n try:\n compile(code, \"test\", \"exec\")\n return \"python\"\n except SyntaxError:\n return \"unknown\"\n\n @staticmethod\n def _timeout_handler(signum, frame):\n raise TimeoutError(\"Timed out!\")\n\n @staticmethod\n def _cmd(lang):\n if lang.startswith(\"python\") or lang in [\"bash\", \"sh\", \"powershell\"]:\n return lang\n if lang in [\"shell\"]:\n return \"sh\"\n if 
lang in [\"ps1\"]:\n return \"powershell\"\n raise NotImplementedError(f\"{lang} not recognized in code execution\")" }, { "identifier": "WolframAlphaAPIWrapper", "path": "autogan/tools/wolfram_alpha_tool.py", "snippet": "class WolframAlphaAPIWrapper:\n def __init__(self, wolfram_config: Dict):\n \"\"\"Wrapper for Wolfram Alpha.\n\n :param wolfram_config: JSON format of email_config\n {\"app_id\": \"\"}\n \"\"\"\n self._wolfram_client = wolframalpha.Client(wolfram_config['app_id'])\n\n def run(self, query: str) -> Optional[str]:\n from urllib.error import HTTPError\n\n res = None\n for _ in range(20):\n try:\n res = self._wolfram_client.query(query)\n break\n except HTTPError:\n sleep(1)\n except Exception:\n return None\n if res is None:\n return None\n\n try:\n if not res[\"@success\"]:\n return None\n assumption = next(res.pods).text\n answer = \"\"\n for result in res[\"pod\"]:\n if result[\"@title\"] == \"Solution\":\n answer = result[\"subpod\"][\"plaintext\"]\n if result[\"@title\"] == \"Results\" or result[\"@title\"] == \"Solutions\":\n for i, sub in enumerate(result[\"subpod\"]):\n answer += f\"ans {i}: \" + sub[\"plaintext\"] + \"\\n\"\n break\n if answer == \"\":\n answer = next(res.results).text\n\n except Exception:\n return None\n\n if answer is None or answer == \"\":\n return None\n\n return f\"Assumption: {assumption} \\nAnswer: {answer}\"" }, { "identifier": "count_text_tokens", "path": "autogan/oai/count_tokens_utils.py", "snippet": "def count_text_tokens(text: str, model: Optional[str] = \"gpt-3.5-turbo\") -> int:\n \"\"\"Calculate the tokens of the text.\n\n :param text: The text to be tokenized\n :param model: Calculate tokens for a specific model. If the model is not listed, it will default to calculating the number of tokens based on the gpt-3.5-turbo standard.\n\n :return: tokens\n \"\"\"\n\n if not text:\n return 0\n\n model_list = ['gpt-4', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo']\n if model not in model_list:\n model = \"gpt-3.5-turbo\"\n\n try:\n encoding = tiktoken.encoding_for_model(model)\n num_tokens = len(encoding.encode(text))\n except Exception as e:\n print(e)\n num_tokens = 0\n\n return num_tokens" }, { "identifier": "UniversalAgent", "path": "autogan/agents/universal_agent.py", "snippet": "class UniversalAgent:\n def __init__(\n self,\n name: str,\n agent_config: Optional[Dict] = None,\n duty: Optional[str] = None,\n work_flow: Optional[str] = None,\n use_tool: Optional[str] = None, # only | join\n super_rich: Optional[str] = None, # auto | on | off\n stream_mode: Optional[bool] = None,\n ):\n \"\"\"Agent base class\n\n Each agent can communicate with other agents in the current department and the leader of the subordinate department to complete tasks together.\n 每个 agent 可与当前部门的其他 agent 以及下级部门的 leader 沟通,协作完成任务。\n\n To provide functions beyond the modeling capabilities for the agent, you can override the tool_function method.\n 想要为 agent 提供模型能力之外的功能,可以通过重写 tool_function 方法来实现。\n\n :param name: The agent name should be unique in the organizational structure.\n agent name 在组织架构中应当是唯一的。\n :param agent_config: The agent configuration includes:\n agent 配置包括:\n - main_model: The LLM configuration of the agent's main body.\n agent 主体的 LLM 配置。\n - summary_model: The LLM configuration used for compressing context and generating text summaries.\n 用于压缩上下文以及生成文本摘要的 LLM 配置。\n - request_interval_time: The interval time of LLM requests.\n LLM 请求间隔时间。\n - request_timeout:The timeout of LLM requests.\n LLM 请求超时时间。\n - max_retries: The maximum number of retries for LLM 
requests.\n LLM 请求最大重试次数。\n :param duty: Used to explain one's job responsibilities to other agents.\n 用于向其他 agent 说明自己的工作职责。\n :param work_flow: Defines the workflow of the agent.\n 定义 agent 的工作流程。\n :param use_tool: Defines the mode of the agent using the tool_function:\n 定义 agent 使用 tool_function 的模式:\n - None: means not using the tool function.\n 不使用工具函数。\n - only: Do not use the LLM, only use the tool function to generate results.\n 不使用 LLM,仅使用工具函数生成结果。\n - join: The content generated by the LLM will be used as the input parameter for the tool_function.\n LLM 生成的内容将作为 tool_function 的输入参数\n :param super_rich: Whether to enable the deep thought function. When enabled,\n it uses a set of analysis processes to refine the output of the agent. However,\n this can increase the number of tokens used, so it is not recommended for use with the gpt-4 model.\n The name \"super_rich\" is a reminder that using this function with gpt-4 can be expensive,\n even more so than Elon Musk's earning speed.\n 是否开启深思功能,开启后会使用一套分析流程来收敛 agent 的输出结果,但这样做会增加 tokens 的消耗,因此不建议在gpt-4模型下使用。\n 之所以这个参数叫 super_rich ,是为了提醒用户,如果在 gpt-4 下使用,其花钱的速度可能会超过马斯克赚钱的速度。\n - auto: Disable for GPT-4, enable for other models\n 在 gpt-4下禁用,其他模型开启\n - on: Always enabled\n 始终开启\n - off: Always disabled\n 始终关闭\n :param stream_mode: Whether to enable the stream_mode\n 定义 agent 的工作流程。\n \"\"\"\n self.name = name\n self.agent_config = AgentConfig(agent_config) if agent_config else None\n self.duty = duty\n self.super_rich = super_rich # auto | on | off\n self.stream_mode = stream_mode\n self.response_func = default_response_func # Used to return results to the interface or terminal.\n self.workmates = \"\" # relevant personnel's name and duty\n self.pipeline = \"\" # In a linear workflow, this is the next person to communicate with.\n # Translate the session ID of the pusher into the sub-session ID of the receiver.\n self.sub_to_main_task_id = defaultdict(str)\n # Translate the session id of the sender into the superior session id of the receiver.\n self.main_to_sub_task_id = defaultdict(str)\n self._work_flow = work_flow\n self._use_tool = use_tool # only | join\n self._conversation_messages = defaultdict(list) # key: task id,value: Conversation history\n self._conversation_focus = defaultdict(Dict) # key: task id,value: {\"task_issuer\": \"\", \"task_content\": \"\"}\n\n def set_agent_config(self, agent_config: Dict):\n self.agent_config = AgentConfig(agent_config)\n\n def new_task(self, switch: AgentSwitch, task_id: str, sender_name: str, content: str,\n completion_tokens: int):\n \"\"\"Accept tasks posted by other agent.\n\n :param switch: AgentSwitch object\n :param task_id: New task id\n :param sender_name: Task Issuer's Name\n :param content: Task content\n :param completion_tokens: Task content tokens\n \"\"\"\n # Avoid excessively long task content\n if (self._use_tool != \"only\" and completion_tokens >\n self.agent_config.main_model_config.max_messages_tokens * 0.5):\n self._push_to_switch(switch, task_id, \"The task is too long\", 5)\n\n # Cache task information to maintain focus during task execution\n task_content = content.replace(f\"@{self.name}\", \"please help me\")\n task_content = task_content.replace(f\"{switch.task_tag}\", \"\")\n self._conversation_focus[task_id] = {'task_issuer': sender_name, 'task_content': task_content}\n # Start the generation process\n self._generate_process(switch, task_id, sender_name, content, completion_tokens)\n\n def receive(self, switch: AgentSwitch, task_id: str, sender_name: str, content: 
str,\n completion_tokens: int):\n \"\"\"Receive messages sent by other agents (excluding new task requests)\n\n :param switch: AgentSwitch object\n :param task_id: Task id\n :param sender_name: Name of the agent sending the message\n :param content: Message content\n :param completion_tokens: Message content tokens\n \"\"\"\n if self._use_tool != \"only\":\n safe_size = self.agent_config.main_model_config.max_messages_tokens\n if completion_tokens > safe_size:\n # 如消息内容过长,则对其进行压缩\n compressed_text, total_tokens = compressed_text_universal(\n content, self.agent_config.summary_model_config,\n self.name, self.response_func, self.stream_mode,\n self._conversation_focus[task_id]['task_content'], safe_size)\n if compressed_text:\n content = compressed_text\n completion_tokens = total_tokens\n\n # Press the message into the session record of the current task\n self._conversation_messages[task_id].append(\n {'role': 'user', 'content': content, 'tokens': completion_tokens})\n\n # Start the generation process\n self._generate_process(switch, task_id, sender_name, content, completion_tokens)\n\n def tool_function(self, task_id: str, param: Optional[str] = None,\n tokens: Optional[int] = None) -> tuple[str, int]:\n \"\"\"When the value of the use_tool parameter is 'only' or 'join', please override this method.\n\n :return: --content: Generate content\n --tokens: Generate content tokens\n \"\"\"\n pass\n\n def _base_message(self, switch: AgentSwitch, task_id: str) \\\n -> tuple[dict[str, str], Optional[dict[str, Any]], int]:\n \"\"\"This is the paradigm message required for each round of dialogue.\n 每轮对话都需要的范式消息\n\n :param switch: AgentSwitch object\n :param task_id: Task id\n\n :return:\n -- system_message: Used to clarify its own workflow to the agent and where the agent can seek help.\n 用于向 agent 阐明自身工作流程,以及可以向哪些 agent 寻求帮助。\n -- focus_message: Used to maintain focus during task execution, including who is currently executing the task and what the content of the task is. It will not be forgotten or compressed with the increase of dialogue rounds.\n 用于在任务执行过程中保持专注力,包括当前正在执行谁发布的任务、任务的内容是什么。不会随会话轮次的增多而被遗忘或压缩。\n -- total_tokens: The overall tokens of the content of the system_message and the focus_message.\n system_message 以及 focus_message 内容的整体 tokens。\n \"\"\"\n total_tokens = 0\n\n info = environment_info()\n\n # Assemble system message\n system_prompt = f\"\"\"Now your name is {self.name}, you are an assistant who will not give up easily when you encounter difficulties\n\nEnvironment information:\n{info}\"\"\"\n\n if self._work_flow:\n system_prompt += f\"\"\"\n\nYour work flow is::\n{self._work_flow}\"\"\"\n\n if self.workmates:\n system_prompt += f\"\"\"\n\nThe following professionals can help you accomplish the task:\n{self.workmates}\"\"\"\n\n if self._use_tool is None:\n system_prompt += f\"\"\"\n \n Please follow these guidelines when replying to any content:\n 1. Be aware that if you do not @recipient at the beginning, the system will give an error.\n 2. When asking for help, you need to first post a task, the method is: @recipient {switch.task_tag} task content.\n 3. The recipient does not have any dialogue records before the task begins, nor can they see your conversations with others.\n 4. Do not suggest the recipient to communicate with others.\n 5. Do not explain to the initiator of the task what you are going to do.\n 6. 
In the reply, do not converse with two recipients at the same time.\n \"\"\"\n\n total_tokens += 37\n\n system_message = {'role': 'system', 'content': system_prompt}\n if task_id in self._conversation_focus and self._conversation_focus[task_id]:\n # Assemble focus message\n focus_prompt = f\"\"\"current task content:\ntask issuer: {self._conversation_focus[task_id]['task_issuer']}\ntask content: {self._conversation_focus[task_id]['task_content']}\"\"\"\n\n if self._use_tool is None:\n if self.pipeline and self.pipeline != \"\\\\\":\n focus_prompt += f\"\"\"\n\nWhen you have the result of the task, please @{self.pipeline} {switch.task_tag} and reply to the execution result, He'll know what to do next\"\"\"\n else:\n focus_prompt += f\"\"\"\n\nWhen you have the result of the task, please @{self._conversation_focus[task_id]['task_issuer']} and reply to the execution result\"\"\"\n\n total_tokens += count_text_tokens(focus_prompt)\n\n focus_message = {'role': 'user', 'content': focus_prompt}\n else:\n focus_message = None\n\n return system_message, focus_message, total_tokens\n\n def _super_rich_message(self, switch: AgentSwitch, task_id: str, ideas: dict, index: int)\\\n -> tuple[list[str, dict], bool]:\n \"\"\"Thought prompts, with new content requested at each level\n 深思提示词,每层请求新的内容\n\n :param switch: AgentSwitch object\n :param task_id: Task id\n :param ideas: Results generated\n :param index: Current thinking depth\n\n :return:\n -- message_list: Thought prompts list\n -- tag:\n -- message: Thought prompts\n -- is_end:\n \"\"\"\n messages = []\n\n task_issuer = \"\"\n if self.pipeline and self.pipeline != \"\\\\\":\n task_issuer += f\"{self.pipeline} : When there is no more work to be done, Submit the results to me.\"\n else:\n task_issuer += f\"{self._conversation_focus[task_id]['task_issuer']} : When there is no more work to be done, Submit the results to me.\"\n\n total_tokens = 0\n\n info = f\"\"\"\n\nreference workflow:\n{environment_info()}\"\"\"\n\n workmates = \"\"\n if self.workmates:\n workmates = f\"\"\"\n\nrelevant personnel's name and duty:\n{self.workmates}\n{task_issuer}\"\"\"\n\n workflow = \"\"\n if self._work_flow:\n workflow = f\"\"\"\n{self._work_flow}\"\"\"\n\n repetitive_prompt = f\"\"\"The above is a group chat record, assuming you are {self.name}, please do the following analysis:\n\nStep 1: Understand your overall workflow (No need to output):\n workflow:{workflow}\n\nStep 2: Analyze whether {self.name} is repeating a task in the workflow or encountering difficulties (No need to output).\n\nStep 3: output your analysis results\n If yes, please give advice on how to stop repeating from the perspective of {self.name}.\n If not, please reply one word 'None'.\"\"\"\n\n messages.append([\"Observe whether the previous conversation fell into a cycle\", {'role': 'system', 'content': repetitive_prompt}])\n\n debug_prompt = f\"\"\"The above is a group chat record, please do the following analysis:\n\nStep 1: Understand your overall workflow, Including the execution conditions and objectives for each step (No need to output):\n workflow:{workflow}\n \nStep 2: Analyze whether there are unresolved errors in the previous conversation (No need to output).\n\nStep 3: Analyze If there are unresolved errors, Think about what the root cause of these errors is (No need to output).\n\nStep 4: Analyze If there are unresolved errors, From {self.name}'s perspective, how should you solve it next? 
(No need to output)\n\nStep 5: output your analysis results, including the following content:\n whether there are unresolved errors in the previous conversation:\n If there are unresolved errors, What errors in the dialogue:\n If there are unresolved errors, The root cause of the error:\n If there are unresolved errors, How to solve it next:\n\nNote: There's no need to output the specific dialogue content, just output the analysis results.\"\"\"\n\n messages.append([\"Reflect on whether there are any errors in the previous dialogue process\", {'role': 'system', 'content': debug_prompt}])\n\n planning_prompt = f\"\"\"The above is a group chat record, assuming you are {self.name}, please do the following analysis:\n\nStep 1: Understand your overall workflow (No need to output):\n workflow:{workflow}\n\nStep 2: Analyze which item to execute or continue to execute in the workflow (No need to output).\n\nStep 3: Understand the specific errors that have occurred in the current conversation (No need to output).\n Are you stuck in a deadlock: {ideas[\"Observe whether the previous conversation fell into a cycle\"]}\n \n {ideas[\"Reflect on whether there are any errors in the previous dialogue process\"]}\n\nStep 4: Understand some rules (No need to output).\n 1. When asking for help, you need to first post a task,\n 2. The recipient does not have any dialogue records before the task begins, nor can they see your conversations with others.\n 2. Don't let the other party to communicate with others.\n 3. In your plan, there should be no content about apologizing to others or what you are going to do.\n\nStep 5: output your analysis results, including the following content:\n Do you need to create a task:\n In the next round of conversation, the specific work you need to do is(Please explain in detail and Ignore the work that has been completed.):\n all the details that need to be taken into consideration, including recommended methods or tools, etc:\n\nNote: There's no need to output the specific dialogue content, just output the analysis results.\n\"\"\"\n\n messages.append([\"Think about what to do next\", {'role': 'system', 'content': planning_prompt}])\n\n communicate_prompt = f\"\"\"your name is {self.name}, please do the following analysis:\n \nStep 1: Understand your work plan (No need to output):\n {ideas[\"Think about what to do next\"]}\n\nStep 2: Get to know your colleagues, including what they can and cannot do (No need to output):\n {workmates}\n {self._conversation_focus[task_id]['task_issuer']} : \"\"\n \nStep 3: Analyze who is the most relevant colleague to the first step of next round of conversation the specific work you need to do, note that you can only choose one person (No need to output).\n\nStep 4: output your analysis results, including the following content:\n who is the most relevant colleague to the first step of your plan:\n What are the requirements when the other party receives messages:\n What can the other party do:\n What the other party cannot do:\n \nNote: please provide the correct names of relevant personnel, Don't provide names that don't exist.\"\"\"\n\n messages.append([\"Think about who to communicate with next\", {'role': 'user', 'content': communicate_prompt}])\n\n reply_prompt = f\"\"\"The above is a group chat record, assuming you are {self.name}, Please strictly follow the contents of the guidelines below to generate your response, note do not communicate with others or perform other tasks:\n\n{info}\n\nStep 1: Clarify who you will be communicating with 
(No need to output):\n {ideas[\"Think about who to communicate with next\"]}\n\nStep 2: Specify the task you are going to carry out (No need to output):\n {ideas[\"Think about what to do next\"]}\n\nStep 3: Understand some response rules (No need to output).\n 1. Please do not mention the second person in your reply content.\n 2. When you need to post a task, the method is: @recipient {switch.task_tag} task content.\n\nStep 4: Please follow the content of the previous step, From {self.name}'s perspective, Output your response in the format below:\n @who you will be communicating with + Reply content\"\"\"\n\n messages.append([\"Generate reply content\", {'role': 'system', 'content': reply_prompt}])\n\n if index == len(messages) - 1:\n return messages[index], True\n else:\n return messages[index], False\n\n def _generate_process(self, switch: AgentSwitch, task_id: str, sender_name: str, content: str,\n completion_tokens: int):\n \"\"\"Generate process\n\n If the value of the use_tool parameter is None, only the main LLM is used to generate a response.\n 如果 use_tool 参数的值为 None,则仅使用主体 LLM 生成回复。\n\n If the value of the use_tool parameter is 'only', the main LLM is skipped and the tool_function is used directly to generate a response.\n 如果 use_tool 参数的值为 only,则跳过主体 LLM 直接使用 tool_function 生成回复。\n\n If the value of the use_tool parameter is 'join', the main LLM is first used to generate content, and then the generated content is used as the input parameter for tool_function.\n 如果 use_tool 参数的值为 join,则先使用主体 LLM 生成内容,然后将生成的内容作为 tool_function 的输入参数。\n \"\"\"\n hold_content = content\n hold_completion_tokens = completion_tokens\n try:\n if self._use_tool != \"only\":\n if self._use_tool == \"join\":\n print(\n colored(\n f\"\\n\\n>>>>>>>> tool call:\",\n \"cyan\",\n ),\n flush=True,\n )\n content, completion_tokens = self._base_generate_reply(switch, task_id, \"tool_call\")\n else:\n if self.super_rich == \"on\":\n content, completion_tokens = self._super_rich_generate_reply(switch, task_id)\n elif (self.super_rich == \"auto\" or self.super_rich is None) and \"gpt-4\" not in self.agent_config.main_model_config.model:\n content, completion_tokens = self._super_rich_generate_reply(switch, task_id)\n else:\n content, completion_tokens = self._base_generate_reply(switch, task_id, \"main\")\n if content is None:\n raise ValueError(\"Failed to generate content.\")\n else:\n content = re.sub(r'^@\\S+\\s+', '', content).strip()\n\n if self._use_tool and not content.startswith(\"@\"):\n content, completion_tokens = self.tool_function(task_id, content, completion_tokens)\n # Assign recipients for the results generated by the tool_function.\n if not content.startswith(\"@\"):\n if (task_id in self._conversation_focus and \"task_issuer\" in\n self._conversation_focus[task_id]):\n receiver = self._conversation_focus[task_id]['task_issuer']\n else:\n receiver = sender_name\n content = f\"@{receiver} \" + content\n self.response_func(self.name, \"tool\", \"\", False, 0, content, completion_tokens, None)\n self._push_to_switch(switch, task_id, content, completion_tokens)\n except SystemExit:\n print(\"The task is finished.\")\n except Exception as e:\n print(f\"e :{e}\")\n if self._use_tool == \"only\":\n self._push_to_switch(switch, task_id, f\"@{sender_name} Generate error, Trying again\", 4)\n else:\n self._re_push_to_switch(switch, task_id, hold_content, hold_completion_tokens,\n sender_name)\n\n def _base_generate_reply(self, switch: AgentSwitch, task_id: str, gen: str) -> tuple[Optional[str], Optional[int]]:\n 
\"\"\"Use the main LLM to generate responses.\n\n Before generating a response, the historical conversation records within the current task scope, excluding system_message and focus_message, will be compressed first.\n\n :param switch: AgentSwitch Object\n :param task_id: Task id\n\n :return: --content: Generate content\n --tokens: Generate content tokens\n \"\"\"\n system_message, focus_message, total_tokens = self._base_message(switch, task_id)\n\n # Calculate the target size of context compression.\n safe_size = self.agent_config.main_model_config.max_messages_tokens - total_tokens\n # Compress the historical conversation records.\n request_messages, total_tokens = self._chat_messages_safe_size(task_id, safe_size)\n request_messages.insert(0, system_message)\n if focus_message:\n request_messages.insert(0, focus_message)\n return generate_chat_completion(self.agent_config.main_model_config, request_messages, self.name, gen, self.response_func, self.stream_mode)\n\n def _super_rich_generate_reply(self, switch: AgentSwitch, task_id: str) -> tuple[Optional[str], Optional[int]]:\n \"\"\"Use the main LLM to generate responses.\n\n Before generating a response, the historical conversation records within the current task scope, excluding system_message and focus_message, will be compressed first.\n\n :param switch: AgentSwitch Object\n :param task_id: Task id\n\n :return: --content: Generate content\n --tokens: Generate content tokens\n \"\"\"\n system_message, focus_message, total_tokens = self._base_message(switch, task_id)\n\n # Calculate the target size of context compression.\n safe_size = self.agent_config.main_model_config.max_messages_tokens - total_tokens\n\n # Compress the historical conversation records.\n request_messages, total_tokens = self._chat_messages_safe_size(task_id, safe_size)\n\n if focus_message:\n request_messages.insert(0, focus_message)\n\n index = 0\n ideas = defaultdict(str)\n while True:\n message, is_end = self._super_rich_message(switch, task_id, ideas, index)\n if is_end:\n gen = \"main\"\n else:\n gen = \"idea\"\n\n print(\n colored(\n f\"\\n\\n>>>>>>>> {message[0]}:\",\n \"cyan\",\n ),\n flush=True,\n )\n\n if message[1][\"role\"] == \"system\":\n messages = request_messages.copy()\n messages.append(message[1])\n content, token = generate_chat_completion(self.agent_config.main_model_config, messages, self.name, gen, self.response_func, self.stream_mode)\n ideas[message[0]] = content\n tokens = token\n else:\n content, token = generate_chat_completion(self.agent_config.main_model_config, [message[1]], self.name, gen, self.response_func, self.stream_mode)\n ideas[message[0]] = content\n tokens = token\n if is_end:\n break\n else:\n index += 1\n\n return content, tokens\n\n def _push_to_switch(self, switch: AgentSwitch, task_id: str, content: str, completion_tokens: int):\n content = content.replace(f\"@{self.name} \", \"\")\n self._conversation_messages[task_id].append(\n {'role': 'assistant', 'content': content, 'tokens': completion_tokens})\n\n switch.handle_and_forward(task_id, self.name, content, completion_tokens)\n\n def _chat_messages_safe_size(self, task_id: str, safe_size: int) \\\n -> tuple[list, int]:\n \"\"\"Compress the historical session records within the current task scope (excluding system_message and focus_message)\n\n :param task_id: Task id\n :param safe_size: The max_messages_tokens of the main LLM configuration\n\n :return: --request_messages: It is used for the message content requested to LLM, with the tokens field of each message 
removed.\n –-total_tokens: The overall tokens after compression.\n \"\"\"\n if task_id in self._conversation_messages and self._conversation_messages[task_id]:\n conversation_messages, request_messages, total_tokens = compressed_messages(\n self._conversation_messages[task_id], self._conversation_focus[task_id]['task_content'],\n self.agent_config.summary_model_config, self.name, self.response_func, self.stream_mode,\n safe_size)\n\n if request_messages:\n self._conversation_messages[task_id] = conversation_messages\n return request_messages, total_tokens\n\n return [], 0\n\n @staticmethod\n def _re_push_to_switch(switch: AgentSwitch, task_id: str, content: str, completion_tokens: int, sender: str):\n switch.handle_and_forward(task_id, sender, content, completion_tokens)" }, { "identifier": "compressed_text_universal", "path": "autogan/utils/compressed_text_utils.py", "snippet": "def compressed_text_universal(text: str, summary_model_config: LLMConfig, agent_name: str,\n response_func: ResponseFuncType, stream_mode: Optional[bool] = None,\n focus: Optional[str] = None, safe_size: Optional[int] = None) \\\n -> tuple[Optional[str], Optional[int]]:\n \"\"\"Compress the text, generating either a regular summary or a cue summary.\n 压缩文本,可生成普通摘要或线索摘要。\n\n First, the long text is sliced, and then a summary is generated for each slice.\n 首先将长文本切片,然后逐切片的生成摘要。\n\n If the value of the focus parameter is not None, then the attention will be focused on the focus area while generating the summary.\n 如 focus 参数的值不为 None 则在生成摘要时注意力集中于 focus。\n\n If the value of the safe_size parameter is not None and the length of the initial compression result exceeds the safe_size, the summary will be further compressed, with the compressed size expected to stay within the range of the safe_size.\n 如 safe_size 参数的值不为 None 且初次压缩结果长度超过 safe_size,则会对摘要进一步压缩,压缩后的大小被期望保持在 safe_size 范围之内。\n\n :param text: Text to be compressed.\n 待压缩的文本。\n :param summary_model_config: LLM configuration used for text compression.\n 用于压缩文本的 LLM 配置。\n :param agent_name:\n :param response_func: Used to return results to the interface or terminal.\n 用于向接口或终端返回结果\n :param stream_mode:\n :param focus: The focus direction when compressing text.\n 压缩文本时的专注方向。\n :param safe_size: The target size of the text after compression, if not provided there is no limit.\n 文本压缩后的目标尺寸,如果为空则不做限制。\n\n :return:\n --compressed_text: The text after compression.\n 压缩后的文本。\n --total_tokens: Total tokens after compression.\n 压缩后的整体tokens。\n \"\"\"\n\n compressed_text = \"\"\n total_tokens = 0\n\n split_texts = split_text(text, summary_model_config.max_messages_tokens, summary_model_config.model)\n\n for st in split_texts:\n if focus:\n content, tokens = generate_text_clues(st, focus, summary_model_config, agent_name, response_func,\n stream_mode)\n else:\n content, tokens = generate_text_summary(st, summary_model_config, agent_name, response_func, stream_mode)\n\n if content:\n compressed_text += content + \"\\n\"\n total_tokens += tokens\n\n if compressed_text:\n if safe_size and safe_size < total_tokens:\n return compressed_text_into_safe_size(compressed_text, safe_size, summary_model_config, agent_name,\n response_func, stream_mode)\n else:\n return compressed_text, total_tokens\n else:\n return None, None" }, { "identifier": "WebSearch", "path": "autogan/tools/web_search_tool.py", "snippet": "class WebSearch:\n def __init__(self, google_search_config: Dict):\n \"\"\"A class for google search\n\n :param search_config: JSON format of email_config {\"cx\": \"\", \"key\": 
\"\"}\n \"\"\"\n self._cx = google_search_config[\"cx\"]\n self._key = google_search_config[\"key\"]\n\n def get_search_detail(self, keyword: str, start: int, agent_name: str, gen: str, response_func: ResponseFuncType)\\\n -> Optional[str]:\n \"\"\"Obtain the main text content of a search result page\n\n :param keyword: Search keywords\n :param start: Search result index offset\n :param agent_name:\n :param gen: Used to distinguish agent replies, deep thoughts, context compression, general summaries, clue summaries\n - main: agent replies\n - idea: deep thoughts\n - messages_summary: context compression\n - text_summary: general summaries\n - clue_summary: clue summaries\n :param response_func: Used to return results to the interface or terminal.\n\n :return: The main content of the page\n \"\"\"\n\n result = self.google_search(keyword, start, 1)\n\n if result is None:\n return None\n\n url = result[0][\"link\"]\n\n response_func(agent_name, gen, \"\", False, 0, url, 0, None)\n\n # Obtain the main content of the URL page\n response = requests.get(url)\n response.encoding = response.apparent_encoding\n soup = BeautifulSoup(response.text, 'html.parser')\n main_text = soup.get_text()\n\n # Remove extra line breaks\n s = re.sub('\\n+', '\\n', main_text)\n\n if s:\n return s\n else:\n return None\n\n def google_search(self, keyword: str, start: int, num: int) -> Optional[list]:\n \"\"\"Call Google web search interface\n\n :param keyword: Search keywords\n :param start: Search result index offset\n :param num: Get the number of results\n\n :return:\n --result_list: Search results list\n --is_success: Successful or not\n \"\"\"\n\n # 接口参数\n url = \"https://www.googleapis.com/customsearch/v1\"\n\n params = {\n 'q': quote(keyword),\n 'start': start,\n 'num': num,\n 'cx': self._cx,\n 'key': self._key,\n }\n\n loop = 3\n for i in range(loop):\n try:\n response = requests.get(url, params=params)\n response.raise_for_status() # If the response status is not 200, throw an exception\n data = response.json() # Parse the returned json data\n\n if 'items' not in data:\n raise ValueError(\"The return value is empty.\")\n\n # Extract the title, link, and snippet fields from each object in the items field.\n results = []\n for item in data['items']:\n result = {\n 'title': item.get('title', ''),\n 'link': item.get('link', ''),\n 'snippet': item.get('snippet', '')\n }\n results.append(result)\n\n return results\n except requests.HTTPError as http_err:\n time.sleep(5)\n if i == loop - 1:\n print(f'HTTP error occurred: {http_err}')\n return None\n except Exception as e:\n time.sleep(5)\n if i == loop - 1:\n return None" } ]
import re from collections import defaultdict from typing import Optional, Dict from autogan.tools.code_execution_tool import CodeExecution from autogan.tools.wolfram_alpha_tool import WolframAlphaAPIWrapper from autogan.oai.count_tokens_utils import count_text_tokens from autogan.agents.universal_agent import UniversalAgent from autogan.utils.compressed_text_utils import compressed_text_universal from autogan.tools.web_search_tool import WebSearch
11,974
class ToolAgentSearch(UniversalAgent): def __init__( self, search_config: Dict, agent_config: Optional[Dict] = None, retry_times: Optional[int] = 10, name: Optional[str] = "WebSearchExp", duty: Optional[str] = 'Not only can I search for information on the internet, ' 'but I can also answer questions using the Wolfram engine.', work_flow: Optional[str] = """I hope you are an internet search expert. When you receive a search request, you have the following two tools to choose from: 1. web: You can search for information on the internet. When using it, please enclose the search keywords in your output with the ```web\n ``` symbol, for example: ```web Your search keywords ``` 2. wolfram: You can use the Wolfram engine to help you calculate or query data related to Mathematics, finance, unit conversion, data analysis, science, geography, history, culture, movies, music, etc. When using it, please enclose the English question that Wolfram can understand in your output with the ```wolfram\n ``` symbol, for example: ```wolfram one wolfram query ``` Note: When you decide to use a tool, please do not @ anyone.""", # duty: Optional[str] = '我不但可以从网络上搜索资料,还可以通过 wolfram 引擎来回答问题。', # work_flow: Optional[str] = """我希望你是一个网络搜索专家,当你收到搜索请求时,你有一下两种工具可供选择: # # 1. web: 可以在网络上查找资料。使用时请在你的输出内容中,将搜索关键词用```web\n ``` 符号封装,例如: # ```web # Your search keywords # ``` # # 2.wolfram: 可以使用wolfram引擎,帮你计算或查询数学、金融、单位转换、数据分析、科学、地理、历史、文化、电影、音乐等相关数据。使用时请在你的输出内容中,将 wolfram 可以理解的英文问题用```wolfram\n ``` 符号封装,例如: # ```wolfram # one wolfram query # ``` # # 注意:当你决定使用工具时,请不要@任何人""", ): """WebSearchExpert 1.Receive the user's question and convert it into search keywords. 2.Call the Google Search API to obtain a result and extract the webpage content. 3.If no content related to the user's question is extracted, call the Google Search API again to obtain the next result. 4.Repeat operations 2 and 3 until reaching retry_times. Within the same task session domain, if the search keywords are the same, the offset of the search results will accumulate and move backwards. :param agent_config: The agent configuration includes: agent 配置包括: - main_model: The LLM configuration of the agent's main body. agent 主体的 LLM 配置。 - summary_model: The LLM configuration used for compressing context and generating text summaries. 用于压缩上下文以及生成文本摘要的 LLM 配置。 - request_interval_time: The interval time of LLM requests. LLM 请求间隔时间。 - request_timeout:The timeout of LLM requests. LLM 请求超时时间。 - max_retries: The maximum number of retries for LLM requests. LLM 请求最大重试次数。 :param search_config: JSON format of email_config {"cx": "", "key": ""} :param retry_times: Represent the maximum number of attempts for each search, the default is 10. :param name: The agent name should be unique in the organizational structure. :param duty: Used to explain one's job responsibilities to other agents. :param work_flow: Defines the workflow of the agent. 
定义 agent 的工作流程。 """ super().__init__( name, agent_config=agent_config, duty=duty, work_flow=work_flow, use_tool="join" ) self._web_search = WebSearch(search_config["google_search"]) if "google_search" in search_config else None self._wolfram_alpha = WolframAlphaAPIWrapper( search_config["wolfram_alpha"]) if "wolfram_alpha" in search_config else None self._conversation_search_index = defaultdict(int) self._retry_times = retry_times def tool_function(self, task_id: str, param: Optional[str] = None, tokens: Optional[int] = None) -> tuple[str, int]: lang, code = CodeExecution.extract_code(param) if lang == "web" and code: if self._web_search: return self._web_function(task_id, code) else: return "Please add the Google Custom Search JSON API configuration.", 0 elif lang == "wolfram" and code: if self._wolfram_alpha: return self._wolfram_alpha_function(code) else: return "Please add the WolframAlphaAPI configuration.", 0 else: return """Please make a choice between web and wolfram, and use the ``` symbol for encapsulation, for example: ```wolfram one wolfram query ```""", 18 def _web_function(self, task_id: str, param: str) -> tuple[str, int]: loop = self._retry_times for i in range(loop): # Accumulate the search offset of the same task and the same keyword. self._conversation_search_index[task_id] += 1 start = self._conversation_search_index[task_id] # Get webpage content. detail = self._web_search.get_search_detail(param, start, self.name, "search", self.response_func) if detail: # Extract content related to the user's question from the webpage content.
class ToolAgentSearch(UniversalAgent): def __init__( self, search_config: Dict, agent_config: Optional[Dict] = None, retry_times: Optional[int] = 10, name: Optional[str] = "WebSearchExp", duty: Optional[str] = 'Not only can I search for information on the internet, ' 'but I can also answer questions using the Wolfram engine.', work_flow: Optional[str] = """I hope you are an internet search expert. When you receive a search request, you have the following two tools to choose from: 1. web: You can search for information on the internet. When using it, please enclose the search keywords in your output with the ```web\n ``` symbol, for example: ```web Your search keywords ``` 2. wolfram: You can use the Wolfram engine to help you calculate or query data related to Mathematics, finance, unit conversion, data analysis, science, geography, history, culture, movies, music, etc. When using it, please enclose the English question that Wolfram can understand in your output with the ```wolfram\n ``` symbol, for example: ```wolfram one wolfram query ``` Note: When you decide to use a tool, please do not @ anyone.""", # duty: Optional[str] = '我不但可以从网络上搜索资料,还可以通过 wolfram 引擎来回答问题。', # work_flow: Optional[str] = """我希望你是一个网络搜索专家,当你收到搜索请求时,你有一下两种工具可供选择: # # 1. web: 可以在网络上查找资料。使用时请在你的输出内容中,将搜索关键词用```web\n ``` 符号封装,例如: # ```web # Your search keywords # ``` # # 2.wolfram: 可以使用wolfram引擎,帮你计算或查询数学、金融、单位转换、数据分析、科学、地理、历史、文化、电影、音乐等相关数据。使用时请在你的输出内容中,将 wolfram 可以理解的英文问题用```wolfram\n ``` 符号封装,例如: # ```wolfram # one wolfram query # ``` # # 注意:当你决定使用工具时,请不要@任何人""", ): """WebSearchExpert 1.Receive the user's question and convert it into search keywords. 2.Call the Google Search API to obtain a result and extract the webpage content. 3.If no content related to the user's question is extracted, call the Google Search API again to obtain the next result. 4.Repeat operations 2 and 3 until reaching retry_times. Within the same task session domain, if the search keywords are the same, the offset of the search results will accumulate and move backwards. :param agent_config: The agent configuration includes: agent 配置包括: - main_model: The LLM configuration of the agent's main body. agent 主体的 LLM 配置。 - summary_model: The LLM configuration used for compressing context and generating text summaries. 用于压缩上下文以及生成文本摘要的 LLM 配置。 - request_interval_time: The interval time of LLM requests. LLM 请求间隔时间。 - request_timeout:The timeout of LLM requests. LLM 请求超时时间。 - max_retries: The maximum number of retries for LLM requests. LLM 请求最大重试次数。 :param search_config: JSON format of email_config {"cx": "", "key": ""} :param retry_times: Represent the maximum number of attempts for each search, the default is 10. :param name: The agent name should be unique in the organizational structure. :param duty: Used to explain one's job responsibilities to other agents. :param work_flow: Defines the workflow of the agent. 
定义 agent 的工作流程。 """ super().__init__( name, agent_config=agent_config, duty=duty, work_flow=work_flow, use_tool="join" ) self._web_search = WebSearch(search_config["google_search"]) if "google_search" in search_config else None self._wolfram_alpha = WolframAlphaAPIWrapper( search_config["wolfram_alpha"]) if "wolfram_alpha" in search_config else None self._conversation_search_index = defaultdict(int) self._retry_times = retry_times def tool_function(self, task_id: str, param: Optional[str] = None, tokens: Optional[int] = None) -> tuple[str, int]: lang, code = CodeExecution.extract_code(param) if lang == "web" and code: if self._web_search: return self._web_function(task_id, code) else: return "Please add the Google Custom Search JSON API configuration.", 0 elif lang == "wolfram" and code: if self._wolfram_alpha: return self._wolfram_alpha_function(code) else: return "Please add the WolframAlphaAPI configuration.", 0 else: return """Please make a choice between web and wolfram, and use the ``` symbol for encapsulation, for example: ```wolfram one wolfram query ```""", 18 def _web_function(self, task_id: str, param: str) -> tuple[str, int]: loop = self._retry_times for i in range(loop): # Accumulate the search offset of the same task and the same keyword. self._conversation_search_index[task_id] += 1 start = self._conversation_search_index[task_id] # Get webpage content. detail = self._web_search.get_search_detail(param, start, self.name, "search", self.response_func) if detail: # Extract content related to the user's question from the webpage content.
compressed_text, total_tokens = compressed_text_universal(
4
2023-12-06 03:24:34+00:00
16k
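Side note on the row above: its `next_line` target opens a call to `compressed_text_universal`, whose full signature is included in this row's context. Below is a minimal, hypothetical sketch of how that call might be completed inside `ToolAgentSearch._web_function`; it assumes the surrounding method scope (`detail`, `param`, `self`) from the `cropped_code` field, and any argument choice beyond the documented signature is an assumption, not the repository's verbatim continuation.

```python
# Hypothetical continuation sketch (not the repo's verbatim code).
# It relies only on the compressed_text_universal signature shown in the
# context above: (text, summary_model_config, agent_name, response_func,
# stream_mode=None, focus=None, safe_size=None) -> (compressed_text, total_tokens).
compressed_text, total_tokens = compressed_text_universal(
    detail,                                  # webpage text returned by get_search_detail
    self.agent_config.summary_model_config,  # summary LLM config, as used elsewhere in UniversalAgent
    self.name,
    self.response_func,
    self.stream_mode,
    focus=param,                             # assumed: focus the clue summary on the search keywords
)
if compressed_text:
    # Assumed per the documented workflow: return once relevant content is
    # extracted; otherwise the loop advances to the next search result.
    return compressed_text, total_tokens
```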
TACJu/Compositor
Compositor_Mask2Former/mask2former_video/data_video/ytvis_eval.py
[ { "identifier": "YTVOS", "path": "Compositor_Mask2Former/mask2former_video/data_video/datasets/ytvis_api/ytvos.py", "snippet": "class YTVOS:\n def __init__(self, annotation_file=None):\n \"\"\"\n Constructor of Microsoft COCO helper class for reading and visualizing annotations.\n :param annotation_file (str): location of annotation file\n :param image_folder (str): location to the folder that hosts images.\n :return:\n \"\"\"\n # load dataset\n self.dataset,self.anns,self.cats,self.vids = dict(),dict(),dict(),dict()\n self.vidToAnns, self.catToVids = defaultdict(list), defaultdict(list)\n if not annotation_file == None:\n print('loading annotations into memory...')\n tic = time.time()\n dataset = json.load(open(annotation_file, 'r'))\n assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))\n print('Done (t={:0.2f}s)'.format(time.time()- tic))\n self.dataset = dataset\n self.createIndex()\n\n def createIndex(self):\n # create index\n print('creating index...')\n anns, cats, vids = {}, {}, {}\n vidToAnns,catToVids = defaultdict(list),defaultdict(list)\n if 'annotations' in self.dataset:\n for ann in self.dataset['annotations']:\n vidToAnns[ann['video_id']].append(ann)\n anns[ann['id']] = ann\n\n if 'videos' in self.dataset:\n for vid in self.dataset['videos']:\n vids[vid['id']] = vid\n\n if 'categories' in self.dataset:\n for cat in self.dataset['categories']:\n cats[cat['id']] = cat\n\n if 'annotations' in self.dataset and 'categories' in self.dataset:\n for ann in self.dataset['annotations']:\n catToVids[ann['category_id']].append(ann['video_id'])\n\n print('index created!')\n\n # create class members\n self.anns = anns\n self.vidToAnns = vidToAnns\n self.catToVids = catToVids\n self.vids = vids\n self.cats = cats\n\n def info(self):\n \"\"\"\n Print information about the annotation file.\n :return:\n \"\"\"\n for key, value in self.dataset['info'].items():\n print('{}: {}'.format(key, value))\n\n def getAnnIds(self, vidIds=[], catIds=[], areaRng=[], iscrowd=None):\n \"\"\"\n Get ann ids that satisfy given filter conditions. default skips that filter\n :param vidIds (int array) : get anns for given vids\n catIds (int array) : get anns for given cats\n areaRng (float array) : get anns for given area range (e.g. [0 inf])\n iscrowd (boolean) : get anns for given crowd label (False or True)\n :return: ids (int array) : integer array of ann ids\n \"\"\"\n vidIds = vidIds if _isArrayLike(vidIds) else [vidIds]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(vidIds) == len(catIds) == len(areaRng) == 0:\n anns = self.dataset['annotations']\n else:\n if not len(vidIds) == 0:\n lists = [self.vidToAnns[vidId] for vidId in vidIds if vidId in self.vidToAnns]\n anns = list(itertools.chain.from_iterable(lists))\n else:\n anns = self.dataset['annotations']\n anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]\n anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['avg_area'] > areaRng[0] and ann['avg_area'] < areaRng[1]]\n if not iscrowd == None:\n ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]\n else:\n ids = [ann['id'] for ann in anns]\n return ids\n\n def getCatIds(self, catNms=[], supNms=[], catIds=[]):\n \"\"\"\n filtering parameters. 
default skips that filter.\n :param catNms (str array) : get cats for given cat names\n :param supNms (str array) : get cats for given supercategory names\n :param catIds (int array) : get cats for given cat ids\n :return: ids (int array) : integer array of cat ids\n \"\"\"\n catNms = catNms if _isArrayLike(catNms) else [catNms]\n supNms = supNms if _isArrayLike(supNms) else [supNms]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(catNms) == len(supNms) == len(catIds) == 0:\n cats = self.dataset['categories']\n else:\n cats = self.dataset['categories']\n cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]\n cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]\n cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]\n ids = [cat['id'] for cat in cats]\n return ids\n\n def getVidIds(self, vidIds=[], catIds=[]):\n '''\n Get vid ids that satisfy given filter conditions.\n :param vidIds (int array) : get vids for given ids\n :param catIds (int array) : get vids with all given cats\n :return: ids (int array) : integer array of vid ids\n '''\n vidIds = vidIds if _isArrayLike(vidIds) else [vidIds]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(vidIds) == len(catIds) == 0:\n ids = self.vids.keys()\n else:\n ids = set(vidIds)\n for i, catId in enumerate(catIds):\n if i == 0 and len(ids) == 0:\n ids = set(self.catToVids[catId])\n else:\n ids &= set(self.catToVids[catId])\n return list(ids)\n\n def loadAnns(self, ids=[]):\n \"\"\"\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying anns\n :return: anns (object array) : loaded ann objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.anns[id] for id in ids]\n elif type(ids) == int:\n return [self.anns[ids]]\n\n def loadCats(self, ids=[]):\n \"\"\"\n Load cats with the specified ids.\n :param ids (int array) : integer ids specifying cats\n :return: cats (object array) : loaded cat objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.cats[id] for id in ids]\n elif type(ids) == int:\n return [self.cats[ids]]\n\n def loadVids(self, ids=[]):\n \"\"\"\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying vid\n :return: vids (object array) : loaded vid objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.vids[id] for id in ids]\n elif type(ids) == int:\n return [self.vids[ids]]\n\n\n def loadRes(self, resFile):\n \"\"\"\n Load result file and return a result api object.\n :param resFile (str) : file name of result file\n :return: res (obj) : result api object\n \"\"\"\n res = YTVOS()\n res.dataset['videos'] = [img for img in self.dataset['videos']]\n\n print('Loading and preparing results...')\n tic = time.time()\n if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):\n anns = json.load(open(resFile))\n elif type(resFile) == np.ndarray:\n anns = self.loadNumpyAnnotations(resFile)\n else:\n anns = resFile\n assert type(anns) == list, 'results in not an array of objects'\n annsVidIds = [ann['video_id'] for ann in anns]\n assert set(annsVidIds) == (set(annsVidIds) & set(self.getVidIds())), \\\n 'Results do not correspond to current coco set'\n if 'segmentations' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n ann['areas'] = []\n if not 'bboxes' in ann:\n ann['bboxes'] = []\n for seg in ann['segmentations']:\n # now only support compressed RLE format as 
segmentation results\n if seg:\n ann['areas'].append(maskUtils.area(seg))\n if len(ann['bboxes']) < len(ann['areas']):\n ann['bboxes'].append(maskUtils.toBbox(seg))\n else:\n ann['areas'].append(None)\n if len(ann['bboxes']) < len(ann['areas']):\n ann['bboxes'].append(None)\n ann['id'] = id+1\n l = [a for a in ann['areas'] if a]\n if len(l)==0:\n ann['avg_area'] = 0\n else:\n ann['avg_area'] = np.array(l).mean() \n ann['iscrowd'] = 0\n print('DONE (t={:0.2f}s)'.format(time.time()- tic))\n\n res.dataset['annotations'] = anns\n res.createIndex()\n return res\n\n def annToRLE(self, ann, frameId):\n \"\"\"\n Convert annotation which can be polygons, uncompressed RLE to RLE.\n :return: binary mask (numpy 2D array)\n \"\"\"\n t = self.vids[ann['video_id']]\n h, w = t['height'], t['width']\n segm = ann['segmentations'][frameId]\n if type(segm) == list:\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, h, w)\n rle = maskUtils.merge(rles)\n elif type(segm['counts']) == list:\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, h, w)\n else:\n # rle\n rle = segm\n return rle\n\n def annToMask(self, ann, frameId):\n \"\"\"\n Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.\n :return: binary mask (numpy 2D array)\n \"\"\"\n rle = self.annToRLE(ann, frameId)\n m = maskUtils.decode(rle)\n return m" }, { "identifier": "YTVOSeval", "path": "Compositor_Mask2Former/mask2former_video/data_video/datasets/ytvis_api/ytvoseval.py", "snippet": "class YTVOSeval:\n # Interface for evaluating video instance segmentation on the YouTubeVIS dataset.\n #\n # The usage for YTVOSeval is as follows:\n # cocoGt=..., cocoDt=... # load dataset and results\n # E = YTVOSeval(cocoGt,cocoDt); # initialize YTVOSeval object\n # E.params.recThrs = ...; # set parameters as desired\n # E.evaluate(); # run per image evaluation\n # E.accumulate(); # accumulate per image results\n # E.summarize(); # display summary metrics of results\n # For example usage see evalDemo.m and http://mscoco.org/.\n #\n # The evaluation parameters are as follows (defaults in brackets):\n # imgIds - [all] N img ids to use for evaluation\n # catIds - [all] K cat ids to use for evaluation\n # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation\n # recThrs - [0:.01:1] R=101 recall thresholds for evaluation\n # areaRng - [...] 
A=4 object area ranges for evaluation\n # maxDets - [1 10 100] M=3 thresholds on max detections per image\n # iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'\n # iouType replaced the now DEPRECATED useSegm parameter.\n # useCats - [1] if true use category labels for evaluation\n # Note: if useCats=0 category labels are ignored as in proposal scoring.\n # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.\n #\n # evaluate(): evaluates detections on every image and every category and\n # concats the results into the \"evalImgs\" with fields:\n # dtIds - [1xD] id for each of the D detections (dt)\n # gtIds - [1xG] id for each of the G ground truths (gt)\n # dtMatches - [TxD] matching gt id at each IoU or 0\n # gtMatches - [TxG] matching dt id at each IoU or 0\n # dtScores - [1xD] confidence of each dt\n # gtIgnore - [1xG] ignore flag for each gt\n # dtIgnore - [TxD] ignore flag for each dt at each IoU\n #\n # accumulate(): accumulates the per-image, per-category evaluation\n # results in \"evalImgs\" into the dictionary \"eval\" with fields:\n # params - parameters used for evaluation\n # date - date evaluation was performed\n # counts - [T,R,K,A,M] parameter dimensions (see above)\n # precision - [TxRxKxAxM] precision for every evaluation setting\n # recall - [TxKxAxM] max recall for every evaluation setting\n # Note: precision and recall==-1 for settings with no gt objects.\n #\n # See also coco, mask, pycocoDemo, pycocoEvalDemo\n #\n # Microsoft COCO Toolbox. version 2.0\n # Data, paper, and tutorials available at: http://mscoco.org/\n # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.\n # Licensed under the Simplified BSD License [see coco/license.txt]\n def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):\n '''\n Initialize CocoEval using coco APIs for gt and dt\n :param cocoGt: coco object with ground truth annotations\n :param cocoDt: coco object with detection results\n :return: None\n '''\n if not iouType:\n print('iouType not specified. 
use default iouType segm')\n self.cocoGt = cocoGt # ground truth COCO API\n self.cocoDt = cocoDt # detections COCO API\n self.params = {} # evaluation parameters\n self.evalVids = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements\n self.eval = {} # accumulated evaluation results\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n self.params = Params(iouType=iouType) # parameters\n self._paramsEval = {} # parameters for evaluation\n self.stats = [] # result summarization\n self.ious = {} # ious between all gts and dts\n if not cocoGt is None:\n self.params.vidIds = sorted(cocoGt.getVidIds())\n self.params.catIds = sorted(cocoGt.getCatIds())\n\n\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n for i, a in enumerate(ann['segmentations']):\n if a:\n rle = coco.annToRLE(ann, i)\n ann['segmentations'][i] = rle\n l = [a for a in ann['areas'] if a]\n if len(l)==0:\n ann['avg_area'] = 0\n else:\n ann['avg_area'] = np.array(l).mean() \n p = self.params\n if p.useCats:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(vidIds=p.vidIds, catIds=p.catIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(vidIds=p.vidIds, catIds=p.catIds))\n else:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(vidIds=p.vidIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(vidIds=p.vidIds))\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(gts, self.cocoGt)\n _toMask(dts, self.cocoDt)\n # set ignore flag\n for gt in gts:\n gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0\n gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']\n if p.iouType == 'keypoints':\n gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n for gt in gts:\n self._gts[gt['video_id'], gt['category_id']].append(gt)\n for dt in dts:\n self._dts[dt['video_id'], dt['category_id']].append(dt)\n self.evalVids = defaultdict(list) # per-image per-category evaluation results\n self.eval = {} # accumulated evaluation results\n\n def evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalVids\n :return: None\n '''\n tic = time.time()\n print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if not p.useSegm is None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType))\n print('Evaluate annotation type *{}*'.format(p.iouType))\n p.vidIds = list(np.unique(p.vidIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params=p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n if p.iouType == 'segm' or p.iouType == 'bbox':\n computeIoU = self.computeIoU\n elif p.iouType == 'keypoints':\n computeIoU = self.computeOks\n self.ious = {(vidId, catId): computeIoU(vidId, catId) \\\n for vidId in p.vidIds\n for catId in catIds}\n\n evaluateVid = self.evaluateVid\n maxDet = p.maxDets[-1]\n \n \n self.evalImgs = [evaluateVid(vidId, catId, areaRng, maxDet)\n for catId in catIds\n for areaRng in p.areaRng\n for vidId in p.vidIds\n ]\n self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))\n\n def computeIoU(self, vidId, catId):\n p = self.params\n if p.useCats:\n gt = self._gts[vidId,catId]\n dt = self._dts[vidId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[vidId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[vidId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentations'] for g in gt]\n d = [d['segmentations'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bboxes'] for g in gt]\n d = [d['bboxes'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n #ious = maskUtils.iou(d,g,iscrowd)\n def iou_seq(d_seq, g_seq):\n i = .0\n u = .0\n for d, g in zip(d_seq, g_seq):\n if d and g:\n i += maskUtils.area(maskUtils.merge([d, g], True))\n u += maskUtils.area(maskUtils.merge([d, g], False))\n elif not d and g:\n u += maskUtils.area(g)\n elif d and not g:\n u += maskUtils.area(d)\n if not u > .0:\n print(\"Mask sizes in video {} and category {} may not match!\".format(vidId, catId))\n iou = i / u if u > .0 else .0\n return iou\n ious = np.zeros([len(d), len(g)])\n for i, j in np.ndindex(ious.shape):\n ious[i, j] = iou_seq(d[i], g[j])\n #print(vidId, catId, ious.shape, ious)\n return ious\n\n def computeOks(self, imgId, catId):\n p = self.params\n # dimention here should be Nxm\n gts = self._gts[imgId, catId]\n dts = self._dts[imgId, catId]\n inds = np.argsort([-d['score'] for d in dts], kind='mergesort')\n dts = [dts[i] for i in inds]\n if len(dts) > p.maxDets[-1]:\n dts = dts[0:p.maxDets[-1]]\n # if len(gts) == 0 and len(dts) == 0:\n if len(gts) == 0 or len(dts) == 0:\n return []\n ious = np.zeros((len(dts), len(gts)))\n sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0\n vars = (sigmas * 2)**2\n k = len(sigmas)\n # compute oks between each detection and ground truth object\n for j, gt in enumerate(gts):\n # create bounds for ignore regions(double the gt bbox)\n g = np.array(gt['keypoints'])\n xg = g[0::3]; yg = g[1::3]; vg = g[2::3]\n k1 = np.count_nonzero(vg > 0)\n bb = gt['bbox']\n x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2\n y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2\n for i, dt in enumerate(dts):\n d = np.array(dt['keypoints'])\n xd = d[0::3]; yd = d[1::3]\n if k1>0:\n # measure the per-keypoint distance if keypoints visible\n dx = xd - xg\n dy = yd - yg\n 
else:\n # measure minimum distance to keypoints in (x0,y0) & (x1,y1)\n z = np.zeros((k))\n dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)\n dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)\n e = (dx**2 + dy**2) / vars / (gt['avg_area']+np.spacing(1)) / 2\n if k1 > 0:\n e=e[vg > 0]\n ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]\n return ious\n\n def evaluateVid(self, vidId, catId, aRng, maxDet):\n '''\n perform evaluation for single category and image\n :return: dict (single image results)\n '''\n p = self.params\n if p.useCats:\n gt = self._gts[vidId,catId]\n dt = self._dts[vidId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[vidId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[vidId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return None\n\n for g in gt:\n if g['ignore'] or (g['avg_area']<aRng[0] or g['avg_area']>aRng[1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in dtind[0:maxDet]]\n iscrowd = [int(o['iscrowd']) for o in gt]\n # load computed ious\n ious = self.ious[vidId, catId][:, gtind] if len(self.ious[vidId, catId]) > 0 else self.ious[vidId, catId]\n\n T = len(p.iouThrs)\n G = len(gt)\n D = len(dt)\n gtm = np.zeros((T,G))\n dtm = np.zeros((T,D))\n gtIg = np.array([g['_ignore'] for g in gt])\n dtIg = np.zeros((T,D))\n if not len(ious)==0:\n for tind, t in enumerate(p.iouThrs):\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for gind, g in enumerate(gt):\n # if this gt already matched, and not a crowd, continue\n if gtm[tind,gind]>0 and not iscrowd[gind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and gtIg[m]==0 and gtIg[gind]==1:\n break\n # continue to next gt unless better match made\n if ious[dind,gind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious[dind,gind]\n m=gind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n dtIg[tind,dind] = gtIg[m]\n dtm[tind,dind] = gt[m]['id']\n gtm[tind,m] = d['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([d['avg_area']<aRng[0] or d['avg_area']>aRng[1] for d in dt]).reshape((1, len(dt)))\n dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))\n # store results for given image and category\n return {\n 'video_id': vidId,\n 'category_id': catId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'dtIds': [d['id'] for d in dt],\n 'gtIds': [g['id'] for g in gt],\n 'dtMatches': dtm,\n 'gtMatches': gtm,\n 'dtScores': [d['score'] for d in dt],\n 'gtIgnore': gtIg,\n 'dtIgnore': dtIg,\n }\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n print('Accumulating evaluation results...')\n tic = time.time()\n if not self.evalImgs:\n print('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,K,A,M))\n scores = 
-np.ones((T,R,K,A,M))\n\n # create dictionary for future indexing\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.vidIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.vidIds) if i in setI]\n I0 = len(_pe.vidIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0*A0*I0\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n\n dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]\n dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]\n gtIg = np.concatenate([e['gtIgnore'] for e in E])\n npig = np.count_nonzero(gtIg==0 )\n if npig == 0:\n continue\n tps = np.logical_and( dtm, np.logical_not(dtIg) )\n fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t,k,a,m] = rc[-1]\n else:\n recall[t,k,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = dtScoresSorted[pi]\n except:\n pass\n precision[t,:,k,a,m] = np.array(q)\n scores[t,:,k,a,m] = np.array(ss)\n self.eval = {\n 'params': p,\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == 
p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n def _summarizeDets():\n stats = np.zeros((12,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])\n stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])\n stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])\n stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])\n stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])\n return stats\n def _summarizeKps():\n stats = np.zeros((10,))\n stats[0] = _summarize(1, maxDets=20)\n stats[1] = _summarize(1, maxDets=20, iouThr=.5)\n stats[2] = _summarize(1, maxDets=20, iouThr=.75)\n stats[3] = _summarize(1, maxDets=20, areaRng='medium')\n stats[4] = _summarize(1, maxDets=20, areaRng='large')\n stats[5] = _summarize(0, maxDets=20)\n stats[6] = _summarize(0, maxDets=20, iouThr=.5)\n stats[7] = _summarize(0, maxDets=20, iouThr=.75)\n stats[8] = _summarize(0, maxDets=20, areaRng='medium')\n stats[9] = _summarize(0, maxDets=20, areaRng='large')\n return stats\n if not self.eval:\n raise Exception('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n elif iouType == 'keypoints':\n summarize = _summarizeKps\n self.stats = summarize()\n\n def __str__(self):\n self.summarize()" } ]
import contextlib import copy import io import itertools import json import logging import numpy as np import os import pycocotools.mask as mask_util import torch import detectron2.utils.comm as comm from collections import OrderedDict from .datasets.ytvis_api.ytvos import YTVOS from .datasets.ytvis_api.ytvoseval import YTVOSeval from tabulate import tabulate from detectron2.config import CfgNode from detectron2.data import MetadataCatalog from detectron2.evaluation import DatasetEvaluator from detectron2.utils.file_io import PathManager from detectron2.utils.logger import create_small_table
12,570
Derive the desired score numbers from summarized COCOeval. Args: coco_eval (None or COCOEval): None represents no predictions from model. iou_type (str): class_names (None or list[str]): if provided, will use it to predict per-category AP. Returns: a dict of {metric name: score} """ metrics = ["AP", "AP50", "AP75", "APs", "APm", "APl", "AR1", "AR10"] if coco_eval is None: self._logger.warn("No predictions from the model!") return {metric: float("nan") for metric in metrics} # the standard metrics results = { metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") for idx, metric in enumerate(metrics) } self._logger.info( "Evaluation results for {}: \n".format("segm") + create_small_table(results) ) if not np.isfinite(sum(results.values())): self._logger.info("Some metrics cannot be computed and is shown as NaN.") if class_names is None or len(class_names) <= 1: return results # Compute per-category AP # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa precisions = coco_eval.eval["precision"] # precision has dims (iou, recall, cls, area range, max dets) assert len(class_names) == precisions.shape[2] results_per_category = [] for idx, name in enumerate(class_names): # area range index 0: all area ranges # max dets index -1: typically 100 per image precision = precisions[:, :, idx, 0, -1] precision = precision[precision > -1] ap = np.mean(precision) if precision.size else float("nan") results_per_category.append(("{}".format(name), float(ap * 100))) # tabulate it N_COLS = min(6, len(results_per_category) * 2) results_flatten = list(itertools.chain(*results_per_category)) results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) table = tabulate( results_2d, tablefmt="pipe", floatfmt=".3f", headers=["category", "AP"] * (N_COLS // 2), numalign="left", ) self._logger.info("Per-category {} AP: \n".format("segm") + table) results.update({"AP-" + name: ap for name, ap in results_per_category}) return results def instances_to_coco_json_video(inputs, outputs): """ Dump an "Instances" object to a COCO-format json that's used for evaluation. Args: instances (Instances): video_id (int): the image id Returns: list[dict]: list of json annotations in COCO format. """ assert len(inputs) == 1, "More than one inputs are loaded for inference!" video_id = inputs[0]["video_id"] video_length = inputs[0]["length"] scores = outputs["pred_scores"] labels = outputs["pred_labels"] masks = outputs["pred_masks"] ytvis_results = [] for instance_id, (s, l, m) in enumerate(zip(scores, labels, masks)): segms = [ mask_util.encode(np.array(_mask[:, :, None], order="F", dtype="uint8"))[0] for _mask in m ] for rle in segms: rle["counts"] = rle["counts"].decode("utf-8") res = { "video_id": video_id, "score": s, "category_id": l, "segmentations": segms, } ytvis_results.append(res) return ytvis_results def _evaluate_predictions_on_coco( coco_gt, coco_results, img_ids=None, ): """ Evaluate the coco results using COCOEval API. """ assert len(coco_results) > 0 coco_results = copy.deepcopy(coco_results) # When evaluating mask AP, if the results contain bbox, cocoapi will # use the box area as the area of the instance, instead of the mask area. # This leads to a different definition of small/medium/large. # We remove the bbox field to let mask AP use mask area. for c in coco_results: c.pop("bbox", None) coco_dt = coco_gt.loadRes(coco_results)
# Copyright (c) Facebook, Inc. and its affiliates. # Modified by Bowen Cheng from https://github.com/sukjunhwang/IFC class YTVISEvaluator(DatasetEvaluator): """ Evaluate AR for object proposals, AP for instance detection/segmentation, AP for keypoint detection outputs using COCO's metrics. See http://cocodataset.org/#detection-eval and http://cocodataset.org/#keypoints-eval to understand its metrics. In addition to COCO, this evaluator is able to support any bounding box detection, instance segmentation, or keypoint detection dataset. """ def __init__( self, dataset_name, tasks=None, distributed=True, output_dir=None, *, use_fast_impl=True, ): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have either the following corresponding metadata: "json_file": the path to the COCO format annotation Or it must be in detectron2's standard dataset format so it can be converted to COCO format automatically. tasks (tuple[str]): tasks that can be evaluated under the given configuration. A task is one of "bbox", "segm", "keypoints". By default, will infer this automatically from predictions. distributed (True): if True, will collect results from all ranks and run evaluation in the main process. Otherwise, will only evaluate the results in the current process. output_dir (str): optional, an output directory to dump all results predicted on the dataset. The dump contains two files: 1. "instances_predictions.pth" a file in torch serialization format that contains all the raw original predictions. 2. "coco_instances_results.json" a json file in COCO's result format. use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP. Although the results should be very close to the official implementation in COCO API, it is still recommended to compute results with the official API for use in papers. The faster implementation also uses more RAM. """ self._logger = logging.getLogger(__name__) self._distributed = distributed self._output_dir = output_dir self._use_fast_impl = use_fast_impl if tasks is not None and isinstance(tasks, CfgNode): self._logger.warning( "COCO Evaluator instantiated using config, this is deprecated behavior." " Please pass in explicit arguments instead." ) self._tasks = None # Infering it from predictions should be better else: self._tasks = tasks self._cpu_device = torch.device("cpu") self._metadata = MetadataCatalog.get(dataset_name) json_file = PathManager.get_local_path(self._metadata.json_file) with contextlib.redirect_stdout(io.StringIO()): self._ytvis_api = YTVOS(json_file) # Test set json files do not contain annotations (evaluation must be # performed using the COCO evaluation server). self._do_evaluation = "annotations" in self._ytvis_api.dataset def reset(self): self._predictions = [] def process(self, inputs, outputs): """ Args: inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). It is a list of dict. Each dict corresponds to an image and contains keys like "height", "width", "file_name", "image_id". outputs: the outputs of a COCO model. It is a list of dicts with key "instances" that contains :class:`Instances`. """ prediction = instances_to_coco_json_video(inputs, outputs) self._predictions.extend(prediction) def evaluate(self): """ Args: img_ids: a list of image IDs to evaluate on. 
Default to None for the whole dataset """ if self._distributed: comm.synchronize() predictions = comm.gather(self._predictions, dst=0) predictions = list(itertools.chain(*predictions)) if not comm.is_main_process(): return {} else: predictions = self._predictions if len(predictions) == 0: self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") return {} if self._output_dir: PathManager.mkdirs(self._output_dir) file_path = os.path.join(self._output_dir, "instances_predictions.pth") with PathManager.open(file_path, "wb") as f: torch.save(predictions, f) self._results = OrderedDict() self._eval_predictions(predictions) # Copy so the caller can do whatever with results return copy.deepcopy(self._results) def _eval_predictions(self, predictions): """ Evaluate predictions. Fill self._results with the metrics of the tasks. """ self._logger.info("Preparing results for YTVIS format ...") # unmap the category ids for COCO if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id all_contiguous_ids = list(dataset_id_to_contiguous_id.values()) num_classes = len(all_contiguous_ids) assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1 reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()} for result in predictions: category_id = result["category_id"] assert category_id < num_classes, ( f"A prediction has class={category_id}, " f"but the dataset only has {num_classes} classes and " f"predicted class id should be in [0, {num_classes - 1}]." ) result["category_id"] = reverse_id_mapping[category_id] if self._output_dir: file_path = os.path.join(self._output_dir, "results.json") self._logger.info("Saving results to {}".format(file_path)) with PathManager.open(file_path, "w") as f: f.write(json.dumps(predictions)) f.flush() if not self._do_evaluation: self._logger.info("Annotations are not available for evaluation.") return coco_eval = ( _evaluate_predictions_on_coco( self._ytvis_api, predictions, ) if len(predictions) > 0 else None # cocoapi does not handle empty results very well ) res = self._derive_coco_results( coco_eval, class_names=self._metadata.get("thing_classes") ) self._results["segm"] = res def _derive_coco_results(self, coco_eval, class_names=None): """ Derive the desired score numbers from summarized COCOeval. Args: coco_eval (None or COCOEval): None represents no predictions from model. iou_type (str): class_names (None or list[str]): if provided, will use it to predict per-category AP. 
Returns: a dict of {metric name: score} """ metrics = ["AP", "AP50", "AP75", "APs", "APm", "APl", "AR1", "AR10"] if coco_eval is None: self._logger.warn("No predictions from the model!") return {metric: float("nan") for metric in metrics} # the standard metrics results = { metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") for idx, metric in enumerate(metrics) } self._logger.info( "Evaluation results for {}: \n".format("segm") + create_small_table(results) ) if not np.isfinite(sum(results.values())): self._logger.info("Some metrics cannot be computed and is shown as NaN.") if class_names is None or len(class_names) <= 1: return results # Compute per-category AP # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa precisions = coco_eval.eval["precision"] # precision has dims (iou, recall, cls, area range, max dets) assert len(class_names) == precisions.shape[2] results_per_category = [] for idx, name in enumerate(class_names): # area range index 0: all area ranges # max dets index -1: typically 100 per image precision = precisions[:, :, idx, 0, -1] precision = precision[precision > -1] ap = np.mean(precision) if precision.size else float("nan") results_per_category.append(("{}".format(name), float(ap * 100))) # tabulate it N_COLS = min(6, len(results_per_category) * 2) results_flatten = list(itertools.chain(*results_per_category)) results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) table = tabulate( results_2d, tablefmt="pipe", floatfmt=".3f", headers=["category", "AP"] * (N_COLS // 2), numalign="left", ) self._logger.info("Per-category {} AP: \n".format("segm") + table) results.update({"AP-" + name: ap for name, ap in results_per_category}) return results def instances_to_coco_json_video(inputs, outputs): """ Dump an "Instances" object to a COCO-format json that's used for evaluation. Args: instances (Instances): video_id (int): the image id Returns: list[dict]: list of json annotations in COCO format. """ assert len(inputs) == 1, "More than one inputs are loaded for inference!" video_id = inputs[0]["video_id"] video_length = inputs[0]["length"] scores = outputs["pred_scores"] labels = outputs["pred_labels"] masks = outputs["pred_masks"] ytvis_results = [] for instance_id, (s, l, m) in enumerate(zip(scores, labels, masks)): segms = [ mask_util.encode(np.array(_mask[:, :, None], order="F", dtype="uint8"))[0] for _mask in m ] for rle in segms: rle["counts"] = rle["counts"].decode("utf-8") res = { "video_id": video_id, "score": s, "category_id": l, "segmentations": segms, } ytvis_results.append(res) return ytvis_results def _evaluate_predictions_on_coco( coco_gt, coco_results, img_ids=None, ): """ Evaluate the coco results using COCOEval API. """ assert len(coco_results) > 0 coco_results = copy.deepcopy(coco_results) # When evaluating mask AP, if the results contain bbox, cocoapi will # use the box area as the area of the instance, instead of the mask area. # This leads to a different definition of small/medium/large. # We remove the bbox field to let mask AP use mask area. for c in coco_results: c.pop("bbox", None) coco_dt = coco_gt.loadRes(coco_results)
coco_eval = YTVOSeval(coco_gt, coco_dt)
1
2023-12-12 11:49:28+00:00
16k
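The `_derive_coco_results` logic embedded in this example's `cropped_code` and `all_code` fields reduces the COCOeval `precision` array (dims: iou, recall, cls, area range, max dets) to a per-category AP by masking out `-1` entries and averaging. A minimal runnable sketch of that reduction follows; the synthetic precision array, the class names, and the `per_category_ap` helper name are assumptions made for illustration, not values from this dataset row.

```python
# A minimal, self-contained sketch of the per-category AP reduction performed by
# _derive_coco_results above: average the COCOeval precision array over IoU
# thresholds and recall points for each class, ignoring -1 entries. The random
# array, class names, and helper name are illustrative assumptions.
from typing import Dict, List

import numpy as np


def per_category_ap(precisions: np.ndarray, class_names: List[str]) -> Dict[str, float]:
    # precisions has dims (iou, recall, cls, area range, max dets);
    # -1 marks precision values that are undefined for a given setting.
    assert len(class_names) == precisions.shape[2]
    results = {}
    for idx, name in enumerate(class_names):
        # area range index 0: all areas; max dets index -1: the largest setting
        p = precisions[:, :, idx, 0, -1]
        p = p[p > -1]
        results["AP-" + name] = float(np.mean(p) * 100) if p.size else float("nan")
    return results


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    # 10 IoU thresholds, 101 recall points, 3 classes, 4 area ranges, 3 max-det settings
    fake_precisions = rng.uniform(0.0, 1.0, size=(10, 101, 3, 4, 3))
    print(per_category_ap(fake_precisions, ["person", "car", "dog"]))
```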
ebb-earl-co/tidal-wave
tidal_wave/playlist.py
[ { "identifier": "AudioFormat", "path": "tidal_wave/media.py", "snippet": "class AudioFormat(str, Enum):\n sony_360_reality_audio = \"360\"\n dolby_atmos = \"Atmos\"\n hi_res = \"HiRes\"\n mqa = \"MQA\"\n lossless = \"Lossless\"\n high = \"High\"\n low = \"Low\"" }, { "identifier": "PlaylistsEndpointResponseJSON", "path": "tidal_wave/models.py", "snippet": "class PlaylistsEndpointResponseJSON(dataclass_wizard.JSONWizard):\n \"\"\"Response from the TIDAL API, videos/<VIDEOID> endpoint.If the params and\n headers are correctly specified, the API returns metadata of the available\n version of the (music) video, including video quality, video title, date,\n video artists, duration, etc.\"\"\"\n\n uuid: str = field(repr=False)\n title: str\n number_of_tracks: int\n number_of_videos: int\n description: str\n created: Annotated[datetime, dataclass_wizard.Pattern(\"%Y-%m-%dT%H:%M:%S.%f%z\")]\n type: str\n public_playlist: bool\n url: str\n square_image: str # UUID v4" }, { "identifier": "TracksEndpointResponseJSON", "path": "tidal_wave/models.py", "snippet": "class TracksEndpointResponseJSON(dataclass_wizard.JSONWizard):\n \"\"\"Response from the TIDAL API, tracks/{TRACKID} endpoint.If the params and\n headers are correctly specified, the API returns metadata of the available\n version of the audio track, including audio quality, track title, ISRC,\n track artists, album, track number, duration, etc.\"\"\"\n\n id: int = field(repr=False)\n title: str\n duration: int # seconds\n replay_gain: float = field(repr=False)\n peak: float = field(repr=False)\n track_number: int\n volume_number: int\n version: Optional[str]\n copyright: str = field(repr=False)\n url: str\n isrc: str = field(repr=False)\n explicit: bool\n audio_quality: str = field(repr=False)\n audio_modes: List[str] = field(repr=False)\n media_metadata: \"MediaMetadata\"\n artist: \"Artist\"\n artists: List[\"Artist\"]\n album: \"TrackAlbum\"\n\n def __post_init__(self):\n name: str = (\n self.title.replace(\"/\", \"_\")\n .replace(\"|\", \"_\")\n .replace(\":\", \" -\")\n .replace('\"', \"\")\n )\n self.name: str = name if self.version is None else f\"{name} ({self.version})\"" }, { "identifier": "VideosEndpointResponseJSON", "path": "tidal_wave/models.py", "snippet": "class VideosEndpointResponseJSON(dataclass_wizard.JSONWizard):\n \"\"\"Response from the TIDAL API, videos/<VIDEOID> endpoint.If the params and\n headers are correctly specified, the API returns metadata of the available\n version of the (music) video, including video quality, video title, date,\n video artists, duration, etc.\"\"\"\n\n id: int = field(repr=False)\n title: str\n volume_number: int\n track_number: int\n release_date: Annotated[\n datetime, dataclass_wizard.Pattern(\"%Y-%m-%dT%H:%M:%S.%f%z\")\n ]\n duration: int # seconds\n quality: str\n explicit: bool\n type: str\n artist: \"Artist\"\n artists: List[\"Artist\"]\n\n def __post_init__(self):\n self.name: str = (\n self.title.replace(\"/\", \"_\")\n .replace(\"|\", \"_\")\n .replace(\":\", \" -\")\n .replace('\"', \"\")\n )" }, { "identifier": "request_playlists", "path": "tidal_wave/requesting.py", "snippet": "def request_playlists(\n session: Session, identifier: int\n) -> Optional[PlaylistsEndpointResponseJSON]:\n return requester_maker(\n session=session,\n endpoint=\"playlists\",\n identifier=identifier,\n headers={\"Accept\": \"application/json\"},\n subclass=PlaylistsEndpointResponseJSON,\n )" }, { "identifier": "Track", "path": "tidal_wave/track.py", "snippet": "class Track:\n track_id: int\n\n def 
__post_init__(self):\n self._has_lyrics: Optional[bool] = None\n self.tags: dict = {}\n self.album_cover_saved: bool = False\n\n def get_metadata(self, session: Session):\n self.metadata: Optional[TracksEndpointResponseJSON] = request_tracks(\n session, self.track_id\n )\n\n def get_album(self, session: Session):\n self.album: Optional[AlbumsEndpointResponseJSON] = request_albums(\n session, self.metadata.album.id\n )\n\n def get_credits(self, session: Session):\n self.credits: Optional[TracksCreditsResponseJSON] = request_credits(\n session, self.track_id\n )\n\n def get_lyrics(self, session: Session):\n if self._has_lyrics is None:\n self.lyrics: Optional[TracksLyricsResponseJSON] = request_lyrics(\n session, self.track_id\n )\n if self.lyrics is None:\n self._has_lyrics = False\n else:\n self._has_lyrics = True\n else:\n return self.lyrics\n\n def get_stream(self, session: Session, audio_format: AudioFormat):\n \"\"\"Populates self.stream, self.manifest\"\"\"\n aq: Optional[str] = af_aq.get(audio_format)\n self.stream: Optional[TracksEndpointStreamResponseJSON] = request_stream(\n session, self.track_id, aq\n )\n\n def set_manifest(self):\n \"\"\"This method sets self.manifest and self.codec\"\"\"\n self.manifest: Manifest = manifester(self.stream)\n # https://dashif.org/codecs/audio/\n if self.manifest.codecs == \"flac\":\n self.codec = \"flac\"\n elif self.manifest.codecs == \"mqa\":\n self.codec = \"flac\"\n elif self.manifest.codecs == \"mha1\": # Sony 360 Reality Audio\n self.codec = \"mka\"\n elif self.manifest.codecs == \"mp4a.40.5\": # HE-AAC\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.29\": # HE-AAC v2\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.2\": # AAC-LC\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"eac3\": # Enhanced AC-3\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.34\": # MP3\n self.codec = \"mp3\"\n\n def set_album_dir(self, out_dir: Path):\n \"\"\"This method sets self.album_dir, based on self.album and\n out_dir. In particular, self.album_dir is a subdirectory of out_dir\n based on the name of the album's artist\"\"\"\n artist_substring: str = self.album.artist.name.replace(\"..\", \"\")\n album_substring: str = (\n f\"{self.album.name} \" f\"[{self.album.id}] [{self.album.release_date.year}]\"\n )\n self.album_dir: Path = out_dir / artist_substring / album_substring\n self.album_dir.mkdir(parents=True, exist_ok=True)\n\n if self.album.number_of_volumes > 1:\n volume_substring: str = f\"Volume {self.metadata.volume_number}\"\n (self.album_dir / volume_substring).mkdir(parents=True, exist_ok=True)\n\n def set_filename(self, audio_format: AudioFormat):\n \"\"\"This method sets self.filename. It's based on self.metadata\n as well as audio_format. 
Additionally, if the available codecs in\n self.manifest don't match audio_format, warnings are logged\"\"\"\n _track_part: str = f\"{self.metadata.track_number:02d} - {self.metadata.name}\"\n if audio_format == AudioFormat.low:\n track_substring: str = f\"{_track_part} [L]\"\n elif audio_format == AudioFormat.high:\n track_substring: str = f\"{_track_part} [H]\"\n elif audio_format == AudioFormat.lossless:\n track_substring: str = f\"{_track_part} [CD]\"\n elif audio_format == AudioFormat.mqa:\n track_substring: str = f\"{_track_part} [Q]\"\n elif audio_format == AudioFormat.hi_res:\n track_substring: str = f\"{_track_part} [HiRes]\"\n elif audio_format == AudioFormat.dolby_atmos:\n track_substring: str = f\"{_track_part} [A]\"\n elif audio_format == AudioFormat.sony_360_reality_audio:\n track_substring: str = f\"{_track_part} [360]\"\n else:\n track_substring: str = _track_part\n\n # Check for MQA masquerading as HiRes here\n if audio_format == AudioFormat.hi_res:\n if self.manifest.codecs == \"mqa\":\n logger.warning(\n \"Even though HiRes audio format was requested, this track is only \"\n \"available in MQA format. TIDAL regards this as 'HiRes' even though \"\n \"it is probably only lossless; i.e. 16-bit 44.1 kHz quality. \"\n \"Downloading of track will continue, but it will be marked as MQA.\"\n )\n self.filename: Optional[str] = f\"{_track_part} [Q].{self.codec}\"\n elif (self.stream.bit_depth == 16) and (self.stream.sample_rate == 44100):\n logger.warning(\n \"Even though HiRes audio format was requested, and TIDAL responded to \"\n \"that request without error, this track is only available in lossless \"\n \"format; i.e. 16-bit 44.1 kHz quality. Downloading of track will \"\n \"continue, but it will be marked as Lossless ([CD]).\"\n )\n self.filename: Optional[str] = f\"{_track_part} [CD].{self.codec}\"\n else:\n self.filename: Optional[str] = f\"{track_substring}.{self.codec}\"\n else:\n self.filename: Optional[str] = f\"{track_substring}.{self.codec}\"\n\n # for use in playlist file ordering\n self.trackname: str = re.match(r\"(?:\\d{2,3} - )(.+?$)\", self.filename).groups()[\n 0\n ]\n\n def set_outfile(self):\n \"\"\"Uses self.album_dir and self.metadata and self.filename\n to craft the pathlib.Path object, self.outfile, that is a\n reference to where the track will be written on disk.\"\"\"\n if self.album.number_of_volumes > 1:\n self.outfile: Path = (\n self.album_dir / f\"Volume {self.metadata.volume_number}\" / self.filename\n )\n self.absolute_outfile = str(self.outfile.absolute())\n else:\n self.outfile: Path = self.album_dir / self.filename\n self.absolute_outfile = str(self.outfile.absolute())\n\n if (self.outfile.exists()) and (self.outfile.stat().st_size > 0):\n logger.info(\n f\"Track {self.absolute_outfile} already exists \"\n \"and therefore will not be overwritten\"\n )\n return\n else:\n return self.outfile\n\n def save_artist_image(self, session: Session):\n \"\"\"This method writes a JPEG file with the name of each of\n self.metadata.artists to self.album_dir\"\"\"\n for a in self.metadata.artists:\n track_artist_image: Path = (\n self.album_dir / f\"{a.name.replace('..', '')}.jpg\"\n )\n if not track_artist_image.exists():\n download_artist_image(session, a, self.album_dir)\n\n def save_artist_bio(self, session: Session):\n \"\"\"This method writes a JSON file with the name of each of\n self.metadata.artists to self.album_dir\"\"\"\n for a in self.metadata.artists:\n track_artist_bio_json: Path = self.album_dir / f\"{a.name}-bio.json\"\n if not 
track_artist_bio_json.exists():\n artist_bio: Optional[ArtistsBioResponseJSON] = request_artist_bio(\n session, a.id\n )\n if artist_bio is not None:\n logger.info(\n f\"Writing artist bio for artist {a.id} to \"\n f\"'{str(track_artist_bio_json.absolute())}\"\n )\n track_artist_bio_json.write_text(artist_bio.to_json())\n\n def save_album_cover(self, session: Session):\n \"\"\"This method saves cover.jpg to self.album_dir; the bytes for cover.jpg\n come from self.album.cover\"\"\"\n self.cover_path: Path = self.album_dir / \"cover.jpg\"\n if (not self.cover_path.exists()) or (not self.album_cover_saved):\n download_cover_image(\n session=session, cover_uuid=self.album.cover, output_dir=self.album_dir\n )\n else:\n self.album_cover_saved = True\n\n def set_urls(self, session: Session):\n \"\"\"This method sets self.urls based on self.manifest\"\"\"\n if isinstance(self.manifest, JSONDASHManifest):\n self.urls: List[str] = self.manifest.urls\n elif isinstance(self.manifest, XMLDASHManifest):\n self.urls: List[str] = self.manifest.build_urls(session=session)\n self.download_headers: Dict[str, str] = {\"Accept\": self.manifest.mime_type}\n if session.session_id is not None:\n self.download_headers[\"sessionId\"] = session.session_id\n self.download_params = {k: None for k in session.params}\n\n def download_url(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method downloads self.urls[0], for use in situations when\n the manifest returned by TIDAL API contains one URL. It relies on\n byte range headers to incrementally get all content from a URL\"\"\"\n logger.info(f\"Writing track {self.track_id} to '{self.absolute_outfile}'\")\n\n with temporary_file() as ntf:\n # Implement HTTP range requests here to mimic official clients\n range_size: int = 1024 * 1024 # 1 MiB\n content_length: int = fetch_content_length(\n session=session, url=self.urls[0]\n )\n if content_length == 0:\n return\n\n range_headers: Iterable[str] = http_request_range_headers(\n content_length=content_length,\n range_size=range_size,\n return_tuple=False,\n )\n for rh in range_headers:\n with session.get(\n self.urls[0], params=self.download_params, headers={\"Range\": rh}\n ) as rr:\n if not rr.ok:\n logger.warning(f\"Could not download {self}\")\n return\n else:\n ntf.write(rr.content)\n else:\n ntf.seek(0)\n\n if self.codec == \"flac\":\n # Have to use FFMPEG to re-mux the audio bytes, otherwise\n # mutagen chokes on NoFlacHeaderError\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n self.absolute_outfile,\n acodec=\"copy\",\n loglevel=\"quiet\",\n ).run()\n elif self.codec == \"m4a\":\n shutil.copyfile(ntf.name, self.outfile)\n elif self.codec == \"mka\":\n shutil.copyfile(ntf.name, self.outfile)\n\n logger.info(\n f\"Track {self.track_id} written to '{str(self.outfile.absolute())}'\"\n )\n return self.outfile\n\n def download_urls(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method writes the contents from self.urls to a temporary\n directory, then uses FFmpeg to re-mux the data to self.outfile\"\"\"\n logger.info(f\"Writing track {self.track_id} to '{self.absolute_outfile}'\")\n\n with temporary_file() as ntf:\n for u in self.urls:\n with session.get(\n url=u, headers=self.download_headers, params=self.download_params\n ) as resp:\n if not resp.ok:\n logger.warning(f\"Could not download {self}\")\n return\n else:\n ntf.write(resp.content)\n else:\n ntf.seek(0)\n\n if self.codec == \"flac\":\n # Have to use FFmpeg to re-mux the audio bytes, otherwise\n # 
mutagen chokes on NoFlacHeaderError\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n self.absolute_outfile, acodec=\"copy\", loglevel=\"quiet\"\n ).run()\n elif self.codec == \"m4a\":\n shutil.copyfile(ntf.name, self.outfile)\n elif self.codec == \"mka\":\n shutil.copyfile(ntf.name, self.outfile)\n\n logger.info(f\"Track {self.track_id} written to '{self.absolute_outfile}'\")\n return self.outfile\n\n def download(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method GETs the data from self.urls and writes it\n to self.outfile.\"\"\"\n if len(self.urls) == 1:\n outfile: Optional[Path] = self.download_url(\n session=session, out_dir=out_dir\n )\n else:\n outfile: Optional[Path] = self.download_urls(\n session=session, out_dir=out_dir\n )\n\n return outfile\n\n def craft_tags(self):\n \"\"\"Using the TAG_MAPPING dictionary,\n write the correct values of various metadata tags to the file.\n E.g. for .flac files, the album's artist is 'ALBUMARTIST',\n but for .m4a files, the album's artist is 'aART'.\"\"\"\n tags = dict()\n if (self.codec == \"flac\") or (self.codec == \"mka\"):\n tag_map = {k: v[\"flac\"] for k, v in TAG_MAPPING.items()}\n elif self.codec == \"m4a\":\n tag_map = {k: v[\"m4a\"] for k, v in TAG_MAPPING.items()}\n\n tags[tag_map[\"album\"]] = self.album.title\n tags[tag_map[\"album_artist\"]] = \";\".join((a.name for a in self.album.artists))\n tags[tag_map[\"album_peak_amplitude\"]] = f\"{self.stream.album_peak_amplitude}\"\n tags[tag_map[\"album_replay_gain\"]] = f\"{self.stream.album_replay_gain}\"\n tags[tag_map[\"artist\"]] = \";\".join((a.name for a in self.metadata.artists))\n tags[tag_map[\"artists\"]] = [a.name for a in self.metadata.artists]\n tags[tag_map[\"barcode\"]] = self.album.upc\n tags[tag_map[\"comment\"]] = self.metadata.url\n tags[tag_map[\"copyright\"]] = self.metadata.copyright\n tags[tag_map[\"date\"]] = str(self.album.release_date)\n tags[tag_map[\"isrc\"]] = self.metadata.isrc\n tags[tag_map[\"title\"]] = self.metadata.name\n tags[tag_map[\"track_peak_amplitude\"]] = f\"{self.metadata.peak}\"\n tags[tag_map[\"track_replay_gain\"]] = f\"{self.metadata.replay_gain}\"\n # credits\n for tag in {\"composer\", \"engineer\", \"lyricist\", \"mixer\", \"producer\", \"remixer\"}:\n try:\n _credits_tag = \";\".join(getattr(self.credits, tag))\n except (TypeError, AttributeError): # NoneType problems\n continue\n else:\n tags[tag_map[tag]] = _credits_tag\n # lyrics\n try:\n _lyrics = self.lyrics.subtitles\n except (TypeError, AttributeError): # NoneType problems\n pass\n else:\n tags[tag_map[\"lyrics\"]] = _lyrics\n\n if self.codec == \"flac\":\n # track and disk\n tags[\"DISCTOTAL\"] = f\"{self.album.number_of_volumes}\"\n tags[\"DISC\"] = f\"{self.metadata.volume_number}\"\n tags[\"TRACKTOTAL\"] = f\"{self.album.number_of_tracks}\"\n tags[\"TRACKNUMBER\"] = f\"{self.metadata.track_number}\"\n # instrument-specific\n # piano\n try:\n piano_credits: List[str] = [\n f\"{pc} (piano)\" for pc in self.credits.piano\n ]\n except (TypeError, AttributeError): # NoneType problems\n pass\n else:\n tags[\"PERFORMER\"] = piano_credits\n\n elif self.codec == \"m4a\":\n # Have to convert to bytes the values of the tags starting with '----'\n for k, v in tags.copy().items():\n if k.startswith(\"----\"):\n if isinstance(v, str):\n tags[k]: bytes = v.encode(\"UTF-8\")\n elif isinstance(v, list):\n tags[k]: List[bytes] = [s.encode(\"UTF-8\") for s in v]\n\n tags[\"trkn\"] = [(self.metadata.track_number, self.album.number_of_tracks)]\n 
tags[\"disk\"] = [(self.metadata.volume_number, self.album.number_of_volumes)]\n\n self.tags: dict = {k: v for k, v in tags.items() if v is not None}\n\n def set_tags(self):\n \"\"\"Instantiate a mutagen.File instance, add self.tags to it, and\n save it to disk\"\"\"\n self.mutagen = mutagen.File(self.outfile)\n self.mutagen.clear()\n self.mutagen.update(**self.tags)\n # add album cover\n if self.codec == \"flac\":\n p = mutagen.flac.Picture()\n p.type = mutagen.id3.PictureType.COVER_FRONT\n p.desc = \"Album Cover\"\n p.width = p.height = 1280\n p.mime = \"image/jpeg\"\n p.data = self.cover_path.read_bytes()\n self.mutagen.add_picture(p)\n elif self.codec == \"m4a\":\n self.mutagen[\"covr\"] = [\n MP4Cover(self.cover_path.read_bytes(), imageformat=MP4Cover.FORMAT_JPEG)\n ]\n\n self.mutagen.save()\n # Make sure audio track comes first because of\n # less-sophisticated audio players that only\n # recognize the first stream\n if self.codec == \"flac\":\n with temporary_file(suffix=\".mka\") as tf:\n shutil.move(str(self.outfile.absolute()), tf.name)\n cmd: List[str] = shlex.split(\n f\"\"\"ffmpeg -hide_banner -loglevel quiet -y -i \"{tf.name}\"\n -map 0:a:0 -map 0:v:0 -c:a copy -c:v copy\n -metadata:s:v title='Album cover' -metadata:s:v comment='Cover (front)'\n -disposition:v attached_pic \"{self.absolute_outfile}\" \"\"\"\n )\n subprocess.run(cmd)\n elif self.codec == \"m4a\":\n with temporary_file(suffix=\".mka\") as tf:\n cmd: List[str] = shlex.split(\n f\"\"\"ffmpeg -hide_banner -loglevel quiet -y -i \"{self.absolute_outfile}\"\n -map 0:a:0 -map 0:v:0 -c:a copy -c:v copy \"{tf.name}\" \"\"\"\n )\n subprocess.run(cmd)\n shutil.copyfile(tf.name, self.absolute_outfile)\n\n def get(\n self,\n session: Session,\n audio_format: AudioFormat,\n out_dir: Path,\n metadata: Optional[TracksEndpointResponseJSON] = None,\n album: Optional[AlbumsEndpointResponseJSON] = None,\n ) -> Optional[str]:\n if metadata is None:\n self.get_metadata(session)\n else:\n self.metadata = metadata\n\n if self.metadata is None:\n self.outfile = None\n return\n\n if \"DOLBY_ATMOS\" in self.metadata.media_metadata.tags:\n if audio_format != AudioFormat.dolby_atmos:\n logger.warning(\n f\"Track {self.track_id} is only available in Dolby Atmos \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n\n if audio_format == AudioFormat.dolby_atmos:\n if \"DOLBY_ATMOS\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"Dolby Atmos audio format was requested, but track \"\n f\"{self.track_id} is not available in Dolby Atmos \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n elif audio_format == AudioFormat.sony_360_reality_audio:\n if \"SONY_360RA\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"Sony 360 Reality Audio audio format was requested, but track \"\n f\"{self.track_id} is not available in Sony 360 Reality Audio \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n elif audio_format == AudioFormat.mqa:\n if \"MQA\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"MQA audio format was requested, but track \"\n f\"{self.track_id} is not available in MQA audio \"\n \"format. 
Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n\n if album is None:\n self.get_album(session)\n else:\n self.album = album\n\n if self.album is None:\n self.outfile = None\n return\n\n self.get_credits(session)\n self.get_stream(session, audio_format)\n if self.stream is None:\n return\n self.set_manifest()\n self.set_album_dir(out_dir)\n self.set_filename(audio_format)\n outfile: Optional[Path] = self.set_outfile()\n if outfile is None:\n return\n\n try:\n self.get_lyrics(session)\n except Exception:\n pass\n\n self.save_album_cover(session)\n\n try:\n self.save_artist_image(session)\n except Exception:\n pass\n\n try:\n self.save_artist_bio(session)\n except Exception:\n pass\n\n self.set_urls(session)\n\n if self.download(session, out_dir) is None:\n return\n\n self.craft_tags()\n self.set_tags()\n\n return str(self.outfile.absolute())\n\n def dump(self, fp=sys.stdout):\n k: int = int(self.metadata.track_number)\n if self.outfile is None:\n v: Optional[str] = None\n elif not isinstance(self.outfile, Path):\n v: Optional[str] = None\n else:\n v: Optional[str] = str(self.outfile.absolute())\n json.dump({k: v}, fp)\n return None\n\n def dumps(self) -> str:\n k: int = int(self.metadata.track_number)\n if self.outfile is None:\n v: Optional[str] = None\n elif not isinstance(self.outfile, Path):\n v: Optional[str] = None\n else:\n v: Optional[str] = str(self.outfile.absolute())\n json.dumps({k: v})\n return None" }, { "identifier": "download_cover_image", "path": "tidal_wave/utils.py", "snippet": "def download_cover_image(\n session: Session,\n cover_uuid: str,\n output_dir: Path,\n file_name: str = \"cover.jpg\",\n dimension: Union[int, Tuple[int]] = 1280,\n) -> Optional[Path]:\n \"\"\"Given a UUID that corresponds to a (JPEG) image on Tidal's servers,\n download the image file and write it as 'cover.jpeg' or 'cover.png'\n in the directory `path_to_output_dir`. 
Returns path to downloaded file\"\"\"\n cover_url_part: str = cover_uuid.replace(\"-\", \"/\")\n if isinstance(dimension, int):\n _url: str = IMAGE_URL % f\"{cover_url_part}/{dimension}x{dimension}\"\n elif isinstance(dimension, tuple):\n _url: str = IMAGE_URL % f\"{cover_url_part}/{dimension[0]}x{dimension[1]}\"\n\n with session.get(url=_url, headers={\"Accept\": \"image/jpeg\"}) as r:\n if not r.ok:\n logger.warning(\n \"Could not retrieve data from Tidal resources/images URL \"\n f\"due to error code: {r.status_code}\"\n )\n logger.debug(r.reason)\n return\n else:\n bytes_to_write = BytesIO(r.content)\n\n if bytes_to_write is not None:\n output_file: Path = output_dir / file_name\n bytes_to_write.seek(0)\n output_file.write_bytes(bytes_to_write.read())\n bytes_to_write.close()\n return output_file" }, { "identifier": "temporary_file", "path": "tidal_wave/utils.py", "snippet": "@contextmanager\ndef temporary_file(suffix: str = \".mka\"):\n \"\"\"This context-managed function is a stand-in for\n tempfile.NamedTemporaryFile as that stdlib object experiences\n errors on Windows.\"\"\"\n file_name: str = os.path.join(\n tempfile.gettempdir(), f\"{os.urandom(24).hex()}{suffix}\"\n )\n if not os.path.exists(file_name):\n open(file=file_name, mode=\"x\").close()\n\n tf = open(file=file_name, mode=\"wb\")\n try:\n yield tf\n finally:\n tf.close()\n os.unlink(tf.name)" }, { "identifier": "TIDAL_API_URL", "path": "tidal_wave/utils.py", "snippet": "TIDAL_API_URL: str = \"https://api.tidal.com/v1\"" }, { "identifier": "Video", "path": "tidal_wave/video.py", "snippet": "class Video:\n video_id: int\n\n def __post_init__(self):\n self.tags: dict = {}\n self.codec: str = \"mp4\"\n\n def get_metadata(self, session: Session):\n \"\"\"Request from TIDAL API /videos endpoint\"\"\"\n self.metadata: Optional[VideosEndpointResponseJSON] = request_videos(\n session, self.video_id\n )\n\n def get_contributors(self, session: Session):\n \"\"\"Request from TIDAL API /videos/contributors endpoint\"\"\"\n self.contributors: Optional[\n VideosContributorsResponseJSON\n ] = request_video_contributors(session, self.video_id)\n\n def get_stream(self, session: Session, video_format=VideoFormat.high):\n \"\"\"Populates self.stream by requesting from TIDAL API\n /videos/playbackinfopostpaywall endpoint\"\"\"\n self.stream: Optional[VideosEndpointStreamResponseJSON] = request_video_stream(\n session, self.video_id, video_format.value\n )\n\n def get_m3u8(self, session: Session):\n \"\"\"This method sets self.m3u8, an m3u8.M3U8 object\n following the HTTP Live Streaming specification; parsed from\n self.stream. I.e., self.get_stream() needs to have been executed\n before calling this method. N.b. 
self.m3u8 almost certainly will\n be a multivariant playlist, meaning further processing of its\n contents will be necessary.\"\"\"\n self.m3u8: m3u8.Playlist = playlister(session=session, vesrj=self.stream)\n\n def set_urls(self):\n \"\"\"This method uses self.m3u8, an m3u8.M3U8 object that is variant:\n (https://developer.apple.com/documentation/http-live-streaming/creating-a-multivariant-playlist)\n It retrieves the highest-quality .m3u8 in its .playlists attribute,\n and sets self.urls as the list of strings from that m3u8.Playlist\"\"\"\n # for now, just get the highest-bandwidth playlist\n playlist: m3u8.Playlist = variant_streams(self.m3u8)\n self.M3U8 = m3u8.load(playlist.uri)\n if self.M3U8 is None or len(self.M3U8.files) == 0:\n raise TidalM3U8Exception(\n f\"HLS media segments are not available for video {self.video_id}\"\n )\n self.urls: List[str] = self.M3U8.files\n\n def set_artist_dir(self, out_dir: Path):\n \"\"\"Set self.artist_dir, which is the subdirectory of `out_dir`\n with name `self.metadata.artist.name`\"\"\"\n self.artist_dir: Path = out_dir / self.metadata.artist.name\n self.artist_dir.mkdir(parents=True, exist_ok=True)\n\n def set_filename(self, out_dir: Path):\n \"\"\"Set self.filename, which is constructed from self.metadata.name\n and self.stream.video_quality\"\"\"\n self.filename: str = (\n f\"{self.metadata.name} [{self.stream.video_quality}].{self.codec}\"\n )\n\n def set_outfile(self):\n \"\"\"Uses self.artist_dir and self.metadata and self.filename\n to craft the pathlib.Path object, self.outfile, that is a\n reference to where the track will be written on disk.\"\"\"\n self.outfile: Path = self.artist_dir / self.filename\n\n if (self.outfile.exists()) and (self.outfile.stat().st_size > 0):\n logger.info(\n f\"Video {str(self.outfile.absolute())} already exists \"\n \"and therefore will not be overwritten\"\n )\n return\n else:\n return self.outfile\n\n def download(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"Requests the HLS video files that constitute self.video_id.\n Writes HLS bytes to a temporary file, then uses FFmpeg to write the\n video data to self.outfile\"\"\"\n if session.session_id is not None:\n download_headers: Dict[str, str] = {\"sessionId\": session.session_id}\n else:\n download_headers: dict = dict()\n download_params: Dict[str, None] = {k: None for k in session.params}\n # self.outfile should already have been set by self.set_outfile()\n logger.info(\n f\"Writing video {self.video_id} to '{str(self.outfile.absolute())}'\"\n )\n\n with temporary_file() as ntf:\n for u in self.urls:\n with session.get(\n url=u, headers=download_headers, params=download_params\n ) as download_response:\n if not download_response.ok:\n logger.warning(f\"Could not download {self}\")\n else:\n ntf.write(download_response.content)\n else:\n ntf.seek(0)\n\n # will always be .mp4 because HLS\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n str(self.outfile.absolute()),\n vcodec=\"copy\",\n acodec=\"copy\",\n loglevel=\"quiet\",\n ).run()\n\n logger.info(\n f\"Video {self.video_id} written to '{str(self.outfile.absolute())}'\"\n )\n return self.outfile\n\n def craft_tags(self):\n \"\"\"Using the TAG_MAPPING dictionary, write the correct values of\n various metadata tags to the file. 
Videos are .mp4\"\"\"\n tags = dict()\n tag_map = {k: v[\"m4a\"] for k, v in TAG_MAPPING.items()}\n\n tags[tag_map[\"artist\"]] = \";\".join((a.name for a in self.metadata.artists))\n tags[tag_map[\"artists\"]] = [a.name for a in self.metadata.artists]\n tags[tag_map[\"comment\"]] = f\"https://tidal.com/browse/video/{self.video_id}\"\n tags[tag_map[\"date\"]] = str(self.metadata.release_date.date())\n tags[tag_map[\"title\"]] = self.metadata.title\n\n for tag in {\"composer\", \"director\", \"lyricist\", \"producer\"}:\n try:\n _credits_tag = \";\".join(getattr(self.contributors, tag))\n except (TypeError, AttributeError): # NoneType problems\n continue\n else:\n tags[tag_map[tag]] = _credits_tag\n\n # Have to convert to bytes the values of the tags starting with '----'\n for k, v in tags.copy().items():\n if k.startswith(\"----\"):\n if isinstance(v, str):\n tags[k]: bytes = v.encode(\"UTF-8\")\n elif isinstance(v, list):\n tags[k]: List[bytes] = [s.encode(\"UTF-8\") for s in v]\n\n self.tags: dict = {k: v for k, v in tags.items() if v is not None}\n\n def set_tags(self):\n \"\"\"Instantiate a mutagen.File instance, add self.tags to it, and\n save it to disk\"\"\"\n self.mutagen = mutagen.File(self.outfile)\n self.mutagen.clear()\n self.mutagen.update(**self.tags)\n self.mutagen.save()\n\n def get(\n self,\n session: Session,\n out_dir: Path,\n metadata: Optional[\"VideosEndpointResponseJSON\"] = None,\n ) -> Optional[str]:\n \"\"\"The main method of this class. Executes a number of other methods\n in a row:\n - self.get_metadata()\n - self.get_contributors()\n - self.get_stream()\n - self.get_m3u8()\n - self.set_urls()\n - self.set_artist_dir()\n - self.set_filename()\n - self.set_outfile()\n - self.download()\n - self.craft_tags()\n - self.set_tags()\n \"\"\"\n if metadata is None:\n self.get_metadata(session)\n else:\n self.metadata = metadata\n\n if self.metadata is None:\n return None\n\n self.get_contributors(session)\n self.get_stream(session)\n if self.stream is None:\n return None\n self.get_m3u8(session)\n self.set_urls()\n self.set_artist_dir(out_dir)\n self.set_filename(out_dir)\n outfile: Optional[Path] = self.set_outfile()\n if outfile is None:\n return None\n\n if self.download(session, out_dir) is None:\n return None\n\n self.craft_tags()\n self.set_tags()\n return str(self.outfile.absolute())\n\n def dump(self, fp=sys.stdout):\n json.dump({self.metadata.title: str(self.outfile.absolute())}, fp)\n\n def dumps(self) -> str:\n return json.dumps({self.metadata.title: str(self.outfile.absolute())})" } ]
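The `Track.download_url` snippet in the context list above streams a single URL in fixed-size chunks by issuing successive HTTP `Range` requests and appending each response body to a temporary file. A small sketch of that pattern is shown below, assuming a hypothetical `build_range_headers` helper in place of the repository's `http_request_range_headers`; the output path handling and chunk size are illustrative.

```python
# A sketch of chunked downloading with HTTP "Range" headers, mirroring the loop
# in Track.download_url above. build_range_headers is a hypothetical stand-in
# for the repository's http_request_range_headers helper.
from typing import Iterator

import requests


def build_range_headers(content_length: int, range_size: int) -> Iterator[str]:
    # Yield "bytes=start-end" values covering [0, content_length) in chunks.
    start = 0
    while start < content_length:
        end = min(start + range_size, content_length) - 1
        yield f"bytes={start}-{end}"
        start = end + 1


def download_in_ranges(
    session: requests.Session,
    url: str,
    out_path: str,
    content_length: int,
    range_size: int = 1024 * 1024,  # 1 MiB, as in the snippet above
) -> None:
    with open(out_path, "wb") as f:
        for rh in build_range_headers(content_length, range_size):
            resp = session.get(url, headers={"Range": rh})
            resp.raise_for_status()
            f.write(resp.content)
```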
from dataclasses import dataclass from pathlib import Path from types import SimpleNamespace from typing import Dict, List, Optional, Set, Tuple, Union from requests import HTTPError, Session from .media import AudioFormat from .models import ( PlaylistsEndpointResponseJSON, TracksEndpointResponseJSON, VideosEndpointResponseJSON, ) from .requesting import request_playlists from .track import Track from .utils import download_cover_image, temporary_file, TIDAL_API_URL from .video import Video import json import logging import math import shutil import sys import ffmpeg import mutagen
11,213
else: subdirs.add(tv.album_dir) subdirs.add(tv.album_dir.parent) files[i - 1] = {i: None} continue _path: Optional[Path] = Path(tv.outfile) if tv is not None else None # if the item never got turned into a track or video if _path is None: files[i - 1] = {i: None} continue # if the track or video didn't download if _path.exists(): if _path.stat().st_size == 0: files[i - 1] = {i: None} continue else: files[i - 1] = {i: None} continue # otherwise, move files and clean up if isinstance(tv, Track): new_path: Path = self.playlist_dir / f"{i:03d} - {tv.trackname}" new_path.write_bytes(_path.read_bytes()) _path.unlink() files[i - 1] = {i: str(new_path.absolute())} elif isinstance(tv, Video): new_path: Path = self.playlist_dir / f"{i:03d} - {_path.name}" new_path.write_bytes(_path.read_bytes()) _path.unlink() files[i - 1] = {i: str(new_path.absolute())} else: self.files: List[Dict[int, Optional[str]]] = files # Find all subdirectories written to subdirs: Set[Path] = set() for tv in self.tracks_videos: if isinstance(tv, Track): try: getattr(tv, "album_dir") except AttributeError: pass else: subdirs.add(tv.album_dir) subdirs.add(tv.album_dir.parent) elif isinstance(tv, Video): subdirs.add(tv.artist_dir) # Copy all artist images, artist bio JSON files out # of subdirs artist_images: Set[Path] = set() for subdir in subdirs: for p in subdir.glob("*.jpg"): if p.name == "cover.jpg": continue artist_images.add(p) else: for artist_image_path in artist_images: if artist_image_path.exists(): shutil.copyfile( artist_image_path.absolute(), self.playlist_dir / artist_image_path.name, ) artist_bios: Set[Path] = set() for subdir in subdirs: for p in subdir.glob("*bio.json"): artist_bios.add(p) else: for artist_bio_path in artist_bios: if artist_bio_path.exists(): shutil.copyfile( artist_bio_path.absolute(), self.playlist_dir / artist_bio_path.name, ) # Remove all subdirs for subdir in subdirs: if subdir.exists(): shutil.rmtree(subdir) else: return self.playlist_dir def craft_m3u8_text(self): """This method creates a file called playlist.m3u8 in self.playlist_dir that is a standard M3U. Needs to be called after self.flatten_playlist_dir in order to be able to access self.files N.b. the already-written file is temporarily copied to a .mp4 version in a temporary directory because .m4a files cannot be read with mutagen.""" m3u_text: str = f"#EXTM3U\n#EXTENC:UTF-8\n#EXTIMG:{str(self.cover_path.absolute())}\n#PLAYLIST:{self.name}\n" logger.info( f"Creating .m3u8 playlist file for Playlist with ID '{self.playlist_id}'" ) for d in self.files: file: str = next(iter(d.values())) if file is None: continue elif file.endswith(".flac"): m = mutagen.File(file) artist: str = m.get("artist", [""])[0] title: str = m.get("title", [""])[0] extinf: str = ( f"#EXTINF:{math.ceil(m.info.length)}," f"{artist} - {title}\n{file}\n" ) m3u_text += extinf elif file.endswith(".mka"): m = mutagen.File(file) artist: str = m.get("ARTI", [""])[0] title: str = m.get("TITL", [""])[0] extinf: str = ( f"#EXTINF:{math.ceil(m.info.length)}," f"{artist} - {title}\n{file}\n" ) m3u_text += extinf elif file.endswith(".m4a"): # Mutagen cannot read .m4a files, so make a copy with all # of the metadata tags as a .mp4 in a temporary directory
logger = logging.getLogger("__name__") @dataclass class Playlist: playlist_id: str # UUID4 def __post_init__(self): self.playlist_dir: Optional[Path] = None self.playlist_cover_saved: bool = False def get_metadata(self, session: Session): """Request from TIDAL API /playlists endpoint""" self.metadata: Optional[PlaylistsEndpointResponseJSON] = request_playlists( session=session, identifier=self.playlist_id ) if self.metadata is None: return self.name = ( self.metadata.title.replace("/", "_") .replace("|", "_") .replace(":", " -") .replace('"', "") .replace("..", "") ) def set_items(self, session: Session): """Uses data from TIDAL API /playlists/items endpoint to populate self.items""" playlist_items: Optional[PlaylistsItemsResponseJSON] = get_playlist( session=session, playlist_id=self.playlist_id ) if playlist_items is None: self.items = tuple() else: self.items: Tuple[Optional[PlaylistItem]] = tuple(playlist_items.items) def set_dir(self, out_dir: Path): """Populates self.playlist_dir based on self.name, self.playlist_id""" playlist_substring: str = f"{self.name} [{self.playlist_id}]" self.playlist_dir: Path = out_dir / "Playlists" / playlist_substring self.playlist_dir.mkdir(parents=True, exist_ok=True) def save_cover_image(self, session: Session, out_dir: Path): """Requests self.metadata.image and attempts to write it to disk""" if self.playlist_dir is None: self.set_dir(out_dir=out_dir) self.cover_path: Path = self.playlist_dir / "cover.jpg" if not self.cover_path.exists(): download_cover_image( session=session, cover_uuid=self.metadata.square_image, output_dir=self.playlist_dir, dimension=1080, ) else: self.playlist_cover_saved = True def save_description(self): """Requests self.metadata.description and attempts to write it to disk""" description_path: Path = self.playlist_dir / "PlaylistDescription.txt" if self.metadata.description is not None and len(self.metadata.description) > 0: if not description_path.exists(): description_path.write_text(f"{self.metadata.description}\n") def get_items(self, session: Session, audio_format: AudioFormat): """Using either Track.get() or Video.get(), attempt to request the data for each track or video in self.items""" if len(self.items) == 0: return tracks_videos: list = [None] * len(self.items) for i, item in enumerate(self.items): if item is None: tracks_videos[i] = None continue elif isinstance(item, TracksEndpointResponseJSON): track: Track = Track(track_id=item.id) track.get( session=session, audio_format=audio_format, out_dir=self.playlist_dir, metadata=item, ) tracks_videos[i] = track elif isinstance(item, VideosEndpointResponseJSON): video: Video = Video(video_id=item.id) video.get( session=session, out_dir=self.playlist_dir, metadata=item, ) tracks_videos[i] = video else: tracks_videos[i] = None continue else: self.tracks_videos: Tuple[ Tuple[int, Optional[Union[Track, Video]]] ] = tuple(tracks_videos) return tracks_videos def flatten_playlist_dir(self): """When self.get_items() is called, the tracks and/or videos in self.items are downloaded using their self-contained .get() logic; this means that they will be downloaded to albums. 
This function "flattens" self.playlist_dir, meaning that it moves all downloaded audio and video files to self.playlist_dir, and removes the various subdirectories created""" files: List[Dict[int, Optional[str]]] = [None] * len(self.tracks_videos) if len(self.tracks_videos) == 0: return subdirs: Set[Path] = set() for i, tv in enumerate(self.tracks_videos, 1): if getattr(tv, "outfile") is None: try: getattr(tv, "album_dir") except AttributeError: pass else: subdirs.add(tv.album_dir) subdirs.add(tv.album_dir.parent) files[i - 1] = {i: None} continue _path: Optional[Path] = Path(tv.outfile) if tv is not None else None # if the item never got turned into a track or video if _path is None: files[i - 1] = {i: None} continue # if the track or video didn't download if _path.exists(): if _path.stat().st_size == 0: files[i - 1] = {i: None} continue else: files[i - 1] = {i: None} continue # otherwise, move files and clean up if isinstance(tv, Track): new_path: Path = self.playlist_dir / f"{i:03d} - {tv.trackname}" new_path.write_bytes(_path.read_bytes()) _path.unlink() files[i - 1] = {i: str(new_path.absolute())} elif isinstance(tv, Video): new_path: Path = self.playlist_dir / f"{i:03d} - {_path.name}" new_path.write_bytes(_path.read_bytes()) _path.unlink() files[i - 1] = {i: str(new_path.absolute())} else: self.files: List[Dict[int, Optional[str]]] = files # Find all subdirectories written to subdirs: Set[Path] = set() for tv in self.tracks_videos: if isinstance(tv, Track): try: getattr(tv, "album_dir") except AttributeError: pass else: subdirs.add(tv.album_dir) subdirs.add(tv.album_dir.parent) elif isinstance(tv, Video): subdirs.add(tv.artist_dir) # Copy all artist images, artist bio JSON files out # of subdirs artist_images: Set[Path] = set() for subdir in subdirs: for p in subdir.glob("*.jpg"): if p.name == "cover.jpg": continue artist_images.add(p) else: for artist_image_path in artist_images: if artist_image_path.exists(): shutil.copyfile( artist_image_path.absolute(), self.playlist_dir / artist_image_path.name, ) artist_bios: Set[Path] = set() for subdir in subdirs: for p in subdir.glob("*bio.json"): artist_bios.add(p) else: for artist_bio_path in artist_bios: if artist_bio_path.exists(): shutil.copyfile( artist_bio_path.absolute(), self.playlist_dir / artist_bio_path.name, ) # Remove all subdirs for subdir in subdirs: if subdir.exists(): shutil.rmtree(subdir) else: return self.playlist_dir def craft_m3u8_text(self): """This method creates a file called playlist.m3u8 in self.playlist_dir that is a standard M3U. Needs to be called after self.flatten_playlist_dir in order to be able to access self.files N.b. 
the already-written file is temporarily copied to a .mp4 version in a temporary directory because .m4a files cannot be read with mutagen.""" m3u_text: str = f"#EXTM3U\n#EXTENC:UTF-8\n#EXTIMG:{str(self.cover_path.absolute())}\n#PLAYLIST:{self.name}\n" logger.info( f"Creating .m3u8 playlist file for Playlist with ID '{self.playlist_id}'" ) for d in self.files: file: str = next(iter(d.values())) if file is None: continue elif file.endswith(".flac"): m = mutagen.File(file) artist: str = m.get("artist", [""])[0] title: str = m.get("title", [""])[0] extinf: str = ( f"#EXTINF:{math.ceil(m.info.length)}," f"{artist} - {title}\n{file}\n" ) m3u_text += extinf elif file.endswith(".mka"): m = mutagen.File(file) artist: str = m.get("ARTI", [""])[0] title: str = m.get("TITL", [""])[0] extinf: str = ( f"#EXTINF:{math.ceil(m.info.length)}," f"{artist} - {title}\n{file}\n" ) m3u_text += extinf elif file.endswith(".m4a"): # Mutagen cannot read .m4a files, so make a copy with all # of the metadata tags as a .mp4 in a temporary directory
with temporary_file(suffix=".mp4") as tf:
7
2023-12-12 21:50:25+00:00
16k
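The `Playlist.craft_m3u8_text` method shown in this example's `all_code` assembles a standard M3U: a `#EXTM3U`/`#EXTENC`/`#EXTIMG`/`#PLAYLIST` header followed by one `#EXTINF:<seconds>,<artist> - <title>` line plus a file path per track. A minimal sketch of that assembly is below; the hard-coded playlist name, cover path, and track metadata are placeholders for the tag values the real method reads from the audio files with mutagen.

```python
# A minimal sketch of the M3U assembly performed by Playlist.craft_m3u8_text
# above. The playlist name, cover path, and track dicts are illustrative
# placeholders; the real method derives artist/title/length via mutagen.
import math
from typing import Dict, List


def build_m3u8(playlist_name: str, cover_path: str, tracks: List[Dict]) -> str:
    # Header lines, matching the format used in craft_m3u8_text.
    text = (
        "#EXTM3U\n"
        "#EXTENC:UTF-8\n"
        f"#EXTIMG:{cover_path}\n"
        f"#PLAYLIST:{playlist_name}\n"
    )
    for t in tracks:
        # One #EXTINF line (rounded-up duration, "artist - title") plus the file path.
        text += (
            f"#EXTINF:{math.ceil(t['length'])},"
            f"{t['artist']} - {t['title']}\n{t['path']}\n"
        )
    return text


if __name__ == "__main__":
    demo_tracks = [
        {"artist": "Artist A", "title": "Song One", "length": 201.3, "path": "001 - Song One.flac"},
        {"artist": "Artist B", "title": "Song Two", "length": 185.0, "path": "002 - Song Two.m4a"},
    ]
    print(build_m3u8("My Playlist", "cover.jpg", demo_tracks))
```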
Deltares/imod-python
imod/tests/fixtures/mf6_flow_with_transport_fixture.py
[ { "identifier": "InitialConditions", "path": "imod/mf6/ic.py", "snippet": "class InitialConditions(Package):\n \"\"\"\n Initial Conditions (IC) Package information is read from the file that is\n specified by \"IC6\" as the file type. Only one IC Package can be specified\n for a GWF model.\n https://water.usgs.gov/water-resources/software/MODFLOW-6/mf6io_6.0.4.pdf#page=46\n\n Parameters\n ----------\n head: array of floats (xr.DataArray)\n for backwards compatibility this argument is maintained, but please use\n the start-argument instead.\n start: array of floats (xr.DataArray)\n is the initial (starting) head or concentration—that is, the simulation's\n initial state.\n STRT must be specified for all simulations, including steady-state simulations.\n One value is read for every model cell. For\n simulations in which the first stress period is steady state, the values\n used for STRT generally do not affect the simulation (exceptions may\n occur if cells go dry and (or) rewet). The execution time, however, will\n be less if STRT includes hydraulic heads that are close to the\n steadystate solution. A head value lower than the cell bottom can be\n provided if a cell should start as dry. (strt)\n validate: {True, False}\n Flag to indicate whether the package should be validated upon\n initialization. This raises a ValidationError if package input is\n provided in the wrong manner. Defaults to True.\n \"\"\"\n\n _pkg_id = \"ic\"\n _grid_data = {\"head\": np.float64}\n _keyword_map = {\"head\": \"strt\"}\n _template = Package._initialize_template(_pkg_id)\n\n _init_schemata = {\n \"start\": [\n DTypeSchema(np.floating),\n IndexesSchema(),\n PKG_DIMS_SCHEMA,\n ],\n }\n _write_schemata = {\n \"start\": [\n IdentityNoDataSchema(other=\"idomain\", is_other_notnull=(\">\", 0)),\n ],\n }\n\n _grid_data = {\"start\": np.float64}\n _keyword_map = {\"start\": \"strt\"}\n _template = Package._initialize_template(_pkg_id)\n\n _regrid_method = {\n \"start\": (\n RegridderType.OVERLAP,\n \"mean\",\n ), # TODO set to barycentric once supported\n }\n\n def __init__(self, start=None, head=None, validate: bool = True):\n super().__init__(locals())\n if start is None:\n start = head\n warnings.warn(\n 'The keyword argument \"head\" is deprecated. 
Please use the start argument.',\n DeprecationWarning,\n )\n if head is None:\n raise ValueError(\"start and head arguments cannot both be None\")\n else:\n if head is not None:\n raise ValueError(\"start and head arguments cannot both be defined\")\n\n self.dataset[\"start\"] = start\n self._validate_init_schemata(validate)\n\n def render(self, directory, pkgname, globaltimes, binary):\n d = {}\n\n icdirectory = directory / pkgname\n d[\"layered\"], d[\"strt\"] = self._compose_values(\n self[\"start\"], icdirectory, \"strt\", binary=binary\n )\n return self._template.render(d)" }, { "identifier": "GroundwaterFlowModel", "path": "imod/mf6/model.py", "snippet": "class GroundwaterFlowModel(Modflow6Model):\n _mandatory_packages = (\"npf\", \"ic\", \"oc\", \"sto\")\n _model_id = \"gwf6\"\n\n def __init__(\n self,\n listing_file: str = None,\n print_input: bool = False,\n print_flows: bool = False,\n save_flows: bool = False,\n newton: bool = False,\n under_relaxation: bool = False,\n ):\n super().__init__()\n self._options = {\n \"listing_file\": listing_file,\n \"print_input\": print_input,\n \"print_flows\": print_flows,\n \"save_flows\": save_flows,\n \"newton\": newton,\n \"under_relaxation\": under_relaxation,\n }\n self._template = initialize_template(\"gwf-nam.j2\")\n\n def _get_unique_regridder_types(self) -> Dict[RegridderType, str]:\n \"\"\"\n This function loops over the packages and collects all regridder-types that are in use.\n Differences in associated functions are ignored. It focusses only on the types. So if a\n model uses both Overlap(mean) and Overlap(harmonic_mean), this function will return just one\n Overlap regridder: the first one found, in this case Overlap(mean)\n \"\"\"\n methods = {}\n for pkg_name, pkg in self.items():\n if pkg.is_regridding_supported():\n pkg_methods = pkg.get_regrid_methods()\n for variable in pkg_methods:\n if (\n variable in pkg.dataset.data_vars\n and pkg.dataset[variable].values[()] is not None\n ):\n regriddertype = pkg_methods[variable][0]\n if regriddertype not in methods.keys():\n functiontype = pkg_methods[variable][1]\n methods[regriddertype] = functiontype\n else:\n raise NotImplementedError(\n f\"regridding is not implemented for package {pkg_name} of type {type(pkg)}\"\n )\n return methods\n\n def clip_box(\n self,\n time_min: Optional[str] = None,\n time_max: Optional[str] = None,\n layer_min: Optional[int] = None,\n layer_max: Optional[int] = None,\n x_min: Optional[float] = None,\n x_max: Optional[float] = None,\n y_min: Optional[float] = None,\n y_max: Optional[float] = None,\n state_for_boundary: Optional[GridDataArray] = None,\n ):\n clipped = super()._clip_box_packages(\n time_min, time_max, layer_min, layer_max, x_min, x_max, y_min, y_max\n )\n\n clipped_boundary_condition = self.__create_boundary_condition_clipped_boundary(\n self, clipped, state_for_boundary\n )\n if clipped_boundary_condition is not None:\n clipped[\"chd_clipped\"] = clipped_boundary_condition\n\n clipped.purge_empty_packages()\n return clipped\n\n def __create_boundary_condition_clipped_boundary(\n self,\n original_model: Modflow6Model,\n clipped_model: Modflow6Model,\n state_for_boundary: Optional[GridDataArray],\n ):\n unassigned_boundary_original_domain = (\n self.__create_boundary_condition_for_unassigned_boundary(\n original_model, state_for_boundary\n )\n )\n\n return self.__create_boundary_condition_for_unassigned_boundary(\n clipped_model, state_for_boundary, [unassigned_boundary_original_domain]\n )\n\n @staticmethod\n def 
__create_boundary_condition_for_unassigned_boundary(\n model: Modflow6Model,\n state_for_boundary: Optional[GridDataArray],\n additional_boundaries: Optional[List[imod.mf6.ConstantHead]] = None,\n ):\n if state_for_boundary is None:\n return None\n\n constant_head_packages = [\n pkg for name, pkg in model.items() if isinstance(pkg, imod.mf6.ConstantHead)\n ]\n\n additional_boundaries = [\n item for item in additional_boundaries or [] if item is not None\n ]\n\n constant_head_packages.extend(additional_boundaries)\n\n return create_clipped_boundary(\n model.domain, state_for_boundary, constant_head_packages\n )\n\n def is_use_newton(self):\n return self._options[\"newton\"]\n\n def set_newton(self, is_newton: bool) -> None:\n self._options[\"newton\"] = is_newton" }, { "identifier": "NodePropertyFlow", "path": "imod/mf6/npf.py", "snippet": "class NodePropertyFlow(Package):\n \"\"\"\n Node Property Flow package.\n\n In this package the hydraulic conductivity and rewetting in the model is\n specified. A single NPF Package is required for each GWF model.\n https://water.usgs.gov/water-resources/software/MODFLOW-6/mf6io_6.0.4.pdf#page=51\n\n A note about regridding: the fields k, k22, k33 define the principal\n components of an anisotropic conductivity tensor. By default, k and k22 are\n in the horizontal plane and k33 is vertical. Angle1, angle2 and angle3\n define the rotation of this tensor. The regridding methods associated by\n default are chosen based on the assumption that k and k22 are horizontal and\n k33 is vertical. If this is not the case, it is up to the user to regrid the\n npf package using other regridding methods. This may be recommended if for\n example the rotation is such that k has become vertical and k33 horizontal.\n\n Parameters\n ----------\n icelltype: array of int (xr.DataArray)\n flag for each cell that specifies how saturated thickness is treated. 0\n means saturated thickness is held constant; >0 means saturated thickness\n varies with computed head when head is below the cell top; <0 means\n saturated thickness varies with computed head unless the\n starting_head_as_confined_thickness option is in effect. When\n starting_head_as_confined_thickness is in effect, a negative value of\n icelltype indicates that saturated thickness will be computed as\n strt-bot and held constant.\n k: array of floats (xr.DataArray)\n is the hydraulic conductivity. For the common case in which the user\n would like to specify the horizontal hydraulic conductivity and the\n vertical hydraulic conductivity, then K should be assigned as the\n horizontal hydraulic conductivity, K33 should be assigned as the\n vertical hydraulic conductivity, and K22 and the three rotation\n angles should not be specified. When more sophisticated anisotropy is\n required, then K corresponds to the K11 hydraulic conductivity axis. All\n included cells (idomain > 0) must have a K value greater than zero\n rewet: ({True, False}, optional)\n activates model rewetting.\n Default is False.\n rewet_layer: float\n is a combination of the wetting threshold and a flag to indicate which\n neighboring cells can cause a cell to become wet. If rewet_layer < 0,\n only a cell below a dry cell can cause the cell to become wet. If\n rewet_layer > 0, the cell below a dry cell and horizontally adjacent\n cells can cause a cell to become wet. If rewet_layer is 0, the cell\n cannot be wetted. The absolute value of rewet_layer is the wetting\n threshold. 
When the sum of BOT and the absolute value of rewet_layer at\n a dry cell is equaled or exceeded by the head at an adjacent cell, the\n cell is wetted. rewet_layer must be specified if \"rewet\" is specified in\n the OPTIONS block. If \"rewet\" is not specified in the options block,\n then rewet_layer can be entered, and memory will be allocated for it,\n even though it is not used. (WETDRY)\n Default is None.\n rewet_factor:\n is a keyword and factor that is included in the calculation of the head\n that is initially established at a cell when that cell is converted from\n dry to wet. (WETFCT)\n Default is None.\n rewet_iterations:\n is a keyword and iteration interval for attempting to wet cells. Wetting\n is attempted every rewet_iterations iteration. This applies to outer\n iterations and not inner iterations. If rewet_iterations is specified as\n zero or less, then the value is changed to 1. (IWETIT)\n Default is None.\n rewet_method:\n is a keyword and integer flag that determines which equation is used to\n define the initial head at cells that become wet. If rewet_method is 0,\n h = BOT + rewet_factor (hm - BOT). If rewet_method is not 0, h = BOT +\n rewet_factor (THRESH). (IHDWET)\n Default is None.\n k22: array of floats (xr.DataArray)\n is the hydraulic conductivity of the second ellipsoid axis; for an\n unrotated case this is the hydraulic conductivity in the y direction. If\n K22 is not included, then K22 is set equal to K.\n For a regular MODFLOW grid (DIS Package is used) in which no rotation\n angles are specified, K22 is the hydraulic conductivity along columns in\n the y direction. For an unstructured DISU grid, the user must assign\n principal x and y axes and provide the angle for each cell face relative\n to the assigned x direction. All included cells (idomain > 0) must have\n a K22 value greater than zero.\n Default is None.\n k33: array of floats (xr.DataArray)\n is the hydraulic conductivity of the third ellipsoid axis; for an\n unrotated case, this is the vertical hydraulic conductivity. When\n anisotropy is applied, K33 corresponds to the K33 tensor component. All\n included cells (idomain > 0) must have a K33 value greater than zero.\n Default is None.\n angle1: float\n is a rotation angle of the hydraulic conductivity tensor in degrees. The\n angle represents the first of three sequential rotations of the\n hydraulic conductivity ellipsoid. With the K11, K22, and K33 axes of the\n ellipsoid initially aligned with the x, y, and z coordinate axes,\n respectively, angle1 rotates the ellipsoid about its K33 axis (within\n the x - y plane). A positive value represents counter-clockwise rotation\n when viewed from any point on the positive K33 axis, looking toward the\n center of the ellipsoid. A value of zero indicates that the K11 axis\n lies within the x - z plane. If angle1 is not specified, default values\n of zero are assigned to angle1, angle2, and angle3, in which case the\n K11, K22, and K33 axes are aligned with the x, y, and z axes,\n respectively.\n Default is None.\n angle2: float\n is a rotation angle of the hydraulic conductivity tensor in degrees. The\n angle represents the second of three sequential rotations of the\n hydraulic conductivity ellipsoid. Following the rotation by angle1\n described above, angle2 rotates the ellipsoid about its K22 axis (out of\n the x - y plane). An array can be specified for angle2 only if angle1 is\n also specified. 
A positive value of angle2 represents clockwise rotation\n when viewed from any point on the positive K22 axis, looking toward the\n center of the ellipsoid. A value of zero indicates that the K11 axis\n lies within the x - y plane. If angle2 is not specified, default values\n of zero are assigned to angle2 and angle3; connections that are not\n user-designated as vertical are assumed to be strictly horizontal (that\n is, to have no z component to their orientation); and connection lengths\n are based on horizontal distances.\n Default is None.\n angle3: float\n is a rotation angle of the hydraulic conductivity tensor in degrees. The\n angle represents the third of three sequential rotations of the\n hydraulic conductivity ellipsoid. Following the rotations by angle1 and\n angle2 described above, angle3 rotates the ellipsoid about its K11 axis.\n An array can be specified for angle3 only if angle1 and angle2 are also\n specified. An array must be specified for angle3 if angle2 is specified.\n A positive value of angle3 represents clockwise rotation when viewed\n from any point on the positive K11 axis, looking toward the center of\n the ellipsoid. A value of zero indicates that the K22 axis lies within\n the x - y plane.\n Default is None.\n alternative_cell_averaging : str\n Method calculating horizontal cell connection conductance.\n Options: {\"LOGARITHMIC\", \"AMT-LMK\", or \"AMT-HMK\"}\n Default: uses harmonic mean for averaging\n save_flows: ({True, False}, optional)\n keyword to indicate that cell-by-cell flow terms will be written to the\n file specified with \"budget save file\" in Output Control.\n Default is False.\n starting_head_as_confined_thickness: ({True, False}, optional)\n indicates that cells having a negative icelltype are confined, and their\n cell thickness for conductance calculations will be computed as strt-bot\n rather than top-bot.\n (THICKSTRT)\n Default is False.\n variable_vertical_conductance: ({True, False}, optional)\n keyword to indicate that the vertical conductance will be calculated\n using the saturated thickness and properties of the overlying cell and\n the thickness and properties of the underlying cell. if the dewatered\n keyword is also specified, then the vertical conductance is calculated\n using only the saturated thickness and properties of the overlying cell\n if the head in the underlying cell is below its top. if these keywords\n are not specified, then the default condition is to calculate the\n vertical conductance at the start of the simulation using the initial\n head and the cell properties. the vertical conductance remains constant\n for the entire simulation.\n (VARIABLECV)\n Default is False.\n dewatered: ({True, False}, optional)\n If the dewatered keyword is specified, then the vertical conductance is\n calculated using only the saturated thickness and properties of the\n overlying cell if the head in the underlying cell is below its top.\n Default is False.\n perched: ({True, False}, optional)\n keyword to indicate that when a cell is overlying a dewatered\n convertible cell, the head difference used in Darcy’s Law is equal to\n the head in the overlying cell minus the bottom elevation of the\n overlying cell. 
If not specified, then the default is to use the head\n difference between the two cells.\n Default is False.\n save_specific_discharge: ({True, False}, optional)\n keyword to indicate that x, y, and z components of specific discharge\n will be calculated at cell centers and written to the cell-by-cell flow\n file, which is specified with\"budget save file\" in Output Control. If\n this option is activated, then additional information may be required in\n the discretization packages and the GWF Exchange package (if GWF models\n are coupled). Specifically, angldegx must be specified in the\n connectiondata block of the disu package; angldegx must also be\n specified for the GWF Exchange as an auxiliary variable. disu package\n has not been implemented yet.\n Default is False.\n save_saturation: ({True, False}, optional)\n keyword to indicate that cell saturation will be written to the budget\n file, which is specified with \"BUDGET SAVE FILE\" in Output Control.\n Saturation will be saved to the budget file as an auxiliary variable\n saved with the DATA-SAT text label. Saturation is a cell variable that\n ranges from zero to one and can be used by post processing programs to\n determine how much of a cell volume is saturated. If ICELLTYPE is 0,\n then saturation is always one.\n xt3d_option: ({True, False}, optional)\n If True, the XT3D formulation will be used. By default False.\n rhs_option: ({True, False}, optional)\n If True, then the XT3D additional terms will be added to the right-hand\n side. If False, then the XT3D terms will be put into the coefficient\n matrix. By default False.\n validate: {True, False}\n Flag to indicate whether the package should be validated upon\n initialization. This raises a ValidationError if package input is\n provided in the wrong manner. 
Defaults to True.\n \"\"\"\n\n _pkg_id = \"npf\"\n\n _init_schemata = {\n \"icelltype\": [\n DTypeSchema(np.integer),\n IndexesSchema(),\n PKG_DIMS_SCHEMA,\n ],\n \"k\": [\n DTypeSchema(np.floating),\n IndexesSchema(),\n PKG_DIMS_SCHEMA,\n ],\n \"rewet_layer\": [\n DTypeSchema(np.floating),\n IndexesSchema(),\n PKG_DIMS_SCHEMA,\n ],\n \"k22\": [\n DTypeSchema(np.floating),\n IndexesSchema(),\n PKG_DIMS_SCHEMA,\n ],\n \"k33\": [\n DTypeSchema(np.floating),\n IndexesSchema(),\n PKG_DIMS_SCHEMA,\n ],\n \"angle1\": [\n DTypeSchema(np.floating),\n IndexesSchema(),\n PKG_DIMS_SCHEMA,\n ],\n \"angle2\": [\n DTypeSchema(np.floating),\n IndexesSchema(),\n PKG_DIMS_SCHEMA,\n ],\n \"angle3\": [\n DTypeSchema(np.floating),\n IndexesSchema(),\n PKG_DIMS_SCHEMA,\n ],\n \"alternative_cell_averaging\": [DTypeSchema(str)],\n \"save_flows\": [DTypeSchema(np.bool_)],\n \"starting_head_as_confined_thickness\": [DTypeSchema(np.bool_)],\n \"variable_vertical_conductance\": [DTypeSchema(np.bool_)],\n \"dewatered\": [DTypeSchema(np.bool_)],\n \"perched\": [DTypeSchema(np.bool_)],\n \"save_specific_discharge\": [DTypeSchema(np.bool_)],\n }\n\n _write_schemata = {\n \"k\": (\n AllValueSchema(\">\", 0.0),\n IdentityNoDataSchema(other=\"idomain\", is_other_notnull=(\">\", 0)),\n ),\n \"rewet_layer\": (\n IdentityNoDataSchema(other=\"idomain\", is_other_notnull=(\">\", 0)),\n ),\n \"k22\": (\n AllValueSchema(\">\", 0.0),\n IdentityNoDataSchema(other=\"idomain\", is_other_notnull=(\">\", 0)),\n # No need to check coords: dataset ensures they align with idomain.\n ),\n \"k33\": (\n AllValueSchema(\">\", 0.0),\n IdentityNoDataSchema(other=\"idomain\", is_other_notnull=(\">\", 0)),\n # No need to check coords: dataset ensures they align with idomain.\n ),\n \"angle1\": (IdentityNoDataSchema(other=\"idomain\", is_other_notnull=(\">\", 0)),),\n \"angle2\": (IdentityNoDataSchema(other=\"idomain\", is_other_notnull=(\">\", 0)),),\n \"angle3\": (IdentityNoDataSchema(other=\"idomain\", is_other_notnull=(\">\", 0)),),\n }\n\n _grid_data = {\n \"icelltype\": np.int32,\n \"k\": np.float64,\n \"rewet_layer\": np.float64,\n \"k22\": np.float64,\n \"k33\": np.float64,\n \"angle1\": np.float64,\n \"angle2\": np.float64,\n \"angle3\": np.float64,\n }\n _keyword_map = {\n \"rewet\": \"rewet_record\",\n \"rewet_factor\": \"wetfct\",\n \"rewet_method\": \"ihdwet\",\n \"rewet_layer\": \"wetdry\",\n \"variable_vertical_conductance\": \"variablecv\",\n \"starting_head_as_confined_thickness\": \"thickstrt\",\n \"rewet_iterations\": \"iwetit\",\n \"xt3d_option\": \"xt3doptions\",\n \"rhs_option\": \"rhs\",\n }\n _template = Package._initialize_template(_pkg_id)\n\n _regrid_method = {\n \"icelltype\": (RegridderType.OVERLAP, \"mean\"),\n \"k\": (RegridderType.OVERLAP, \"geometric_mean\"), # horizontal if angle2 = 0\n \"k22\": (\n RegridderType.OVERLAP,\n \"geometric_mean\",\n ), # horizontal if angle2 = 0 & angle3 = 0\n \"k33\": (\n RegridderType.OVERLAP,\n \"harmonic_mean\",\n ), # vertical if angle2 = 0 & angle3 = 0\n \"angle1\": (RegridderType.OVERLAP, \"mean\"),\n \"angle2\": (RegridderType.OVERLAP, \"mean\"),\n \"angle3\": (RegridderType.OVERLAP, \"mean\"),\n \"rewet_layer\": (RegridderType.OVERLAP, \"mean\"),\n }\n\n def __init__(\n self,\n icelltype,\n k,\n rewet=False,\n rewet_layer=None,\n rewet_factor=None,\n rewet_iterations=None,\n rewet_method=None,\n k22=None,\n k33=None,\n angle1=None,\n angle2=None,\n angle3=None,\n cell_averaging=None,\n alternative_cell_averaging=None,\n save_flows=False,\n 
starting_head_as_confined_thickness=False,\n variable_vertical_conductance=False,\n dewatered=False,\n perched=False,\n save_specific_discharge=False,\n save_saturation=False,\n xt3d_option=False,\n rhs_option=False,\n validate: bool = True,\n ):\n super().__init__(locals())\n # check rewetting\n if not rewet and any(\n [rewet_layer, rewet_factor, rewet_iterations, rewet_method]\n ):\n raise ValueError(\n \"rewet_layer, rewet_factor, rewet_iterations, and rewet_method should\"\n \" all be left at a default value of None if rewet is False.\"\n )\n self.dataset[\"icelltype\"] = icelltype\n self.dataset[\"k\"] = k\n self.dataset[\"rewet\"] = rewet\n self.dataset[\"rewet_layer\"] = rewet_layer\n self.dataset[\"rewet_factor\"] = rewet_factor\n self.dataset[\"rewet_iterations\"] = rewet_iterations\n self.dataset[\"rewet_method\"] = rewet_method\n self.dataset[\"k22\"] = k22\n self.dataset[\"k33\"] = k33\n self.dataset[\"angle1\"] = angle1\n self.dataset[\"angle2\"] = angle2\n self.dataset[\"angle3\"] = angle3\n if cell_averaging is not None:\n warnings.warn(\n \"Use of `cell_averaging` is deprecated, please use `alternative_cell_averaging` instead\",\n DeprecationWarning,\n )\n self.dataset[\"alternative_cell_averaging\"] = cell_averaging\n else:\n self.dataset[\"alternative_cell_averaging\"] = alternative_cell_averaging\n\n self.dataset[\"save_flows\"] = save_flows\n self.dataset[\n \"starting_head_as_confined_thickness\"\n ] = starting_head_as_confined_thickness\n self.dataset[\"variable_vertical_conductance\"] = variable_vertical_conductance\n self.dataset[\"dewatered\"] = dewatered\n self.dataset[\"perched\"] = perched\n self.dataset[\"save_specific_discharge\"] = save_specific_discharge\n self.dataset[\"save_saturation\"] = save_saturation\n self.dataset[\"xt3d_option\"] = xt3d_option\n self.dataset[\"rhs_option\"] = rhs_option\n self._validate_init_schemata(validate)\n\n def get_xt3d_option(self) -> bool:\n \"\"\"\n Returns the xt3d option value for this object.\n \"\"\"\n return self.dataset[\"xt3d_option\"].values[()]\n\n def set_xt3d_option(self, is_xt3d_used: bool, is_rhs: bool) -> None:\n \"\"\"\n Returns the xt3d option value for this object.\n \"\"\"\n self.dataset[\"rhs_option\"] = is_rhs\n self.dataset[\"xt3d_option\"] = is_xt3d_used\n\n @property\n def is_variable_vertical_conductance(self) -> bool:\n \"\"\"\n Returns the VariableCV option value for this object.\n \"\"\"\n return self.dataset[\"variable_vertical_conductance\"].values[()]\n\n @property\n def is_dewatered(self) -> bool:\n \"\"\"\n Returns the \"dewatered\" option value for this object. Used only when variable_vertical_conductance is true\n \"\"\"\n return self.dataset[\"dewatered\"].values[()]" }, { "identifier": "OutputControl", "path": "imod/mf6/oc.py", "snippet": "class OutputControl(Package):\n \"\"\"\n The Output Control Option determines how and when heads, budgets and/or\n concentrations are printed to the listing file and/or written to a separate\n binary output file.\n https://water.usgs.gov/water-resources/software/MODFLOW-6/mf6io_6.4.2.pdf#page=53\n\n Currently the settings \"first\", \"last\", \"all\", and \"frequency\"\n are supported, the \"steps\" setting is not supported, because of\n its ragged nature. 
Furthermore, only one setting per stress period\n can be specified in imod-python.\n\n Parameters\n ----------\n save_head : {string, integer}, or xr.DataArray of {string, integer}, optional\n String or integer indicating output control for head file (.hds)\n If string, should be one of [\"first\", \"last\", \"all\"].\n If integer, interpreted as frequency.\n save_budget : {string, integer}, or xr.DataArray of {string, integer}, optional\n String or integer indicating output control for cell budgets (.cbc)\n If string, should be one of [\"first\", \"last\", \"all\"].\n If integer, interpreted as frequency.\n save_concentration : {string, integer}, or xr.DataArray of {string, integer}, optional\n String or integer indicating output control for concentration file (.ucn)\n If string, should be one of [\"first\", \"last\", \"all\"].\n If integer, interpreted as frequency.\n validate: {True, False}\n Flag to indicate whether the package should be validated upon\n initialization. This raises a ValidationError if package input is\n provided in the wrong manner. Defaults to True.\n\n Examples\n --------\n To specify a mix of both 'frequency' and 'first' setting,\n we need to specify an array with both integers and strings.\n For this we need to create a numpy object array first,\n otherwise xarray converts all to strings automatically.\n\n >>> time = [np.datetime64(\"2000-01-01\"), np.datetime64(\"2000-01-02\")]\n >>> data = np.array([\"last\", 5], dtype=\"object\")\n >>> save_head = xr.DataArray(data, coords={\"time\": time}, dims=(\"time\"))\n >>> oc = imod.mf6.OutputControl(save_head=save_head, save_budget=None, save_concentration=None)\n\n \"\"\"\n\n _pkg_id = \"oc\"\n _keyword_map = {}\n _template = Package._initialize_template(_pkg_id)\n\n _init_schemata = {\n \"save_head\": [\n DTypeSchema(np.integer) | DTypeSchema(str) | DTypeSchema(object),\n ],\n \"save_budget\": [\n DTypeSchema(np.integer) | DTypeSchema(str) | DTypeSchema(object),\n ],\n \"save_concentration\": [\n DTypeSchema(np.integer) | DTypeSchema(str) | DTypeSchema(object),\n ],\n }\n\n _write_schemata = {}\n _regrid_method = {}\n\n def __init__(\n self,\n save_head=None,\n save_budget=None,\n save_concentration=None,\n head_file=None,\n budget_file=None,\n concentration_file=None,\n validate: bool = True,\n ):\n super().__init__()\n\n save_concentration = (\n None if is_dataarray_none(save_concentration) else save_concentration\n )\n save_head = None if is_dataarray_none(save_head) else save_head\n save_budget = None if is_dataarray_none(save_budget) else save_budget\n\n if save_head is not None and save_concentration is not None:\n raise ValueError(\"save_head and save_concentration cannot both be defined.\")\n\n self.dataset[\"save_head\"] = save_head\n self.dataset[\"save_concentration\"] = save_concentration\n self.dataset[\"save_budget\"] = save_budget\n self.dataset[\"head_file\"] = head_file\n self.dataset[\"budget_file\"] = budget_file\n self.dataset[\"concentration_file\"] = concentration_file\n self._validate_init_schemata(validate)\n\n def _get_ocsetting(self, setting):\n \"\"\"Get oc setting based on its type. If integers return f'frequency {setting}', if\"\"\"\n if isinstance(setting, (int, np.integer)) and not isinstance(setting, bool):\n return f\"frequency {setting}\"\n elif isinstance(setting, str):\n if setting.lower() in [\"first\", \"last\", \"all\"]:\n return setting.lower()\n else:\n raise ValueError(\n f\"Output Control received wrong string. 
String should be one of ['first', 'last', 'all'], instead got {setting}\"\n )\n else:\n raise TypeError(\n f\"Output Control setting should be either integer or string in ['first', 'last', 'all'], instead got {setting}\"\n )\n\n def _get_output_filepath(self, directory: Path, output_variable: str) -> Path:\n varname = f\"{output_variable}_file\"\n ext = OUTPUT_EXT_MAPPING[output_variable]\n modelname = directory.stem\n\n filepath = self.dataset[varname].values[()]\n if filepath is None:\n filepath = directory / f\"{modelname}.{ext}\"\n else:\n filepath = Path(filepath)\n\n if filepath.is_absolute():\n path = filepath\n else:\n # Get path relative to the simulation name file.\n sim_directory = directory.parent\n path = Path(os.path.relpath(filepath, sim_directory))\n\n return path\n\n def render(self, directory, pkgname, globaltimes, binary):\n d = {}\n\n for output_variable in OUTPUT_EXT_MAPPING.keys():\n save = self.dataset[f\"save_{output_variable}\"].values[()]\n if save is not None:\n varname = f\"{output_variable}_file\"\n output_path = self._get_output_filepath(directory, output_variable)\n d[varname] = output_path.as_posix()\n\n periods = collections.defaultdict(dict)\n for datavar in (\"save_head\", \"save_concentration\", \"save_budget\"):\n if self.dataset[datavar].values[()] is None:\n continue\n key = datavar.replace(\"_\", \" \")\n if \"time\" in self.dataset[datavar].coords:\n package_times = self.dataset[datavar].coords[\"time\"].values\n starts = np.searchsorted(globaltimes, package_times) + 1\n for i, s in enumerate(starts):\n setting = self.dataset[datavar].isel(time=i).item()\n periods[s][key] = self._get_ocsetting(setting)\n\n else:\n setting = self.dataset[datavar].item()\n periods[1][key] = self._get_ocsetting(setting)\n\n d[\"periods\"] = periods\n\n return self._template.render(d)\n\n def write(\n self,\n pkgname: str,\n globaltimes: Union[List, np.ndarray],\n write_context: WriteContext,\n ):\n # We need to overload the write here to ensure the output directory is\n # created in advance for MODFLOW6.\n super().write(pkgname, globaltimes, write_context)\n\n for datavar in (\"head_file\", \"concentration_file\", \"budget_file\"):\n path = self.dataset[datavar].values[()]\n if path is not None:\n filepath = Path(path)\n filepath.parent.mkdir(parents=True, exist_ok=True)\n return\n\n @property\n def is_budget_output(self) -> bool:\n return self.dataset[\"save_budget\"].values[()] is not None" }, { "identifier": "River", "path": "imod/mf6/riv.py", "snippet": "class River(BoundaryCondition):\n \"\"\"\n River package.\n Any number of RIV Packages can be specified for a single groundwater flow\n model.\n https://water.usgs.gov/water-resources/software/MODFLOW-6/mf6io_6.0.4.pdf#page=71\n\n Parameters\n ----------\n stage: array of floats (xr.DataArray)\n is the head in the river.\n conductance: array of floats (xr.DataArray)\n is the riverbed hydraulic conductance.\n bottom_elevation: array of floats (xr.DataArray)\n is the elevation of the bottom of the riverbed.\n concentration: array of floats (xr.DataArray, optional)\n if this flow package is used in simulations also involving transport, then this array is used\n as the concentration for inflow over this boundary.\n concentration_boundary_type: ({\"AUX\", \"AUXMIXED\"}, optional)\n if this flow package is used in simulations also involving transport, then this keyword specifies\n how outflow over this boundary is computed.\n print_input: ({True, False}, optional)\n keyword to indicate that the list of river information 
will be written\n to the listing file immediately after it is read. Default is False.\n print_flows: ({True, False}, optional)\n Indicates that the list of river flow rates will be printed to the\n listing file for every stress period time step in which \"BUDGET PRINT\"\n is specified in Output Control. If there is no Output Control option and\n PRINT FLOWS is specified, then flow rates are printed for the last time\n step of each stress period. Default is False.\n save_flows: ({True, False}, optional)\n Indicates that river flow terms will be written to the file specified\n with \"BUDGET FILEOUT\" in Output Control. Default is False.\n observations: [Not yet supported.]\n Default is None.\n validate: {True, False}\n Flag to indicate whether the package should be validated upon\n initialization. This raises a ValidationError if package input is\n provided in the wrong manner. Defaults to True.\n repeat_stress: Optional[xr.DataArray] of datetimes\n Used to repeat data for e.g. repeating stress periods such as\n seasonality without duplicating the values. The DataArray should have\n dimensions ``(\"repeat\", \"repeat_items\")``. The ``repeat_items``\n dimension should have size 2: the first value is the \"key\", the second\n value is the \"value\". For the \"key\" datetime, the data of the \"value\"\n datetime will be used. Can also be set with a dictionary using the\n ``set_repeat_stress`` method.\n \"\"\"\n\n _pkg_id = \"riv\"\n _period_data = (\"stage\", \"conductance\", \"bottom_elevation\")\n _keyword_map = {}\n\n _init_schemata = {\n \"stage\": [\n DTypeSchema(np.floating),\n IndexesSchema(),\n CoordsSchema((\"layer\",)),\n BOUNDARY_DIMS_SCHEMA,\n ],\n \"conductance\": [\n DTypeSchema(np.floating),\n IndexesSchema(),\n CoordsSchema((\"layer\",)),\n BOUNDARY_DIMS_SCHEMA,\n ],\n \"bottom_elevation\": [\n DTypeSchema(np.floating),\n IndexesSchema(),\n CoordsSchema((\"layer\",)),\n BOUNDARY_DIMS_SCHEMA,\n ],\n \"concentration\": [\n DTypeSchema(np.floating),\n IndexesSchema(),\n CoordsSchema(\n (\n \"species\",\n \"layer\",\n )\n ),\n CONC_DIMS_SCHEMA,\n ],\n \"print_input\": [DTypeSchema(np.bool_), DimsSchema()],\n \"print_flows\": [DTypeSchema(np.bool_), DimsSchema()],\n \"save_flows\": [DTypeSchema(np.bool_), DimsSchema()],\n }\n _write_schemata = {\n \"stage\": [\n AllValueSchema(\">=\", \"bottom_elevation\"),\n OtherCoordsSchema(\"idomain\"),\n AllNoDataSchema(), # Check for all nan, can occur while clipping\n AllInsideNoDataSchema(other=\"idomain\", is_other_notnull=(\">\", 0)),\n ],\n \"conductance\": [IdentityNoDataSchema(\"stage\"), AllValueSchema(\">\", 0.0)],\n \"bottom_elevation\": [\n IdentityNoDataSchema(\"stage\"),\n # Check river bottom above layer bottom, else Modflow throws error.\n AllValueSchema(\">\", \"bottom\"),\n ],\n \"concentration\": [IdentityNoDataSchema(\"stage\"), AllValueSchema(\">=\", 0.0)],\n }\n\n _template = BoundaryCondition._initialize_template(_pkg_id)\n _auxiliary_data = {\"concentration\": \"species\"}\n\n _regrid_method = {\n \"stage\": (RegridderType.OVERLAP, \"mean\"),\n \"conductance\": (RegridderType.RELATIVEOVERLAP, \"conductance\"),\n \"bottom_elevation\": (RegridderType.OVERLAP, \"mean\"),\n \"concentration\": (RegridderType.OVERLAP, \"mean\"),\n }\n\n def __init__(\n self,\n stage,\n conductance,\n bottom_elevation,\n concentration=None,\n concentration_boundary_type=\"aux\",\n print_input=False,\n print_flows=False,\n save_flows=False,\n observations=None,\n validate: bool = True,\n repeat_stress=None,\n ):\n super().__init__(locals())\n 
self.dataset[\"stage\"] = stage\n self.dataset[\"conductance\"] = conductance\n self.dataset[\"bottom_elevation\"] = bottom_elevation\n if concentration is not None:\n self.dataset[\"concentration\"] = concentration\n self.dataset[\"concentration_boundary_type\"] = concentration_boundary_type\n add_periodic_auxiliary_variable(self)\n self.dataset[\"print_input\"] = print_input\n self.dataset[\"print_flows\"] = print_flows\n self.dataset[\"save_flows\"] = save_flows\n self.dataset[\"observations\"] = observations\n self.dataset[\"repeat_stress\"] = repeat_stress\n self._validate_init_schemata(validate)\n\n def _validate(self, schemata, **kwargs):\n # Insert additional kwargs\n kwargs[\"stage\"] = self[\"stage\"]\n kwargs[\"bottom_elevation\"] = self[\"bottom_elevation\"]\n errors = super()._validate(schemata, **kwargs)\n\n return errors" }, { "identifier": "SpecificStorage", "path": "imod/mf6/sto.py", "snippet": "class SpecificStorage(StorageBase):\n \"\"\"\n Storage Package with specific storage.\n\n From wikipedia (https://en.wikipedia.org/wiki/Specific_storage):\n\n \"The specific storage is the amount of water that a portion of an aquifer\n releases from storage, per unit mass or volume of aquifer, per unit change\n in hydraulic head, while remaining fully saturated.\"\n\n If the STO Package is not included for a model, then storage changes will\n not be calculated, and thus, the model will be steady state. Only one STO\n Package can be specified for a GWF model.\n\n Parameters\n ----------\n specific_storage: array of floats (xr.DataArray)\n Is specific storage. Specific storage values must be greater than\n or equal to 0. (ss)\n specific_yield: array of floats (xr.DataArray)\n Is specific yield. Specific yield values must be greater than or\n equal to 0. Specific yield does not have to be specified if there are no\n convertible cells (convertible=0 in every cell). (sy)\n transient: ({True, False}), or a DataArray with a time coordinate and dtype Bool\n Boolean to indicate if the model is transient or steady-state.\n convertible: array of int (xr.DataArray)\n Is a flag for each cell that specifies whether or not a cell is\n convertible for the storage calculation. 0 indicates confined storage is\n used. >0 indicates confined storage is used when head is above cell top\n and a mixed formulation of unconfined and confined storage is used when\n head is below cell top. (iconvert)\n save_flows: ({True, False}, optional)\n Indicates that storage flow terms will be written to the file specified\n with \"BUDGET FILEOUT\" in Output Control. Default is False.\n validate: {True, False}\n Flag to indicate whether the package should be validated upon\n initialization. This raises a ValidationError if package input is\n provided in the wrong manner. 
Defaults to True.\n \"\"\"\n\n _pkg_id = \"sto\"\n _grid_data = {\n \"convertible\": np.int32,\n \"specific_storage\": np.float64,\n \"specific_yield\": np.float64,\n }\n _keyword_map = {\n \"specific_storage\": \"ss\",\n \"specific_yield\": \"sy\",\n \"convertible\": \"iconvert\",\n }\n\n _init_schemata = {\n \"specific_storage\": (\n DTypeSchema(np.floating),\n IndexesSchema(),\n PKG_DIMS_SCHEMA,\n ),\n \"specific_yield\": (\n DTypeSchema(np.floating),\n IndexesSchema(),\n PKG_DIMS_SCHEMA,\n ),\n \"transient\": (\n DTypeSchema(np.bool_),\n IndexesSchema(),\n DimsSchema(\"time\") | DimsSchema(),\n ),\n \"convertible\": (\n DTypeSchema(np.integer),\n IndexesSchema(),\n PKG_DIMS_SCHEMA,\n ),\n \"save_flows\": (DTypeSchema(np.bool_), DimsSchema()),\n }\n\n _write_schemata = {\n \"specific_storage\": (\n AllValueSchema(\">=\", 0.0),\n IdentityNoDataSchema(other=\"idomain\", is_other_notnull=(\">\", 0)),\n # No need to check coords: dataset ensures they align with idomain.\n ),\n \"specific_yield\": (\n AllValueSchema(\">=\", 0.0),\n IdentityNoDataSchema(other=\"idomain\", is_other_notnull=(\">\", 0)),\n ),\n \"convertible\": (\n IdentityNoDataSchema(other=\"idomain\", is_other_notnull=(\">\", 0)),\n ),\n }\n\n _regrid_method = {\n \"convertible\": (RegridderType.OVERLAP, \"mode\"),\n \"specific_storage\": (RegridderType.OVERLAP, \"mean\"),\n \"specific_yield\": (RegridderType.OVERLAP, \"mean\"),\n }\n\n _template = Package._initialize_template(_pkg_id)\n\n def __init__(\n self,\n specific_storage,\n specific_yield,\n transient,\n convertible,\n save_flows: bool = False,\n validate: bool = True,\n ):\n super().__init__(locals())\n self.dataset[\"specific_storage\"] = specific_storage\n self.dataset[\"specific_yield\"] = specific_yield\n self.dataset[\"convertible\"] = convertible\n self.dataset[\"transient\"] = transient\n self.dataset[\"save_flows\"] = save_flows\n self._validate_init_schemata(validate)\n\n def render(self, directory, pkgname, globaltimes, binary):\n d = self._render_dict(directory, pkgname, globaltimes, binary)\n return self._template.render(d)" } ]
import numpy as np import pytest import xarray as xr from imod.mf6 import ( GroundwaterFlowModel, InitialConditions, NodePropertyFlow, OutputControl, River, SpecificStorage, )
12564
def conductance_fc(): globaltimes = np.array( [ "2000-01-01", "2000-01-02", "2000-01-03", ], dtype="datetime64[ns]", ) idomain = get_data_array(grid_dimensions(), globaltimes) # Constant head conductance = xr.full_like(idomain, np.nan) return conductance @pytest.fixture(scope="session") def elevation_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) elevation = xr.full_like(idomain, np.nan) elevation[:, 0, 7, 7:9] = 1.0 return elevation @pytest.fixture(scope="session") def rate_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) rate = xr.full_like(idomain, np.nan) rate[:, 0, 7, 7:9] = 0.001 return rate @pytest.fixture(scope="session") def proportion_rate_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) proportion_rate = xr.full_like(idomain, np.nan) proportion_rate[:, 0, 7, 7:9] = 0.3 return proportion_rate @pytest.fixture(scope="session") def proportion_depth_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) proportion_depth = xr.full_like(idomain, np.nan) proportion_depth[:, 0, 7, 7:9] = 0.4 return proportion_depth @pytest.fixture(scope="session") def porosity_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) porosity_fc = xr.full_like(idomain, np.nan).isel(time=0) return porosity_fc @pytest.fixture(scope="session") def decay_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) decay_fc = xr.full_like(idomain, np.nan).isel(time=0) return decay_fc @pytest.fixture(scope="session") def decay_sorbed_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) decay_sorbed_fc = xr.full_like(idomain, np.nan).isel(time=0) return decay_sorbed_fc @pytest.fixture(scope="session") def bulk_density_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) bulk_density_fc = xr.full_like(idomain, np.nan).isel(time=0) return bulk_density_fc @pytest.fixture(scope="session") def distcoef_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) distcoef_fc = xr.full_like(idomain, np.nan).isel(time=0) return distcoef_fc @pytest.fixture(scope="session") def sp2_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) sp2_fc = xr.full_like(idomain, np.nan).isel(time=0) return sp2_fc @pytest.fixture(scope="function") @pytest.mark.usefixtures("concentration_fc") def flow_model_with_concentration(concentration_fc): idomain = get_data_array(grid_dimensions(), globaltimes) cellType = xr.full_like(idomain.isel(time=0), 1, dtype=np.int32) k = xr.full_like(idomain.isel(time=0), 10.0) k33 = xr.full_like(idomain.isel(time=0), 10.0) # River riv_dict = dict( stage=idomain.sel(layer=1), conductance=idomain.sel(layer=1), bottom_elevation=idomain.sel(layer=1) - 1.0, concentration=concentration_fc.sel(layer=1), ) gwf_model = GroundwaterFlowModel()
globaltimes = np.array( [ "2000-01-01", "2000-01-02", "2000-01-03", ], dtype="datetime64[ns]", ) class grid_dimensions: nlay = 3 nrow = 15 ncol = 15 dx = 5000 dy = -5000 xmin = 0 ymin = 0 def get_data_array(dimensions, globaltimes): ntimes = len(globaltimes) shape = (ntimes, dimensions.nlay, dimensions.nrow, dimensions.ncol) dims = ("time", "layer", "y", "x") layer = np.array([1, 2, 3]) xmax = dimensions.dx * dimensions.ncol ymax = abs(dimensions.dy) * dimensions.nrow y = np.arange(ymax, dimensions.ymin, dimensions.dy) + 0.5 * dimensions.dy x = np.arange(dimensions.xmin, xmax, dimensions.dx) + 0.5 * dimensions.dx coords = {"time": globaltimes, "layer": layer, "y": y, "x": x} # Discretization data return xr.DataArray( np.ones(shape), coords=coords, dims=dims, ) @pytest.fixture(scope="session") def head_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) # Constant head head = xr.full_like(idomain, np.nan) return head @pytest.fixture(scope="session") def concentration_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) idomain = idomain.expand_dims(species=["salinity", "temperature"]) concentration = xr.full_like(idomain, np.nan) return concentration @pytest.fixture(scope="session") def conductance_fc(): globaltimes = np.array( [ "2000-01-01", "2000-01-02", "2000-01-03", ], dtype="datetime64[ns]", ) idomain = get_data_array(grid_dimensions(), globaltimes) # Constant head conductance = xr.full_like(idomain, np.nan) return conductance @pytest.fixture(scope="session") def elevation_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) elevation = xr.full_like(idomain, np.nan) elevation[:, 0, 7, 7:9] = 1.0 return elevation @pytest.fixture(scope="session") def rate_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) rate = xr.full_like(idomain, np.nan) rate[:, 0, 7, 7:9] = 0.001 return rate @pytest.fixture(scope="session") def proportion_rate_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) proportion_rate = xr.full_like(idomain, np.nan) proportion_rate[:, 0, 7, 7:9] = 0.3 return proportion_rate @pytest.fixture(scope="session") def proportion_depth_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) proportion_depth = xr.full_like(idomain, np.nan) proportion_depth[:, 0, 7, 7:9] = 0.4 return proportion_depth @pytest.fixture(scope="session") def porosity_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) porosity_fc = xr.full_like(idomain, np.nan).isel(time=0) return porosity_fc @pytest.fixture(scope="session") def decay_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) decay_fc = xr.full_like(idomain, np.nan).isel(time=0) return decay_fc @pytest.fixture(scope="session") def decay_sorbed_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) decay_sorbed_fc = xr.full_like(idomain, np.nan).isel(time=0) return decay_sorbed_fc @pytest.fixture(scope="session") def bulk_density_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) bulk_density_fc = xr.full_like(idomain, np.nan).isel(time=0) return bulk_density_fc @pytest.fixture(scope="session") def distcoef_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) distcoef_fc = xr.full_like(idomain, np.nan).isel(time=0) return distcoef_fc @pytest.fixture(scope="session") def sp2_fc(): idomain = get_data_array(grid_dimensions(), globaltimes) sp2_fc = xr.full_like(idomain, np.nan).isel(time=0) return sp2_fc @pytest.fixture(scope="function") @pytest.mark.usefixtures("concentration_fc") def flow_model_with_concentration(concentration_fc): 
idomain = get_data_array(grid_dimensions(), globaltimes) cellType = xr.full_like(idomain.isel(time=0), 1, dtype=np.int32) k = xr.full_like(idomain.isel(time=0), 10.0) k33 = xr.full_like(idomain.isel(time=0), 10.0) # River riv_dict = dict( stage=idomain.sel(layer=1), conductance=idomain.sel(layer=1), bottom_elevation=idomain.sel(layer=1) - 1.0, concentration=concentration_fc.sel(layer=1), ) gwf_model = GroundwaterFlowModel()
gwf_model["npf"] = NodePropertyFlow(
2
2023-12-08 13:57:59+00:00
16k
camenduru/MotionDirector-hf
MotionDirector_train.py
[ { "identifier": "UNet3DConditionModel", "path": "models/unet_3d_condition.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n r\"\"\"\n UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep\n and returns sample shaped output.\n\n This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library\n implements for all the models (such as downloading or saving, etc.)\n\n Parameters:\n sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):\n Height and width of input/output sample.\n in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.\n out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.\n down_block_types (`Tuple[str]`, *optional*, defaults to `(\"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"DownBlock2D\")`):\n The tuple of downsample blocks to use.\n up_block_types (`Tuple[str]`, *optional*, defaults to `(\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\",)`):\n The tuple of upsample blocks to use.\n block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):\n The tuple of output channels for each block.\n layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.\n downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.\n mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.\n act_fn (`str`, *optional*, defaults to `\"silu\"`): The activation function to use.\n norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.\n If `None`, it will skip the normalization and activation layers in post-processing\n norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.\n cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.\n attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.\n \"\"\"\n\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n up_block_types: Tuple[str] = (\"UpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\"),\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1024,\n attention_head_dim: Union[int, Tuple[int]] = 64,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n self.gradient_checkpointing = False\n # Check inputs\n if len(down_block_types) != len(up_block_types):\n raise ValueError(\n f\"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.\"\n )\n\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. 
`down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_out_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n self.time_proj = Timesteps(block_out_channels[0], True, 0)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n self.transformer_in = TransformerTemporalModel(\n num_attention_heads=8,\n attention_head_dim=attention_head_dim,\n in_channels=block_out_channels[0],\n num_layers=1,\n )\n\n # class embedding\n self.down_blocks = nn.ModuleList([])\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=False,\n )\n self.down_blocks.append(down_block)\n\n # mid\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=False,\n )\n\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=False,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = 
output_channel\n\n # out\n if norm_num_groups is not None:\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n else:\n self.conv_norm_out = None\n self.conv_act = None\n\n conv_out_padding = (conv_out_kernel - 1) // 2\n self.conv_out = nn.Conv2d(\n block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding\n )\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, value=False):\n self.gradient_checkpointing = value\n self.mid_block.gradient_checkpointing = value\n for module in self.down_blocks + self.up_blocks:\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, num_frames, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet3DConditionOutput`] instead of a plain tuple.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n\n Returns:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n num_frames = sample.shape[2]\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n emb = emb.repeat_interleave(repeats=num_frames, dim=0)\n encoder_hidden_states = encoder_hidden_states.repeat_interleave(repeats=num_frames, dim=0)\n\n # 2. pre-process\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in(sample)\n \n if num_frames > 1:\n if self.gradient_checkpointing:\n sample = transformer_g_c(self.transformer_in, sample, num_frames)\n else:\n sample = self.transformer_in(sample, num_frames=num_frames).sample\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames)\n\n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. 
mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n\n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n num_frames=num_frames,\n )\n\n # 6. post-process\n if self.conv_norm_out:\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n\n sample = self.conv_out(sample)\n\n # reshape to (batch, channel, framerate, width, height)\n sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)" }, { "identifier": "VideoJsonDataset", "path": "utils/dataset.py", "snippet": "class VideoJsonDataset(Dataset):\n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 4,\n sample_start_idx: int = 1,\n frame_step: int = 1,\n json_path: str =\"\",\n json_data = None,\n vid_data_key: str = \"video_path\",\n preprocessed: bool = False,\n use_bucketing: bool = False,\n **kwargs\n ):\n self.vid_types = (\".mp4\", \".avi\", \".mov\", \".webm\", \".flv\", \".mjpeg\")\n self.use_bucketing = use_bucketing\n self.tokenizer = tokenizer\n self.preprocessed = preprocessed\n \n self.vid_data_key = vid_data_key\n self.train_data = self.load_from_json(json_path, json_data)\n\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.sample_start_idx = sample_start_idx\n self.frame_step = frame_step\n\n def build_json(self, json_data):\n extended_data = []\n for data in json_data['data']:\n for nested_data in data['data']:\n self.build_json_dict(\n data, \n nested_data, \n extended_data\n )\n json_data = extended_data\n return json_data\n\n def build_json_dict(self, data, nested_data, extended_data):\n clip_path = nested_data['clip_path'] if 'clip_path' in nested_data else None\n \n extended_data.append({\n self.vid_data_key: data[self.vid_data_key],\n 'frame_index': nested_data['frame_index'],\n 'prompt': nested_data['prompt'],\n 'clip_path': clip_path\n })\n \n def load_from_json(self, path, json_data):\n try:\n with open(path) as jpath:\n print(f\"Loading JSON from {path}\")\n json_data = json.load(jpath)\n\n return self.build_json(json_data)\n\n except:\n self.train_data = []\n print(\"Non-existant JSON path. 
Skipping.\")\n \n def validate_json(self, base_path, path):\n return os.path.exists(f\"{base_path}/{path}\")\n\n def get_frame_range(self, vr):\n return get_video_frames(\n vr, \n self.sample_start_idx, \n self.frame_step, \n self.n_sample_frames\n )\n \n def get_vid_idx(self, vr, vid_data=None):\n frames = self.n_sample_frames\n\n if vid_data is not None:\n idx = vid_data['frame_index']\n else:\n idx = self.sample_start_idx\n\n return idx\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n # width, height = self.width, self.height\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n def get_frame_batch(self, vr, resize=None):\n frame_range = self.get_frame_range(vr)\n frames = vr.get_batch(frame_range)\n video = rearrange(frames, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video\n\n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n \n return video, vr \n\n def train_data_batch(self, index):\n\n # If we are training on individual clips.\n if 'clip_path' in self.train_data[index] and \\\n self.train_data[index]['clip_path'] is not None:\n\n vid_data = self.train_data[index]\n\n clip_path = vid_data['clip_path']\n \n # Get video prompt\n prompt = vid_data['prompt']\n\n video, _ = self.process_video_wrapper(clip_path)\n\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n\n # Assign train data\n train_data = self.train_data[index]\n \n # Get the frame of the current index.\n self.sample_start_idx = train_data['frame_index']\n \n # Initialize resize\n resize = None\n\n video, vr = self.process_video_wrapper(train_data[self.vid_data_key])\n\n # Get video prompt\n prompt = train_data['prompt']\n vr.seek(0)\n\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n\n @staticmethod\n def __getname__(): return 'json'\n\n def __len__(self):\n if self.train_data is not None:\n return len(self.train_data)\n else: \n return 0\n\n def __getitem__(self, index):\n \n # Initialize variables\n video = None\n prompt = None\n prompt_ids = None\n\n # Use default JSON training\n if self.train_data is not None:\n video, prompt, prompt_ids = self.train_data_batch(index)\n\n example = {\n \"pixel_values\": (video / 127.5 - 1.0),\n \"prompt_ids\": prompt_ids[0],\n \"text_prompt\": prompt,\n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "SingleVideoDataset", "path": "utils/dataset.py", "snippet": "class SingleVideoDataset(Dataset):\n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 4,\n frame_step: int = 1,\n single_video_path: str = \"\",\n single_video_prompt: str = \"\",\n use_caption: bool = False,\n use_bucketing: bool = False,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n self.frames = []\n self.index = 1\n\n self.vid_types = (\".mp4\", \".avi\", \".mov\", \".webm\", \".flv\", \".mjpeg\")\n self.n_sample_frames = n_sample_frames\n self.frame_step = frame_step\n\n self.single_video_path = single_video_path\n self.single_video_prompt = single_video_prompt\n\n self.width = width\n self.height = height\n def create_video_chunks(self):\n vr = decord.VideoReader(self.single_video_path)\n vr_range = range(0, len(vr), 
self.frame_step)\n\n self.frames = list(self.chunk(vr_range, self.n_sample_frames))\n return self.frames\n\n def chunk(self, it, size):\n it = iter(it)\n return iter(lambda: tuple(islice(it, size)), ())\n\n def get_frame_batch(self, vr, resize=None):\n index = self.index\n frames = vr.get_batch(self.frames[self.index])\n video = rearrange(frames, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n # width, height = sensible_buckets(self.width, self.height, h, w)\n width, height = self.width, self.height\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n \n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n \n return video, vr \n\n def single_video_batch(self, index):\n train_data = self.single_video_path\n self.index = index\n\n if train_data.endswith(self.vid_types):\n video, _ = self.process_video_wrapper(train_data)\n\n prompt = self.single_video_prompt\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n else:\n raise ValueError(f\"Single video is not a video type. Types: {self.vid_types}\")\n \n @staticmethod\n def __getname__(): return 'single_video'\n\n def __len__(self):\n \n return len(self.create_video_chunks())\n\n def __getitem__(self, index):\n\n video, prompt, prompt_ids = self.single_video_batch(index)\n\n example = {\n \"pixel_values\": (video / 127.5 - 1.0),\n \"prompt_ids\": prompt_ids[0],\n \"text_prompt\": prompt,\n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "ImageDataset", "path": "utils/dataset.py", "snippet": "class ImageDataset(Dataset):\n \n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n base_width: int = 256,\n base_height: int = 256,\n use_caption: bool = False,\n image_dir: str = '',\n single_img_prompt: str = '',\n use_bucketing: bool = False,\n fallback_prompt: str = '',\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.img_types = (\".png\", \".jpg\", \".jpeg\", '.bmp')\n self.use_bucketing = use_bucketing\n\n self.image_dir = self.get_images_list(image_dir)\n self.fallback_prompt = fallback_prompt\n\n self.use_caption = use_caption\n self.single_img_prompt = single_img_prompt\n\n self.width = width\n self.height = height\n\n def get_images_list(self, image_dir):\n if os.path.exists(image_dir):\n imgs = [x for x in os.listdir(image_dir) if x.endswith(self.img_types)]\n full_img_dir = []\n\n for img in imgs: \n full_img_dir.append(f\"{image_dir}/{img}\")\n\n return sorted(full_img_dir)\n\n return ['']\n\n def image_batch(self, index):\n train_data = self.image_dir[index]\n img = train_data\n\n try:\n img = torchvision.io.read_image(img, mode=torchvision.io.ImageReadMode.RGB)\n except:\n img = T.transforms.PILToTensor()(Image.open(img).convert(\"RGB\"))\n\n width = self.width\n height = self.height\n\n if self.use_bucketing:\n _, h, w = img.shape\n width, height = sensible_buckets(width, height, w, h)\n \n resize = T.transforms.Resize((height, width), antialias=True)\n\n img = resize(img) \n img = repeat(img, 'c h w -> f c h w', f=1)\n\n prompt = get_text_prompt(\n file_path=train_data,\n text_prompt=self.single_img_prompt,\n fallback_prompt=self.fallback_prompt,\n ext_types=self.img_types, \n use_caption=True\n )\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return img, 
prompt, prompt_ids\n\n @staticmethod\n def __getname__(): return 'image'\n \n def __len__(self):\n # Image directory\n if os.path.exists(self.image_dir[0]):\n return len(self.image_dir)\n else:\n return 0\n\n def __getitem__(self, index):\n img, prompt, prompt_ids = self.image_batch(index)\n example = {\n \"pixel_values\": (img / 127.5 - 1.0),\n \"prompt_ids\": prompt_ids[0],\n \"text_prompt\": prompt, \n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "VideoFolderDataset", "path": "utils/dataset.py", "snippet": "class VideoFolderDataset(Dataset):\n def __init__(\n self,\n tokenizer=None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 16,\n fps: int = 8,\n path: str = \"./data\",\n fallback_prompt: str = \"\",\n use_bucketing: bool = False,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n\n self.fallback_prompt = fallback_prompt\n\n self.video_files = glob(f\"{path}/*.mp4\")\n\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.fps = fps\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n # width, height = self.width, self.height\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n def get_frame_batch(self, vr, resize=None):\n n_sample_frames = self.n_sample_frames\n native_fps = vr.get_avg_fps()\n \n every_nth_frame = max(1, round(native_fps / self.fps))\n every_nth_frame = min(len(vr), every_nth_frame)\n \n effective_length = len(vr) // every_nth_frame\n if effective_length < n_sample_frames:\n n_sample_frames = effective_length\n\n effective_idx = random.randint(0, (effective_length - n_sample_frames))\n idxs = every_nth_frame * np.arange(effective_idx, effective_idx + n_sample_frames)\n\n video = vr.get_batch(idxs)\n video = rearrange(video, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video, vr\n \n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n return video, vr\n \n def get_prompt_ids(self, prompt):\n return self.tokenizer(\n prompt,\n truncation=True,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n return_tensors=\"pt\",\n ).input_ids\n\n @staticmethod\n def __getname__(): return 'folder'\n\n def __len__(self):\n return len(self.video_files)\n\n def __getitem__(self, index):\n\n video, _ = self.process_video_wrapper(self.video_files[index])\n\n prompt = self.fallback_prompt\n\n prompt_ids = self.get_prompt_ids(prompt)\n\n return {\"pixel_values\": (video[0] / 127.5 - 1.0), \"prompt_ids\": prompt_ids[0], \"text_prompt\": prompt, 'dataset': self.__getname__()}" }, { "identifier": "CachedDataset", "path": "utils/dataset.py", "snippet": "class CachedDataset(Dataset):\n def __init__(self,cache_dir: str = ''):\n self.cache_dir = cache_dir\n self.cached_data_list = self.get_files_list()\n\n def get_files_list(self):\n tensors_list = [f\"{self.cache_dir}/{x}\" for x in os.listdir(self.cache_dir) if x.endswith('.pt')]\n return sorted(tensors_list)\n\n def __len__(self):\n return len(self.cached_data_list)\n\n def __getitem__(self, index):\n cached_latent = torch.load(self.cached_data_list[index], map_location='cuda:0')\n return cached_latent" }, { "identifier": "LoraHandler", "path": "utils/lora_handler.py", "snippet": "class LoraHandler(object):\n def 
__init__(\n self, \n version: LORA_VERSIONS = LoraVersions.cloneofsimo, \n use_unet_lora: bool = False,\n use_text_lora: bool = False,\n save_for_webui: bool = False,\n only_for_webui: bool = False,\n lora_bias: str = 'none',\n unet_replace_modules: list = None,\n text_encoder_replace_modules: list = None\n ):\n self.version = version\n self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader)\n self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector)\n self.lora_bias = lora_bias\n self.use_unet_lora = use_unet_lora\n self.use_text_lora = use_text_lora\n self.save_for_webui = save_for_webui\n self.only_for_webui = only_for_webui\n self.unet_replace_modules = unet_replace_modules\n self.text_encoder_replace_modules = text_encoder_replace_modules\n self.use_lora = any([use_text_lora, use_unet_lora])\n\n def is_cloneofsimo_lora(self):\n return self.version == LoraVersions.cloneofsimo\n\n\n def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader):\n\n if self.is_cloneofsimo_lora():\n\n if func_type == LoraFuncTypes.loader:\n return monkeypatch_or_replace_lora_extended\n\n if func_type == LoraFuncTypes.injector:\n return inject_trainable_lora_extended\n \n assert \"LoRA Version does not exist.\"\n\n def check_lora_ext(self, lora_file: str):\n return lora_file.endswith(tuple(LORA_FILE_TYPES))\n\n def get_lora_file_path(\n self, \n lora_path: str, \n model: Union[UNet3DConditionModel, CLIPTextModel]\n ):\n if os.path.exists(lora_path):\n lora_filenames = [fns for fns in os.listdir(lora_path)]\n is_lora = self.check_lora_ext(lora_path)\n\n is_unet = isinstance(model, UNet3DConditionModel)\n is_text = isinstance(model, CLIPTextModel)\n idx = 0 if is_unet else 1\n\n base_name = FILE_BASENAMES[idx]\n \n for lora_filename in lora_filenames:\n is_lora = self.check_lora_ext(lora_filename)\n if not is_lora:\n continue\n \n if base_name in lora_filename:\n return os.path.join(lora_path, lora_filename)\n\n return None\n\n def handle_lora_load(self, file_name:str, lora_loader_args: dict = None):\n self.lora_loader(**lora_loader_args)\n print(f\"Successfully loaded LoRA from: {file_name}\")\n \n def load_lora(self, model, lora_path: str = '', lora_loader_args: dict = None,):\n try:\n lora_file = self.get_lora_file_path(lora_path, model)\n\n if lora_file is not None:\n lora_loader_args.update({\"lora_path\": lora_file})\n self.handle_lora_load(lora_file, lora_loader_args)\n\n else:\n print(f\"Could not load LoRAs for {model.__class__.__name__}. 
Injecting new ones instead...\")\n\n except Exception as e:\n print(f\"An error occured while loading a LoRA file: {e}\")\n \n def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias, scale):\n return_dict = lora_args.copy()\n \n if self.is_cloneofsimo_lora():\n return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS)\n return_dict.update({\n \"model\": model,\n \"loras\": self.get_lora_file_path(lora_path, model),\n \"target_replace_module\": replace_modules,\n \"r\": r,\n \"scale\": scale,\n \"dropout_p\": dropout,\n })\n\n return return_dict\n\n def do_lora_injection(\n self, \n model, \n replace_modules, \n bias='none',\n dropout=0,\n r=4,\n lora_loader_args=None,\n ): \n REPLACE_MODULES = replace_modules\n\n params = None\n negation = None\n is_injection_hybrid = False\n \n if self.is_cloneofsimo_lora():\n is_injection_hybrid = True\n injector_args = lora_loader_args\n\n params, negation = self.lora_injector(**injector_args) # inject_trainable_lora_extended\n for _up, _down in extract_lora_ups_down(\n model, \n target_replace_module=REPLACE_MODULES):\n\n if all(x is not None for x in [_up, _down]):\n print(f\"Lora successfully injected into {model.__class__.__name__}.\")\n\n break\n\n return params, negation, is_injection_hybrid\n\n return params, negation, is_injection_hybrid\n\n def add_lora_to_model(self, use_lora, model, replace_modules, dropout=0.0, lora_path='', r=16, scale=1.0):\n\n params = None\n negation = None\n\n lora_loader_args = self.get_lora_func_args(\n lora_path,\n use_lora,\n model,\n replace_modules,\n r,\n dropout,\n self.lora_bias,\n scale\n )\n\n if use_lora:\n params, negation, is_injection_hybrid = self.do_lora_injection(\n model, \n replace_modules, \n bias=self.lora_bias,\n lora_loader_args=lora_loader_args,\n dropout=dropout,\n r=r\n )\n\n if not is_injection_hybrid:\n self.load_lora(model, lora_path=lora_path, lora_loader_args=lora_loader_args)\n \n params = model if params is None else params\n return params, negation\n\n def save_cloneofsimo_lora(self, model, save_path, step, flag):\n \n def save_lora(model, name, condition, replace_modules, step, save_path, flag=None):\n if condition and replace_modules is not None:\n save_path = f\"{save_path}/{step}_{name}.pt\"\n save_lora_weight(model, save_path, replace_modules, flag)\n\n save_lora(\n model.unet, \n FILE_BASENAMES[0], \n self.use_unet_lora, \n self.unet_replace_modules, \n step,\n save_path,\n flag\n )\n save_lora(\n model.text_encoder, \n FILE_BASENAMES[1], \n self.use_text_lora, \n self.text_encoder_replace_modules, \n step, \n save_path,\n flag\n )\n\n # train_patch_pipe(model, self.use_unet_lora, self.use_text_lora)\n\n def save_lora_weights(self, model: None, save_path: str ='',step: str = '', flag=None):\n save_path = f\"{save_path}/lora\"\n os.makedirs(save_path, exist_ok=True)\n\n if self.is_cloneofsimo_lora():\n if any([self.save_for_webui, self.only_for_webui]):\n warnings.warn(\n \"\"\"\n You have 'save_for_webui' enabled, but are using cloneofsimo's LoRA implemention.\n Only 'stable_lora' is supported for saving to a compatible webui file.\n \"\"\"\n )\n self.save_cloneofsimo_lora(model, save_path, step, flag)" }, { "identifier": "extract_lora_child_module", "path": "utils/lora.py", "snippet": "def extract_lora_child_module(model, target_replace_module=DEFAULT_TARGET_REPLACE):\n\n loras = []\n\n for target_replace_module_i in target_replace_module:\n\n for _m, _n, _child_module in _find_modules(\n model,\n [target_replace_module_i],\n 
search_class=[LoraInjectedLinear, LoraInjectedConv2d, LoraInjectedConv3d],\n ):\n loras.append(_child_module)\n\n if len(loras) == 0:\n raise ValueError(\"No lora injected.\")\n\n return loras" }, { "identifier": "ddim_inversion", "path": "utils/ddim_utils.py", "snippet": "@torch.no_grad()\ndef ddim_inversion(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt=\"\"):\n ddim_latents = ddim_loop(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt)\n return ddim_latents" } ]
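The SingleVideoDataset snippet in the context above turns a range of sampled frame indices into fixed-size chunks with an `islice`-driven iterator. As a quick, self-contained illustration of that idiom (the frame range and chunk size below are made up):

```python
from itertools import islice

def chunk(it, size):
    # Yield consecutive tuples of `size` items until the iterator is exhausted;
    # the trailing tuple is simply shorter when the length does not divide evenly.
    it = iter(it)
    return iter(lambda: tuple(islice(it, size)), ())

# e.g. frame indices 0..9 sampled with a step of 2, grouped into chunks of 3
frame_indices = range(0, 10, 2)          # 0, 2, 4, 6, 8
print(list(chunk(frame_indices, 3)))     # [(0, 2, 4), (6, 8)]
```

The shorter final tuple matches how the dataset's last chunk can carry fewer than n_sample_frames frames.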
import argparse import datetime import logging import inspect import math import os import random import gc import copy import torch import torch.nn.functional as F import torch.utils.checkpoint import diffusers import transformers import imageio import numpy as np import itertools import bitsandbytes as bnb from typing import Dict, Optional, Tuple from omegaconf import OmegaConf from torchvision import transforms from tqdm.auto import tqdm from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from models.unet_3d_condition import UNet3DConditionModel from diffusers.models import AutoencoderKL from diffusers import DDIMScheduler, TextToVideoSDPipeline from diffusers.optimization import get_scheduler from diffusers.utils.import_utils import is_xformers_available from diffusers.models.attention_processor import AttnProcessor2_0, Attention from diffusers.models.attention import BasicTransformerBlock from transformers import CLIPTextModel, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPEncoder from utils.dataset import VideoJsonDataset, SingleVideoDataset, \ ImageDataset, VideoFolderDataset, CachedDataset from einops import rearrange, repeat from utils.lora_handler import LoraHandler from utils.lora import extract_lora_child_module from utils.ddim_utils import ddim_inversion from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
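The import list above pulls in einops' `rearrange` and `repeat`, which the context snippets use to reorder decoded video batches from (f, h, w, c) to (f, c, h, w) and to lift a single image into a one-frame clip. A minimal sketch of both calls (tensor sizes are arbitrary placeholders):

```python
import torch
from einops import rearrange, repeat

frames = torch.randint(0, 256, (16, 256, 256, 3))  # f h w c, as a video decoder returns it
video = rearrange(frames, "f h w c -> f c h w")    # channels-first for torchvision transforms
print(video.shape)                                  # torch.Size([16, 3, 256, 256])

img = torch.zeros(3, 256, 256)                      # a single c h w image
clip = repeat(img, "c h w -> f c h w", f=1)         # treat it as a one-frame clip
print(clip.shape)                                   # torch.Size([1, 3, 256, 256])
```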
token_num: 11,373
already_printed_trainables = False logger = get_logger(__name__, log_level="INFO") def create_logging(logging, logger, accelerator): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) def accelerate_set_verbose(accelerator): if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() def get_train_dataset(dataset_types, train_data, tokenizer): train_datasets = [] # Loop through all available datasets, get the name, then add to list of data to process.
already_printed_trainables = False logger = get_logger(__name__, log_level="INFO") def create_logging(logging, logger, accelerator): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) def accelerate_set_verbose(accelerator): if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() def get_train_dataset(dataset_types, train_data, tokenizer): train_datasets = [] # Loop through all available datasets, get the name, then add to list of data to process.
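The all_code field breaks off at get_train_dataset, right before the loop that the next_line field below continues. Since the rest of the function body is not shown here, the following is only a plausible sketch of the `__getname__`-based dispatch it appears to implement, with stub classes standing in for the real datasets (VideoJsonDataset and SingleVideoDataset are omitted for brevity):

```python
class ImageDataset:
    @staticmethod
    def __getname__():
        return "image"

    def __init__(self, tokenizer=None, **kwargs):
        self.tokenizer = tokenizer
        self.kwargs = kwargs

class VideoFolderDataset:
    @staticmethod
    def __getname__():
        return "folder"

    def __init__(self, tokenizer=None, **kwargs):
        self.tokenizer = tokenizer
        self.kwargs = kwargs

def get_train_dataset(dataset_types, train_data, tokenizer=None):
    train_datasets = []
    # Loop through all available datasets, get the name, then add to list of data to process.
    for DataSet in [ImageDataset, VideoFolderDataset]:
        if DataSet.__getname__() in dataset_types:
            train_datasets.append(DataSet(**train_data, tokenizer=tokenizer))
    return train_datasets

selected = get_train_dataset(["image"], {"image_dir": "./images"})
print([d.__getname__() for d in selected])  # ['image']
```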
next_line: for DataSet in [VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset]:
gold_snippet_index: 2
created_at: 2023-12-11 04:51:39+00:00
level: 16k
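One detail shared by every dataset class in this record's context is the pixel scaling applied in `__getitem__`: uint8 intensities are mapped into [-1, 1] via `x / 127.5 - 1.0`, the range diffusion-style VAEs commonly expect. A tiny sketch of the mapping and its inverse:

```python
import torch

pixels = torch.tensor([0.0, 127.5, 255.0])   # raw 8-bit intensities
normalized = pixels / 127.5 - 1.0            # -> tensor([-1., 0., 1.])
restored = (normalized + 1.0) * 127.5        # inverse mapping back to [0, 255]
print(normalized.tolist(), restored.tolist())
```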
repo_name: ZS-YANG/FemtoDet-v3
file_path: mmdet/models/dense_heads/atss_vlfusion_head.py
[ { "identifier": "MODELS", "path": "mmdet/registry.py", "snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])" }, { "identifier": "cat_boxes", "path": "mmdet/structures/bbox/transforms.py", "snippet": "def cat_boxes(data_list: List[Union[Tensor, BaseBoxes]],\n dim: int = 0) -> Union[Tensor, BaseBoxes]:\n \"\"\"Concatenate boxes with type of tensor or box type.\n\n Args:\n data_list (List[Union[Tensor, :obj:`BaseBoxes`]]): A list of tensors\n or box types need to be concatenated.\n dim (int): The dimension over which the box are concatenated.\n Defaults to 0.\n\n Returns:\n Union[Tensor, :obj`BaseBoxes`]: Concatenated results.\n \"\"\"\n if data_list and isinstance(data_list[0], BaseBoxes):\n return data_list[0].cat(data_list, dim=dim)\n else:\n return torch.cat(data_list, dim=dim)" }, { "identifier": "reduce_mean", "path": "mmdet/utils/dist_utils.py", "snippet": "def reduce_mean(tensor):\n \"\"\"\"Obtain the mean of tensor on different GPUs.\"\"\"\n if not (dist.is_available() and dist.is_initialized()):\n return tensor\n tensor = tensor.clone()\n dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)\n return tensor" }, { "identifier": "InstanceList", "path": "mmdet/utils/typing_utils.py", "snippet": "" }, { "identifier": "filter_scores_and_topk", "path": "mmdet/models/utils/misc.py", "snippet": "def filter_scores_and_topk(scores, score_thr, topk, results=None):\n \"\"\"Filter results using score threshold and topk candidates.\n\n Args:\n scores (Tensor): The scores, shape (num_bboxes, K).\n score_thr (float): The score filter threshold.\n topk (int): The number of topk candidates.\n results (dict or list or Tensor, Optional): The results to\n which the filtering rule is to be applied. The shape\n of each item is (num_bboxes, N).\n\n Returns:\n tuple: Filtered results\n\n - scores (Tensor): The scores after being filtered, \\\n shape (num_bboxes_filtered, ).\n - labels (Tensor): The class labels, shape \\\n (num_bboxes_filtered, ).\n - anchor_idxs (Tensor): The anchor indexes, shape \\\n (num_bboxes_filtered, ).\n - filtered_results (dict or list or Tensor, Optional): \\\n The filtered results. The shape of each item is \\\n (num_bboxes_filtered, N).\n \"\"\"\n valid_mask = scores > score_thr\n scores = scores[valid_mask]\n valid_idxs = torch.nonzero(valid_mask)\n\n num_topk = min(topk, valid_idxs.size(0))\n # torch.sort is actually faster than .topk (at least on GPUs)\n scores, idxs = scores.sort(descending=True)\n scores = scores[:num_topk]\n topk_idxs = valid_idxs[idxs[:num_topk]]\n keep_idxs, labels = topk_idxs.unbind(dim=1)\n\n filtered_results = None\n if results is not None:\n if isinstance(results, dict):\n filtered_results = {k: v[keep_idxs] for k, v in results.items()}\n elif isinstance(results, list):\n filtered_results = [result[keep_idxs] for result in results]\n elif isinstance(results, torch.Tensor):\n filtered_results = results[keep_idxs]\n else:\n raise NotImplementedError(f'Only supports dict or list or Tensor, '\n f'but get {type(results)}.')\n return scores, labels, keep_idxs, filtered_results" }, { "identifier": "select_single_mlvl", "path": "mmdet/models/utils/misc.py", "snippet": "def select_single_mlvl(mlvl_tensors, batch_id, detach=True):\n \"\"\"Extract a multi-scale single image tensor from a multi-scale batch\n tensor based on batch index.\n\n Note: The default value of detach is True, because the proposal gradient\n needs to be detached during the training of the two-stage model. 
E.g\n Cascade Mask R-CNN.\n\n Args:\n mlvl_tensors (list[Tensor]): Batch tensor for all scale levels,\n each is a 4D-tensor.\n batch_id (int): Batch index.\n detach (bool): Whether detach gradient. Default True.\n\n Returns:\n list[Tensor]: Multi-scale single image tensor.\n \"\"\"\n assert isinstance(mlvl_tensors, (list, tuple))\n num_levels = len(mlvl_tensors)\n\n if detach:\n mlvl_tensor_list = [\n mlvl_tensors[i][batch_id].detach() for i in range(num_levels)\n ]\n else:\n mlvl_tensor_list = [\n mlvl_tensors[i][batch_id] for i in range(num_levels)\n ]\n return mlvl_tensor_list" }, { "identifier": "unpack_gt_instances", "path": "mmdet/models/utils/misc.py", "snippet": "def unpack_gt_instances(batch_data_samples: SampleList) -> tuple:\n \"\"\"Unpack ``gt_instances``, ``gt_instances_ignore`` and ``img_metas`` based\n on ``batch_data_samples``\n\n Args:\n batch_data_samples (List[:obj:`DetDataSample`]): The Data\n Samples. It usually includes information such as\n `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n Returns:\n tuple:\n\n - batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n gt_instance. It usually includes ``bboxes`` and ``labels``\n attributes.\n - batch_gt_instances_ignore (list[:obj:`InstanceData`]):\n Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n data that is ignored during training and testing.\n Defaults to None.\n - batch_img_metas (list[dict]): Meta information of each image,\n e.g., image size, scaling factor, etc.\n \"\"\"\n batch_gt_instances = []\n batch_gt_instances_ignore = []\n batch_img_metas = []\n for data_sample in batch_data_samples:\n batch_img_metas.append(data_sample.metainfo)\n batch_gt_instances.append(data_sample.gt_instances)\n if 'ignored_instances' in data_sample:\n batch_gt_instances_ignore.append(data_sample.ignored_instances)\n else:\n batch_gt_instances_ignore.append(None)\n\n return batch_gt_instances, batch_gt_instances_ignore, batch_img_metas" }, { "identifier": "BertEncoderLayer", "path": "mmdet/models/utils/vlfuse_helper.py", "snippet": "class BertEncoderLayer(BertPreTrainedModel):\n \"\"\"A modified version of the `BertLayer` class from the\n `transformers.models.bert.modeling_bert` module.\n\n Args:\n config (:class:`~transformers.BertConfig`):\n The configuration object that\n contains various parameters for the model.\n clamp_min_for_underflow (bool, optional):\n Whether to clamp the minimum value of the hidden states\n to prevent underflow. Defaults to `False`.\n clamp_max_for_overflow (bool, optional):\n Whether to clamp the maximum value of the hidden states\n to prevent overflow. 
Defaults to `False`.\n \"\"\"\n\n def __init__(self,\n config: BertConfig,\n clamp_min_for_underflow: bool = False,\n clamp_max_for_overflow: bool = False):\n super().__init__(config)\n self.config = config\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n\n self.attention = BertAttention(config, clamp_min_for_underflow,\n clamp_max_for_overflow)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(\n self, inputs: Dict[str, Dict[str, torch.Tensor]]\n ) -> Dict[str, Dict[str, torch.Tensor]]:\n \"\"\"Applies the BertEncoderLayer to the input features.\"\"\"\n language_dict_features = inputs['lang']\n hidden_states = language_dict_features['hidden']\n attention_mask = language_dict_features['masks']\n\n device = hidden_states.device\n input_shape = hidden_states.size()[:-1]\n extended_attention_mask = self.get_extended_attention_mask(\n attention_mask, input_shape, device)\n\n self_attention_outputs = self.attention(\n hidden_states,\n extended_attention_mask,\n None,\n output_attentions=False,\n past_key_value=None)\n attention_output = self_attention_outputs[0]\n outputs = self_attention_outputs[1:]\n layer_output = apply_chunking_to_forward(self.feed_forward_chunk,\n self.chunk_size_feed_forward,\n self.seq_len_dim,\n attention_output)\n outputs = (layer_output, ) + outputs\n hidden_states = outputs[0]\n\n language_dict_features['hidden'] = hidden_states\n\n features_dict = {\n 'visual': inputs['visual'],\n 'lang': language_dict_features\n }\n\n return features_dict\n\n def feed_forward_chunk(self, attention_output: Tensor) -> Tensor:\n \"\"\"Applies the intermediate and output layers of the BertEncoderLayer\n to a chunk of the input sequence.\"\"\"\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output" }, { "identifier": "VLFuse", "path": "mmdet/models/utils/vlfuse_helper.py", "snippet": "class VLFuse(nn.Module):\n \"\"\"Early Fusion Module.\n\n Args:\n v_dim (int): Dimension of visual features.\n l_dim (int): Dimension of language features.\n embed_dim (int): The embedding dimension for the attention operation.\n num_heads (int): Number of attention heads.\n dropout (float): Dropout probability.\n drop_path (float): Drop path probability.\n use_checkpoint (bool): Whether to use PyTorch's checkpoint function.\n \"\"\"\n\n def __init__(self,\n v_dim: int = 256,\n l_dim: int = 768,\n embed_dim: int = 2048,\n num_heads: int = 8,\n dropout: float = 0.1,\n drop_path: float = 0.0,\n use_checkpoint: bool = False):\n super().__init__()\n self.use_checkpoint = use_checkpoint\n self.b_attn = BiAttentionBlock(\n v_dim=v_dim,\n l_dim=l_dim,\n embed_dim=embed_dim,\n num_heads=num_heads,\n dropout=dropout,\n drop_path=drop_path,\n init_values=1.0 / 6.0)\n\n def forward(self, x: dict) -> dict:\n \"\"\"Forward pass of the VLFuse module.\"\"\"\n visual_features = x['visual']\n language_dict_features = x['lang']\n\n if self.use_checkpoint:\n # vf is mean visual_features\n # checkpoint does not allow complex data structures as input,\n # such as list, so we must split them.\n vf0, vf1, vf2, vf3, vf4, language_features = checkpoint.checkpoint(\n self.b_attn, *visual_features,\n language_dict_features['hidden'],\n language_dict_features['masks'])\n else:\n vf0, vf1, vf2, vf3, vf4, language_features = self.b_attn(\n *visual_features, language_dict_features['hidden'],\n language_dict_features['masks'])\n\n 
language_dict_features['hidden'] = language_features\n fused_language_dict_features = language_dict_features\n\n features_dict = {\n 'visual': [vf0, vf1, vf2, vf3, vf4],\n 'lang': fused_language_dict_features\n }\n\n return features_dict" }, { "identifier": "permute_and_flatten", "path": "mmdet/models/utils/vlfuse_helper.py", "snippet": "def permute_and_flatten(layer: Tensor, N: int, A: int, C: int, H: int,\n W: int) -> Tensor:\n \"\"\"Permute and then flatten a tensor,\n\n from size (N, A, C, H, W) to (N, H * W * A, C).\n\n Args:\n layer (Tensor): Tensor of shape (N, C, H, W).\n N (int): Batch size.\n A (int): Number of attention heads.\n C (int): Number of channels.\n H (int): Height of feature map.\n W (int): Width of feature map.\n\n Returns:\n Tensor: A Tensor of shape (N, H * W * A, C).\n \"\"\"\n layer = layer.view(N, A, C, H, W)\n layer = layer.permute(0, 3, 4, 1, 2)\n layer = layer.reshape(N, -1, C)\n return layer" }, { "identifier": "MAX_CLAMP_VALUE", "path": "mmdet/models/utils/vlfuse_helper.py", "snippet": "MAX_CLAMP_VALUE = 50000" }, { "identifier": "ATSSHead", "path": "mmdet/models/dense_heads/atss_head.py", "snippet": "class ATSSHead(AnchorHead):\n \"\"\"Detection Head of `ATSS <https://arxiv.org/abs/1912.02424>`_.\n\n ATSS head structure is similar with FCOS, however ATSS use anchor boxes\n and assign label by Adaptive Training Sample Selection instead max-iou.\n\n Args:\n num_classes (int): Number of categories excluding the background\n category.\n in_channels (int): Number of channels in the input feature map.\n pred_kernel_size (int): Kernel size of ``nn.Conv2d``\n stacked_convs (int): Number of stacking convs of the head.\n conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for\n convolution layer. Defaults to None.\n norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization\n layer. Defaults to ``dict(type='GN', num_groups=32,\n requires_grad=True)``.\n reg_decoded_bbox (bool): If true, the regression loss would be\n applied directly on decoded bounding boxes, converting both\n the predicted boxes and regression targets to absolute\n coordinates format. Defaults to False. 
It should be `True` when\n using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.\n loss_centerness (:obj:`ConfigDict` or dict): Config of centerness loss.\n Defaults to ``dict(type='CrossEntropyLoss', use_sigmoid=True,\n loss_weight=1.0)``.\n init_cfg (:obj:`ConfigDict` or dict or list[dict] or\n list[:obj:`ConfigDict`]): Initialization config dict.\n \"\"\"\n\n def __init__(self,\n num_classes: int,\n in_channels: int,\n pred_kernel_size: int = 3,\n stacked_convs: int = 4,\n conv_cfg: OptConfigType = None,\n norm_cfg: ConfigType = dict(\n type='GN', num_groups=32, requires_grad=True),\n reg_decoded_bbox: bool = True,\n loss_centerness: ConfigType = dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n loss_weight=1.0),\n init_cfg: MultiConfig = dict(\n type='Normal',\n layer='Conv2d',\n std=0.01,\n override=dict(\n type='Normal',\n name='atss_cls',\n std=0.01,\n bias_prob=0.01)),\n **kwargs) -> None:\n self.pred_kernel_size = pred_kernel_size\n self.stacked_convs = stacked_convs\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n super().__init__(\n num_classes=num_classes,\n in_channels=in_channels,\n reg_decoded_bbox=reg_decoded_bbox,\n init_cfg=init_cfg,\n **kwargs)\n\n self.sampling = False\n self.loss_centerness = MODELS.build(loss_centerness)\n\n def _init_layers(self) -> None:\n \"\"\"Initialize layers of the head.\"\"\"\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n pred_pad_size = self.pred_kernel_size // 2\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_reg = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 4,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_centerness = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 1,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])\n\n def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]:\n \"\"\"Forward features from the upstream network.\n\n Args:\n x (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n\n Returns:\n tuple: Usually a tuple of classification scores and bbox prediction\n cls_scores (list[Tensor]): Classification scores for all scale\n levels, each is a 4D-tensor, the channels number is\n num_anchors * num_classes.\n bbox_preds (list[Tensor]): Box energies / deltas for all scale\n levels, each is a 4D-tensor, the channels number is\n num_anchors * 4.\n \"\"\"\n return multi_apply(self.forward_single, x, self.scales)\n\n def forward_single(self, x: Tensor, scale: Scale) -> Sequence[Tensor]:\n \"\"\"Forward feature of a single scale level.\n\n Args:\n x (Tensor): Features of a single scale level.\n scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize\n the bbox prediction.\n\n Returns:\n tuple:\n cls_score (Tensor): Cls scores for a single scale level\n the channels number is num_anchors * num_classes.\n bbox_pred (Tensor): Box energies / deltas for a single scale\n level, the channels number is 
num_anchors * 4.\n centerness (Tensor): Centerness for a single scale level, the\n channel number is (N, num_anchors * 1, H, W).\n \"\"\"\n cls_feat = x\n reg_feat = x\n for cls_conv in self.cls_convs:\n cls_feat = cls_conv(cls_feat)\n for reg_conv in self.reg_convs:\n reg_feat = reg_conv(reg_feat)\n cls_score = self.atss_cls(cls_feat)\n # we just follow atss, not apply exp in bbox_pred\n bbox_pred = scale(self.atss_reg(reg_feat)).float()\n centerness = self.atss_centerness(reg_feat)\n return cls_score, bbox_pred, centerness\n\n def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor,\n bbox_pred: Tensor, centerness: Tensor,\n labels: Tensor, label_weights: Tensor,\n bbox_targets: Tensor, avg_factor: float) -> dict:\n \"\"\"Calculate the loss of a single scale level based on the features\n extracted by the detection head.\n\n Args:\n cls_score (Tensor): Box scores for each scale level\n Has shape (N, num_anchors * num_classes, H, W).\n bbox_pred (Tensor): Box energies / deltas for each scale\n level with shape (N, num_anchors * 4, H, W).\n anchors (Tensor): Box reference for each scale level with shape\n (N, num_total_anchors, 4).\n labels (Tensor): Labels of each anchors with shape\n (N, num_total_anchors).\n label_weights (Tensor): Label weights of each anchor with shape\n (N, num_total_anchors)\n bbox_targets (Tensor): BBox regression targets of each anchor with\n shape (N, num_total_anchors, 4).\n avg_factor (float): Average factor that is used to average\n the loss. When using sampling method, avg_factor is usually\n the sum of positive and negative priors. When using\n `PseudoSampler`, `avg_factor` is usually equal to the number\n of positive priors.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n\n anchors = anchors.reshape(-1, 4)\n cls_score = cls_score.permute(0, 2, 3, 1).reshape(\n -1, self.cls_out_channels).contiguous()\n bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n centerness = centerness.permute(0, 2, 3, 1).reshape(-1)\n bbox_targets = bbox_targets.reshape(-1, 4)\n labels = labels.reshape(-1)\n label_weights = label_weights.reshape(-1)\n\n # classification loss\n loss_cls = self.loss_cls(\n cls_score, labels, label_weights, avg_factor=avg_factor)\n\n # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n bg_class_ind = self.num_classes\n pos_inds = ((labels >= 0)\n & (labels < bg_class_ind)).nonzero().squeeze(1)\n\n if len(pos_inds) > 0:\n pos_bbox_targets = bbox_targets[pos_inds]\n pos_bbox_pred = bbox_pred[pos_inds]\n pos_anchors = anchors[pos_inds]\n pos_centerness = centerness[pos_inds]\n\n centerness_targets = self.centerness_target(\n pos_anchors, pos_bbox_targets)\n pos_decode_bbox_pred = self.bbox_coder.decode(\n pos_anchors, pos_bbox_pred)\n\n # regression loss\n loss_bbox = self.loss_bbox(\n pos_decode_bbox_pred,\n pos_bbox_targets,\n weight=centerness_targets,\n avg_factor=1.0)\n\n # centerness loss\n loss_centerness = self.loss_centerness(\n pos_centerness, centerness_targets, avg_factor=avg_factor)\n\n else:\n loss_bbox = bbox_pred.sum() * 0\n loss_centerness = centerness.sum() * 0\n centerness_targets = bbox_targets.new_tensor(0.)\n\n return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum()\n\n def loss_by_feat(\n self,\n cls_scores: List[Tensor],\n bbox_preds: List[Tensor],\n centernesses: List[Tensor],\n batch_gt_instances: InstanceList,\n batch_img_metas: List[dict],\n batch_gt_instances_ignore: OptInstanceList = None) -> dict:\n \"\"\"Calculate the loss based on the features extracted by 
the detection\n head.\n\n Args:\n cls_scores (list[Tensor]): Box scores for each scale level\n Has shape (N, num_anchors * num_classes, H, W)\n bbox_preds (list[Tensor]): Box energies / deltas for each scale\n level with shape (N, num_anchors * 4, H, W)\n centernesses (list[Tensor]): Centerness for each scale\n level with shape (N, num_anchors * 1, H, W)\n batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n gt_instance. It usually includes ``bboxes`` and ``labels``\n attributes.\n batch_img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):\n Batch of gt_instances_ignore. It includes ``bboxes`` attribute\n data that is ignored during training and testing.\n Defaults to None.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds]\n assert len(featmap_sizes) == self.prior_generator.num_levels\n\n device = cls_scores[0].device\n anchor_list, valid_flag_list = self.get_anchors(\n featmap_sizes, batch_img_metas, device=device)\n\n cls_reg_targets = self.get_targets(\n anchor_list,\n valid_flag_list,\n batch_gt_instances,\n batch_img_metas,\n batch_gt_instances_ignore=batch_gt_instances_ignore)\n\n (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n bbox_weights_list, avg_factor) = cls_reg_targets\n avg_factor = reduce_mean(\n torch.tensor(avg_factor, dtype=torch.float, device=device)).item()\n\n losses_cls, losses_bbox, loss_centerness, \\\n bbox_avg_factor = multi_apply(\n self.loss_by_feat_single,\n anchor_list,\n cls_scores,\n bbox_preds,\n centernesses,\n labels_list,\n label_weights_list,\n bbox_targets_list,\n avg_factor=avg_factor)\n\n bbox_avg_factor = sum(bbox_avg_factor)\n bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item()\n losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))\n return dict(\n loss_cls=losses_cls,\n loss_bbox=losses_bbox,\n loss_centerness=loss_centerness)\n\n def centerness_target(self, anchors: Tensor, gts: Tensor) -> Tensor:\n \"\"\"Calculate the centerness between anchors and gts.\n\n Only calculate pos centerness targets, otherwise there may be nan.\n\n Args:\n anchors (Tensor): Anchors with shape (N, 4), \"xyxy\" format.\n gts (Tensor): Ground truth bboxes with shape (N, 4), \"xyxy\" format.\n\n Returns:\n Tensor: Centerness between anchors and gts.\n \"\"\"\n anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2\n anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2\n l_ = anchors_cx - gts[:, 0]\n t_ = anchors_cy - gts[:, 1]\n r_ = gts[:, 2] - anchors_cx\n b_ = gts[:, 3] - anchors_cy\n\n left_right = torch.stack([l_, r_], dim=1)\n top_bottom = torch.stack([t_, b_], dim=1)\n centerness = torch.sqrt(\n (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *\n (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))\n assert not torch.isnan(centerness).any()\n return centerness\n\n def get_targets(self,\n anchor_list: List[List[Tensor]],\n valid_flag_list: List[List[Tensor]],\n batch_gt_instances: InstanceList,\n batch_img_metas: List[dict],\n batch_gt_instances_ignore: OptInstanceList = None,\n unmap_outputs: bool = True) -> tuple:\n \"\"\"Get targets for ATSS head.\n\n This method is almost the same as `AnchorHead.get_targets()`. 
Besides\n returning the targets as the parent method does, it also returns the\n anchors as the first element of the returned tuple.\n \"\"\"\n num_imgs = len(batch_img_metas)\n assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n # anchor number of multi levels\n num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n num_level_anchors_list = [num_level_anchors] * num_imgs\n\n # concat all level anchors and flags to a single tensor\n for i in range(num_imgs):\n assert len(anchor_list[i]) == len(valid_flag_list[i])\n anchor_list[i] = torch.cat(anchor_list[i])\n valid_flag_list[i] = torch.cat(valid_flag_list[i])\n\n # compute targets for each image\n if batch_gt_instances_ignore is None:\n batch_gt_instances_ignore = [None] * num_imgs\n (all_anchors, all_labels, all_label_weights, all_bbox_targets,\n all_bbox_weights, pos_inds_list, neg_inds_list,\n sampling_results_list) = multi_apply(\n self._get_targets_single,\n anchor_list,\n valid_flag_list,\n num_level_anchors_list,\n batch_gt_instances,\n batch_img_metas,\n batch_gt_instances_ignore,\n unmap_outputs=unmap_outputs)\n # Get `avg_factor` of all images, which calculate in `SamplingResult`.\n # When using sampling method, avg_factor is usually the sum of\n # positive and negative priors. When using `PseudoSampler`,\n # `avg_factor` is usually equal to the number of positive priors.\n avg_factor = sum(\n [results.avg_factor for results in sampling_results_list])\n # split targets to a list w.r.t. multiple levels\n anchors_list = images_to_levels(all_anchors, num_level_anchors)\n labels_list = images_to_levels(all_labels, num_level_anchors)\n label_weights_list = images_to_levels(all_label_weights,\n num_level_anchors)\n bbox_targets_list = images_to_levels(all_bbox_targets,\n num_level_anchors)\n bbox_weights_list = images_to_levels(all_bbox_weights,\n num_level_anchors)\n return (anchors_list, labels_list, label_weights_list,\n bbox_targets_list, bbox_weights_list, avg_factor)\n\n def _get_targets_single(self,\n flat_anchors: Tensor,\n valid_flags: Tensor,\n num_level_anchors: List[int],\n gt_instances: InstanceData,\n img_meta: dict,\n gt_instances_ignore: Optional[InstanceData] = None,\n unmap_outputs: bool = True) -> tuple:\n \"\"\"Compute regression, classification targets for anchors in a single\n image.\n\n Args:\n flat_anchors (Tensor): Multi-level anchors of the image, which are\n concatenated into a single tensor of shape (num_anchors ,4)\n valid_flags (Tensor): Multi level valid flags of the image,\n which are concatenated into a single tensor of\n shape (num_anchors,).\n num_level_anchors (List[int]): Number of anchors of each scale\n level.\n gt_instances (:obj:`InstanceData`): Ground truth of instance\n annotations. It usually includes ``bboxes`` and ``labels``\n attributes.\n img_meta (dict): Meta information for current image.\n gt_instances_ignore (:obj:`InstanceData`, optional): Instances\n to be ignored during training. 
It includes ``bboxes`` attribute\n data that is ignored during training and testing.\n Defaults to None.\n unmap_outputs (bool): Whether to map outputs back to the original\n set of anchors.\n\n Returns:\n tuple: N is the number of total anchors in the image.\n labels (Tensor): Labels of all anchors in the image with shape\n (N,).\n label_weights (Tensor): Label weights of all anchor in the\n image with shape (N,).\n bbox_targets (Tensor): BBox targets of all anchors in the\n image with shape (N, 4).\n bbox_weights (Tensor): BBox weights of all anchors in the\n image with shape (N, 4)\n pos_inds (Tensor): Indices of positive anchor with shape\n (num_pos,).\n neg_inds (Tensor): Indices of negative anchor with shape\n (num_neg,).\n sampling_result (:obj:`SamplingResult`): Sampling results.\n \"\"\"\n inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n img_meta['img_shape'][:2],\n self.train_cfg['allowed_border'])\n if not inside_flags.any():\n raise ValueError(\n 'There is no valid anchor inside the image boundary. Please '\n 'check the image size and anchor sizes, or set '\n '``allowed_border`` to -1 to skip the condition.')\n # assign gt and sample anchors\n anchors = flat_anchors[inside_flags, :]\n\n num_level_anchors_inside = self.get_num_level_anchors_inside(\n num_level_anchors, inside_flags)\n pred_instances = InstanceData(priors=anchors)\n assign_result = self.assigner.assign(pred_instances,\n num_level_anchors_inside,\n gt_instances, gt_instances_ignore)\n\n sampling_result = self.sampler.sample(assign_result, pred_instances,\n gt_instances)\n\n num_valid_anchors = anchors.shape[0]\n bbox_targets = torch.zeros_like(anchors)\n bbox_weights = torch.zeros_like(anchors)\n labels = anchors.new_full((num_valid_anchors, ),\n self.num_classes,\n dtype=torch.long)\n label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n pos_inds = sampling_result.pos_inds\n neg_inds = sampling_result.neg_inds\n if len(pos_inds) > 0:\n if self.reg_decoded_bbox:\n pos_bbox_targets = sampling_result.pos_gt_bboxes\n else:\n pos_bbox_targets = self.bbox_coder.encode(\n sampling_result.pos_priors, sampling_result.pos_gt_bboxes)\n\n bbox_targets[pos_inds, :] = pos_bbox_targets\n bbox_weights[pos_inds, :] = 1.0\n\n labels[pos_inds] = sampling_result.pos_gt_labels\n if self.train_cfg['pos_weight'] <= 0:\n label_weights[pos_inds] = 1.0\n else:\n label_weights[pos_inds] = self.train_cfg['pos_weight']\n if len(neg_inds) > 0:\n label_weights[neg_inds] = 1.0\n\n # map up to original set of anchors\n if unmap_outputs:\n num_total_anchors = flat_anchors.size(0)\n anchors = unmap(anchors, num_total_anchors, inside_flags)\n labels = unmap(\n labels, num_total_anchors, inside_flags, fill=self.num_classes)\n label_weights = unmap(label_weights, num_total_anchors,\n inside_flags)\n bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)\n bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)\n\n return (anchors, labels, label_weights, bbox_targets, bbox_weights,\n pos_inds, neg_inds, sampling_result)\n\n def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):\n \"\"\"Get the number of valid anchors in every level.\"\"\"\n\n split_inside_flags = torch.split(inside_flags, num_level_anchors)\n num_level_anchors_inside = [\n int(flags.sum()) for flags in split_inside_flags\n ]\n return num_level_anchors_inside" } ]
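Among the context snippets above, ATSSHead.centerness_target documents the centerness measure as the square root of the product of the min/max ratios of the left/right and top/bottom distances. A compact re-implementation of just that formula (the boxes below are invented to show the perfectly centred case):

```python
import torch

def centerness(anchors, gts):
    # anchors, gts: (N, 4) boxes in xyxy format; meant for positive anchor/gt pairs only.
    cx = (anchors[:, 0] + anchors[:, 2]) / 2
    cy = (anchors[:, 1] + anchors[:, 3]) / 2
    l, t = cx - gts[:, 0], cy - gts[:, 1]
    r, b = gts[:, 2] - cx, gts[:, 3] - cy
    lr = torch.stack([l, r], dim=1)
    tb = torch.stack([t, b], dim=1)
    return torch.sqrt(
        (lr.min(dim=-1).values / lr.max(dim=-1).values)
        * (tb.min(dim=-1).values / tb.max(dim=-1).values)
    )

anchors = torch.tensor([[40.0, 40.0, 60.0, 60.0]])  # centre at (50, 50)
gts = torch.tensor([[0.0, 0.0, 100.0, 100.0]])      # same centre as the gt box
print(centerness(anchors, gts))                      # tensor([1.]) -- perfectly centred
```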
import copy import math import torch import torch.nn as nn import torch.nn.functional as F from typing import Callable, List, Optional, Sequence, Tuple, Union from mmcv.cnn import Scale from mmcv.ops.modulated_deform_conv import ModulatedDeformConv2d from mmengine.config import ConfigDict from mmengine.model import BaseModel from mmengine.structures import InstanceData from torch import Tensor from transformers import BertConfig from mmdet.registry import MODELS from mmdet.structures.bbox import cat_boxes from mmdet.utils import InstanceList, OptInstanceList, reduce_mean from ..utils import (BertEncoderLayer, VLFuse, filter_scores_and_topk, permute_and_flatten, select_single_mlvl, unpack_gt_instances) from ..utils.vlfuse_helper import MAX_CLAMP_VALUE from .atss_head import ATSSHead
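permute_and_flatten, imported above from the vlfuse helpers, reshapes an anchor-major feature map of shape (N, A*C, H, W) into a location-major (N, H*W*A, C) layout so each spatial position can be matched against text tokens. A short shape check (sizes are placeholders):

```python
import torch

def permute_and_flatten(layer, N, A, C, H, W):
    # (N, A*C, H, W) -> (N, A, C, H, W) -> (N, H, W, A, C) -> (N, H*W*A, C)
    layer = layer.view(N, A, C, H, W)
    layer = layer.permute(0, 3, 4, 1, 2)
    return layer.reshape(N, -1, C)

N, A, C, H, W = 2, 1, 256, 20, 20
feat = torch.randn(N, A * C, H, W)
print(permute_and_flatten(feat, N, A, C, H, W).shape)  # torch.Size([2, 400, 256])
```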
token_num: 10,959
self.dot_product_projection_text = nn.Linear( self.lang_dim, self.num_base_priors * self.feat_channels, bias=True) self.log_scale = nn.Parameter(torch.Tensor([0.0]), requires_grad=True) self.bias_lang = nn.Parameter( torch.zeros(self.lang_dim), requires_grad=True) self.bias0 = nn.Parameter( torch.Tensor([bias_value]), requires_grad=True) self.scales = nn.ModuleList([Scale(1.0) for _ in range(5)]) def forward(self, visual_feats: Tuple[Tensor], language_feats: dict) -> Tuple: feat_inputs = {'visual': visual_feats, 'lang': language_feats} dyhead_tower = self.dyhead_tower(feat_inputs) if self.early_fuse: embedding = dyhead_tower['lang']['hidden'] else: embedding = language_feats['embedded'] embedding = F.normalize(embedding, p=2, dim=-1) dot_product_proj_tokens = self.dot_product_projection_text(embedding / 2.0) dot_product_proj_tokens_bias = torch.matmul( embedding, self.bias_lang) + self.bias0 bbox_preds = [] centerness = [] cls_logits = [] for i, feature in enumerate(visual_feats): visual = dyhead_tower['visual'][i] B, C, H, W = visual.shape bbox_pred = self.scales[i](self.bbox_pred(visual)) bbox_preds.append(bbox_pred) centerness.append(self.centerness(visual)) dot_product_proj_queries = permute_and_flatten( visual, B, self.num_base_priors, C, H, W) bias = dot_product_proj_tokens_bias.unsqueeze(1).repeat( 1, self.num_base_priors, 1) dot_product_logit = ( torch.matmul(dot_product_proj_queries, dot_product_proj_tokens.transpose(-1, -2)) / self.log_scale.exp()) + bias dot_product_logit = torch.clamp( dot_product_logit, max=MAX_CLAMP_VALUE) dot_product_logit = torch.clamp( dot_product_logit, min=-MAX_CLAMP_VALUE) cls_logits.append(dot_product_logit) return bbox_preds, centerness, cls_logits @MODELS.register_module() class ATSSVLFusionHead(ATSSHead): """ATSS head with visual-language fusion module. Args: early_fuse (bool): Whether to fuse visual and language features Defaults to False. use_checkpoint (bool): Whether to use checkpoint. Defaults to False. num_dyhead_blocks (int): Number of dynamic head blocks. Defaults to 6. lang_model_name (str): Name of the language model. Defaults to 'bert-base-uncased'. 
""" def __init__(self, *args, early_fuse: bool = False, use_checkpoint: bool = False, num_dyhead_blocks: int = 6, lang_model_name: str = 'bert-base-uncased', init_cfg=None, **kwargs): super().__init__(*args, **kwargs, init_cfg=init_cfg) self.head = VLFusionModule( in_channels=self.in_channels, feat_channels=self.feat_channels, num_base_priors=self.num_base_priors, early_fuse=early_fuse, use_checkpoint=use_checkpoint, num_dyhead_blocks=num_dyhead_blocks, lang_model_name=lang_model_name) self.text_masks = None def _init_layers(self) -> None: """No need to initialize the ATSS head layer.""" pass def forward(self, visual_feats: Tuple[Tensor], language_feats: dict) -> Tuple[Tensor]: """Forward function.""" bbox_preds, centerness, cls_logits = self.head(visual_feats, language_feats) return cls_logits, bbox_preds, centerness def loss(self, visual_feats: Tuple[Tensor], language_feats: dict, batch_data_samples): outputs = unpack_gt_instances(batch_data_samples) (batch_gt_instances, batch_gt_instances_ignore, batch_img_metas) = outputs outs = self(visual_feats, language_feats) self.text_masks = language_feats['masks'] loss_inputs = outs + (batch_gt_instances, batch_img_metas, batch_gt_instances_ignore) losses = self.loss_by_feat(*loss_inputs) return losses def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], centernesses: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict],
# Copyright (c) OpenMMLab. All rights reserved. try: except ImportError: BertConfig = None def convert_grounding_to_cls_scores(logits: Tensor, positive_maps: List[dict]) -> Tensor: """Convert logits to class scores.""" assert len(positive_maps) == logits.shape[0] # batch size scores = torch.zeros(logits.shape[0], logits.shape[1], len(positive_maps[0])).to(logits.device) if positive_maps is not None: if all(x == positive_maps[0] for x in positive_maps): # only need to compute once positive_map = positive_maps[0] for label_j in positive_map: scores[:, :, label_j - 1] = logits[:, :, torch.LongTensor(positive_map[label_j] )].mean(-1) else: for i, positive_map in enumerate(positive_maps): for label_j in positive_map: scores[i, :, label_j - 1] = logits[ i, :, torch.LongTensor(positive_map[label_j])].mean(-1) return scores class Conv3x3Norm(nn.Module): """Conv3x3 and norm.""" def __init__(self, in_channels: int, out_channels: int, stride: int, groups: int = 1, use_dcn: bool = False, norm_type: Optional[Union[Sequence, str]] = None): super().__init__() if use_dcn: self.conv = ModulatedDeformConv2d( in_channels, out_channels, kernel_size=3, stride=stride, padding=1, groups=groups) else: self.conv = nn.Conv2d( in_channels, out_channels, kernel_size=3, stride=stride, padding=1, groups=groups) if isinstance(norm_type, Sequence): assert len(norm_type) == 2 assert norm_type[0] == 'gn' gn_group = norm_type[1] norm_type = norm_type[0] if norm_type == 'bn': bn_op = nn.BatchNorm2d(out_channels) elif norm_type == 'gn': bn_op = nn.GroupNorm( num_groups=gn_group, num_channels=out_channels) if norm_type is not None: self.bn = bn_op else: self.bn = None def forward(self, x, **kwargs): x = self.conv(x, **kwargs) if self.bn: x = self.bn(x) return x class DyReLU(nn.Module): """Dynamic ReLU.""" def __init__(self, in_channels: int, out_channels: int, expand_ratio: int = 4): super().__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.expand_ratio = expand_ratio self.out_channels = out_channels self.fc = nn.Sequential( nn.Linear(in_channels, in_channels // expand_ratio), nn.ReLU(inplace=True), nn.Linear(in_channels // expand_ratio, out_channels * self.expand_ratio), nn.Hardsigmoid(inplace=True)) def forward(self, x) -> Tensor: x_out = x b, c, h, w = x.size() x = self.avg_pool(x).view(b, c) x = self.fc(x).view(b, -1, 1, 1) a1, b1, a2, b2 = torch.split(x, self.out_channels, dim=1) a1 = (a1 - 0.5) * 2 + 1.0 a2 = (a2 - 0.5) * 2 b1 = b1 - 0.5 b2 = b2 - 0.5 out = torch.max(x_out * a1 + b1, x_out * a2 + b2) return out class DyConv(nn.Module): """Dynamic Convolution.""" def __init__(self, conv_func: Callable, in_channels: int, out_channels: int, use_dyfuse: bool = True, use_dyrelu: bool = False, use_dcn: bool = False): super().__init__() self.dyconvs = nn.ModuleList() self.dyconvs.append(conv_func(in_channels, out_channels, 1)) self.dyconvs.append(conv_func(in_channels, out_channels, 1)) self.dyconvs.append(conv_func(in_channels, out_channels, 2)) if use_dyfuse: self.attnconv = nn.Sequential( nn.AdaptiveAvgPool2d(1), nn.Conv2d(in_channels, 1, kernel_size=1), nn.ReLU(inplace=True)) self.h_sigmoid = nn.Hardsigmoid(inplace=True) else: self.attnconv = None if use_dyrelu: self.relu = DyReLU(in_channels, out_channels) else: self.relu = nn.ReLU() if use_dcn: self.offset = nn.Conv2d( in_channels, 27, kernel_size=3, stride=1, padding=1) else: self.offset = None self.init_weights() def init_weights(self): for m in self.dyconvs.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight.data, 0, 0.01) if m.bias is not None: 
m.bias.data.zero_() if self.attnconv is not None: for m in self.attnconv.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight.data, 0, 0.01) if m.bias is not None: m.bias.data.zero_() def forward(self, inputs: dict) -> dict: visual_feats = inputs['visual'] out_vis_feats = [] for level, feature in enumerate(visual_feats): offset_conv_args = {} if self.offset is not None: offset_mask = self.offset(feature) offset = offset_mask[:, :18, :, :] mask = offset_mask[:, 18:, :, :].sigmoid() offset_conv_args = dict(offset=offset, mask=mask) temp_feats = [self.dyconvs[1](feature, **offset_conv_args)] if level > 0: temp_feats.append(self.dyconvs[2](visual_feats[level - 1], **offset_conv_args)) if level < len(visual_feats) - 1: temp_feats.append( F.upsample_bilinear( self.dyconvs[0](visual_feats[level + 1], **offset_conv_args), size=[feature.size(2), feature.size(3)])) mean_feats = torch.mean( torch.stack(temp_feats), dim=0, keepdim=False) if self.attnconv is not None: attn_feat = [] res_feat = [] for feat in temp_feats: res_feat.append(feat) attn_feat.append(self.attnconv(feat)) res_feat = torch.stack(res_feat) spa_pyr_attn = self.h_sigmoid(torch.stack(attn_feat)) mean_feats = torch.mean( res_feat * spa_pyr_attn, dim=0, keepdim=False) out_vis_feats.append(mean_feats) out_vis_feats = [self.relu(item) for item in out_vis_feats] features_dict = {'visual': out_vis_feats, 'lang': inputs['lang']} return features_dict class VLFusionModule(BaseModel): """Visual-lang Fusion Module.""" def __init__(self, in_channels: int, feat_channels: int, num_base_priors: int, early_fuse: bool = False, num_dyhead_blocks: int = 6, lang_model_name: str = 'bert-base-uncased', use_dyrelu: bool = True, use_dyfuse: bool = True, use_dcn: bool = True, use_checkpoint: bool = False, **kwargs) -> None: super().__init__(**kwargs) if BertConfig is None: raise RuntimeError( 'transformers is not installed, please install it by: ' 'pip install transformers.') self.in_channels = in_channels self.feat_channels = feat_channels self.num_base_priors = num_base_priors self.early_fuse = early_fuse self.num_dyhead_blocks = num_dyhead_blocks self.use_dyrelu = use_dyrelu self.use_dyfuse = use_dyfuse self.use_dcn = use_dcn self.use_checkpoint = use_checkpoint self.lang_cfg = BertConfig.from_pretrained(lang_model_name) self.lang_dim = self.lang_cfg.hidden_size self._init_layers() def _init_layers(self) -> None: """Initialize layers of the model.""" bias_value = -math.log((1 - 0.01) / 0.01) dyhead_tower = [] for i in range(self.num_dyhead_blocks): if self.early_fuse: # cross-modality fusion dyhead_tower.append(VLFuse(use_checkpoint=self.use_checkpoint)) # lang branch dyhead_tower.append( BertEncoderLayer( self.lang_cfg, clamp_min_for_underflow=True, clamp_max_for_overflow=True)) # vision branch dyhead_tower.append( DyConv( lambda i, o, s: Conv3x3Norm( i, o, s, use_dcn=self.use_dcn, norm_type=['gn', 16]), self.in_channels if i == 0 else self.feat_channels, self.feat_channels, use_dyrelu=(self.use_dyrelu and self.in_channels == self.feat_channels) if i == 0 else self.use_dyrelu, use_dyfuse=(self.use_dyfuse and self.in_channels == self.feat_channels) if i == 0 else self.use_dyfuse, use_dcn=(self.use_dcn and self.in_channels == self.feat_channels) if i == 0 else self.use_dcn, )) self.add_module('dyhead_tower', nn.Sequential(*dyhead_tower)) self.bbox_pred = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, kernel_size=1) self.centerness = nn.Conv2d( self.feat_channels, self.num_base_priors * 1, kernel_size=1) 
self.dot_product_projection_text = nn.Linear( self.lang_dim, self.num_base_priors * self.feat_channels, bias=True) self.log_scale = nn.Parameter(torch.Tensor([0.0]), requires_grad=True) self.bias_lang = nn.Parameter( torch.zeros(self.lang_dim), requires_grad=True) self.bias0 = nn.Parameter( torch.Tensor([bias_value]), requires_grad=True) self.scales = nn.ModuleList([Scale(1.0) for _ in range(5)]) def forward(self, visual_feats: Tuple[Tensor], language_feats: dict) -> Tuple: feat_inputs = {'visual': visual_feats, 'lang': language_feats} dyhead_tower = self.dyhead_tower(feat_inputs) if self.early_fuse: embedding = dyhead_tower['lang']['hidden'] else: embedding = language_feats['embedded'] embedding = F.normalize(embedding, p=2, dim=-1) dot_product_proj_tokens = self.dot_product_projection_text(embedding / 2.0) dot_product_proj_tokens_bias = torch.matmul( embedding, self.bias_lang) + self.bias0 bbox_preds = [] centerness = [] cls_logits = [] for i, feature in enumerate(visual_feats): visual = dyhead_tower['visual'][i] B, C, H, W = visual.shape bbox_pred = self.scales[i](self.bbox_pred(visual)) bbox_preds.append(bbox_pred) centerness.append(self.centerness(visual)) dot_product_proj_queries = permute_and_flatten( visual, B, self.num_base_priors, C, H, W) bias = dot_product_proj_tokens_bias.unsqueeze(1).repeat( 1, self.num_base_priors, 1) dot_product_logit = ( torch.matmul(dot_product_proj_queries, dot_product_proj_tokens.transpose(-1, -2)) / self.log_scale.exp()) + bias dot_product_logit = torch.clamp( dot_product_logit, max=MAX_CLAMP_VALUE) dot_product_logit = torch.clamp( dot_product_logit, min=-MAX_CLAMP_VALUE) cls_logits.append(dot_product_logit) return bbox_preds, centerness, cls_logits @MODELS.register_module() class ATSSVLFusionHead(ATSSHead): """ATSS head with visual-language fusion module. Args: early_fuse (bool): Whether to fuse visual and language features Defaults to False. use_checkpoint (bool): Whether to use checkpoint. Defaults to False. num_dyhead_blocks (int): Number of dynamic head blocks. Defaults to 6. lang_model_name (str): Name of the language model. Defaults to 'bert-base-uncased'. 
""" def __init__(self, *args, early_fuse: bool = False, use_checkpoint: bool = False, num_dyhead_blocks: int = 6, lang_model_name: str = 'bert-base-uncased', init_cfg=None, **kwargs): super().__init__(*args, **kwargs, init_cfg=init_cfg) self.head = VLFusionModule( in_channels=self.in_channels, feat_channels=self.feat_channels, num_base_priors=self.num_base_priors, early_fuse=early_fuse, use_checkpoint=use_checkpoint, num_dyhead_blocks=num_dyhead_blocks, lang_model_name=lang_model_name) self.text_masks = None def _init_layers(self) -> None: """No need to initialize the ATSS head layer.""" pass def forward(self, visual_feats: Tuple[Tensor], language_feats: dict) -> Tuple[Tensor]: """Forward function.""" bbox_preds, centerness, cls_logits = self.head(visual_feats, language_feats) return cls_logits, bbox_preds, centerness def loss(self, visual_feats: Tuple[Tensor], language_feats: dict, batch_data_samples): outputs = unpack_gt_instances(batch_data_samples) (batch_gt_instances, batch_gt_instances_ignore, batch_img_metas) = outputs outs = self(visual_feats, language_feats) self.text_masks = language_feats['masks'] loss_inputs = outs + (batch_gt_instances, batch_img_metas, batch_gt_instances_ignore) losses = self.loss_by_feat(*loss_inputs) return losses def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], centernesses: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None) -> dict:
3
2023-12-11 15:23:03+00:00
16k
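As an aside for readers skimming the GLIP-style head in the record above: its per-location classification logits are a scaled dot product between projected visual queries and projected text-token embeddings, plus a per-token bias, clamped for numerical stability. Below is a minimal, self-contained sketch of that scoring step only; the shapes, names, and clamp value are illustrative assumptions, not taken from the repository.

import torch

# Illustrative shapes (assumptions for the demo, not from the repo)
B, N, C, T = 2, 8, 256, 5          # batch, visual queries, channels, text tokens
MAX_CLAMP_VALUE = 50000            # same clamping idea as in the snippet above

queries = torch.randn(B, N, C)     # flattened per-anchor visual features
tokens = torch.randn(B, T, C)      # projected language embeddings
bias = torch.randn(B, 1, T)        # per-token bias, broadcast over queries
log_scale = torch.nn.Parameter(torch.tensor([0.0]))

# score(i, j) = <query_i, token_j> / exp(log_scale) + bias_j
logits = torch.matmul(queries, tokens.transpose(-1, -2)) / log_scale.exp() + bias
logits = logits.clamp(min=-MAX_CLAMP_VALUE, max=MAX_CLAMP_VALUE)
print(logits.shape)                # torch.Size([2, 8, 5])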
merlresearch/PixPNet
pixpnet/protonets/lit_model.py
[ { "identifier": "get_metadata", "path": "pixpnet/data.py", "snippet": "def get_metadata(config):\n dataset = config.dataset.name.upper().replace(\"-\", \"\")\n metadata = DatasetMeta(\n output_size=DATA_NUM_OUTPUTS[dataset],\n input_channels=DATA_CHANNELS[dataset],\n input_size=_get_input_size(dataset),\n label_names=LABEL_NAMES.get(dataset),\n )\n return metadata" }, { "identifier": "LitData", "path": "pixpnet/lightning/lightning_data.py", "snippet": "class LitData(LightningDataModule):\n def __init__(self, config, num_workers=None, **kwargs):\n super().__init__()\n self.config = config\n self.train = self.train_no_aug = self.val = self.test = None\n self.kwargs = kwargs\n # Required to check if setup was called prior...\n # https://github.com/Lightning-AI/lightning/issues/9865\n self.datasets_loaded = False\n if num_workers is None:\n num_workers = num_cpus()\n self.num_workers = num_workers\n\n def setup(self, stage=None):\n \"\"\"called on every GPU\"\"\"\n if self.datasets_loaded:\n return\n\n logger.info(f\"Loading the {self.config.dataset.name} dataset \" f\"(val_size={self.config.dataset.val_size})\")\n\n datasets = get_datasets(self.config, **self.kwargs)\n\n if self.config.dataset.needs_unaugmented:\n self.train, self.train_no_aug, self.val, self.test = datasets\n else:\n self.train, self.val, self.test = datasets\n\n # get_datasets may modify val_size\n if self.config.dataset.val_size == 0:\n if self.trainer:\n self.trainer.limit_val_batches = 0\n self.trainer.num_sanity_val_steps = 0\n\n self.datasets_loaded = True\n\n def train_dataloader(self):\n return DataLoader(\n self.train,\n batch_size=self.config.train.batch_size,\n shuffle=True,\n num_workers=self.num_workers,\n drop_last=True,\n )\n\n def train_no_aug_dataloader(self):\n if not self.config.dataset.needs_unaugmented:\n raise ValueError(\"Unaugmented train data set requested, but \" \"--dataset.needs-unaugmented is False\")\n return DataLoader(\n self.train_no_aug,\n batch_size=self.config.train.batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n drop_last=False,\n )\n\n def val_dataloader(self):\n return DataLoader(\n self.val, batch_size=self.config.test.batch_size, num_workers=self.num_workers, drop_last=False\n )\n\n def test_dataloader(self):\n return DataLoader(\n self.test, batch_size=self.config.test.batch_size, num_workers=self.num_workers, drop_last=False\n )" }, { "identifier": "BaseLitModel", "path": "pixpnet/lightning/lit_module.py", "snippet": "class BaseLitModel(LightningModule, metaclass=ABCMeta):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.optimizer = None\n\n # training stats\n self._train_time_total = 0\n self._train_time_per_epoch = 0\n self._actual_epoch_count = 0\n self._infer_count = 0\n self._infer_batch_count = 0\n self._inference_time_per_sample = 0\n self._inference_time_per_batch = 0\n self._train_t0 = None\n self._inference_t0 = None\n\n @property\n def train_time_total(self):\n return self._train_time_total\n\n @property\n def train_time_per_epoch(self):\n return self._train_time_per_epoch\n\n @property\n def inference_time_per_sample(self):\n return self._inference_time_per_sample\n\n @property\n def inference_time_per_batch(self):\n return self._inference_time_per_batch\n\n @abstractmethod\n def _forward(self, *args, **kwargs) -> Any:\n raise NotImplementedError\n\n @staticmethod\n def _metric_per_split(metric, *args, **kwargs):\n return CollisionlessModuleDict(\n {\"train\": metric(*args, **kwargs), \"val\": metric(*args, **kwargs), 
\"test\": metric(*args, **kwargs)}\n )\n\n def forward(self, x, *args, **kwargs) -> Any:\n if not self.training:\n # only record inference time in non-training mode\n self._inference_t0 = time.time()\n out = self._forward(x, *args, **kwargs)\n if not self.training:\n duration = time.time() - self._inference_t0\n self._inference_time_per_batch = (self._inference_time_per_batch * self._infer_batch_count + duration) / (\n self._infer_batch_count + 1\n )\n self._infer_batch_count += 1\n self._inference_time_per_sample = (self._inference_time_per_sample * self._infer_count + duration) / (\n self._infer_count + len(x)\n )\n self._infer_count += len(x)\n return out\n\n def on_train_start(self):\n if self.config.debug:\n torch.autograd.set_detect_anomaly(True)\n hp_lr_metrics = {f\"hp/lr_group_{i}\": 0 for i in range(len(self.optimizer.param_groups))}\n for lit_logger in self.loggers:\n args = (hp_lr_metrics,) if isinstance(lit_logger, TensorBoardLogger) else ()\n lit_logger.log_hyperparams(self.config.optimizer, *args)\n lit_logger.log_hyperparams(self.config.train)\n lit_logger.log_hyperparams(self.config.model)\n\n def on_train_epoch_start(self) -> None:\n self._train_t0 = time.time()\n\n def on_train_epoch_end(self) -> None:\n duration = time.time() - self._train_t0\n self._train_time_total += duration\n # running mean\n self._train_time_per_epoch = (self._train_time_per_epoch * self._actual_epoch_count + duration) / (\n self._actual_epoch_count + 1\n )\n self._actual_epoch_count += 1\n\n def training_step(self, batch, batch_idx, dataset_idx=None):\n loss = self._shared_eval(batch, batch_idx, dataset_idx, \"train\")\n for i, param_group in enumerate(self.optimizer.param_groups):\n self.log(f\"hp/lr_group_{i}\", param_group[\"lr\"])\n return loss\n\n def validation_step(self, batch, batch_idx, dataset_idx=None):\n self._shared_eval(batch, batch_idx, dataset_idx, \"val\")\n\n def test_step(self, batch, batch_idx, dataset_idx=None):\n self._shared_eval(batch, batch_idx, dataset_idx, \"test\")\n\n @abstractmethod\n def _shared_eval(self, batch: Any, batch_idx: int, dataset_idx: int, prefix: str) -> torch.Tensor:\n \"\"\"\n Handle batch, compute forward, compute loss and other metrics,\n then return the loss.\n \"\"\"\n raise NotImplementedError" }, { "identifier": "get_optimizer_cls", "path": "pixpnet/optim.py", "snippet": "def get_optimizer_cls(\n config: argparse.Namespace,\n ignore: Optional[Set[str]] = None,\n) -> Tuple[Type[torch.optim.Optimizer], Dict[str, Any]]:\n if ignore is None:\n ignore = set()\n try:\n optimizer_cls = _LOOSE_OPTIMIZER_MAP[config.optimizer.name.lower()]\n except KeyError:\n raise ValueError(f'No such optimizer \"{config.optimizer.name}\"')\n hparams, invalid_keys = intersect_func_and_kwargs(\n optimizer_cls,\n config.optimizer,\n exclude_func_args={\"params\"},\n exclude_kwargs={\"name\", \"throttle_lr\", \"lr_schedule\", \"lr_scheduler\", \"lr_factor\", \"warmup_period\"} | ignore,\n )\n if invalid_keys:\n logger.warning(\n f\"Will not pass the following invalid optimizer \"\n f\"hyperparameters to {optimizer_cls.__name__}: \"\n f'{\", \".join(invalid_keys)}'\n )\n logger.info(f\"Optimizer hyperparameters for {optimizer_cls.__name__}: \" f\"{hparams}\")\n return optimizer_cls, hparams" }, { "identifier": "get_scheduler", "path": "pixpnet/optim.py", "snippet": "def get_scheduler(optimizer: torch.optim.Optimizer, config: argparse.Namespace) -> LRWithWarmupMixin:\n \"\"\"\"\"\"\n if config.optimizer.warmup_period:\n lr_warmup = ExponentialWarmup(optimizer, 
warmup_period=config.optimizer.warmup_period)\n else:\n lr_warmup = None\n if config.optimizer.lr_scheduler == \"multistep\":\n lr_scheduler = MultiStepLRWithWarmup(\n optimizer,\n milestones=config.optimizer.lr_schedule,\n gamma=config.optimizer.lr_factor,\n last_epoch=-1,\n warmup=lr_warmup,\n )\n elif config.optimizer.lr_scheduler == \"step\":\n assert len(config.optimizer.lr_schedule) == 1, config.optimizer.lr_schedule\n lr_scheduler = StepLRWithWarmup(\n optimizer,\n step_size=config.optimizer.lr_schedule[0],\n gamma=config.optimizer.lr_factor,\n last_epoch=-1,\n warmup=lr_warmup,\n )\n elif config.optimizer.lr_scheduler == \"cosine\":\n lr_scheduler = CosineAnnealingLRWithWarmup(\n optimizer,\n T_max=config.train.epochs,\n eta_min=0,\n last_epoch=-1,\n warmup=lr_warmup,\n )\n else:\n raise NotImplementedError(f\"Scheduler {config.optimizer.lr_scheduler}\")\n\n return lr_scheduler" }, { "identifier": "ClusterLoss", "path": "pixpnet/protonets/loss.py", "snippet": "class ClusterLoss(nn.Module):\n def __init__(self, class_specific=True):\n super().__init__()\n self.class_specific = class_specific\n\n def forward(self, min_distances: Tensor, target: Tensor, model: ProtoNet) -> Tensor:\n # min_distances: N x P\n if self.class_specific:\n # prototypes_of_correct_class: batch_size x num_prototypes\n prototypes_of_correct_class = torch.t(model.prototype_class_identity[:, target])\n min_distances_target = torch.where(\n prototypes_of_correct_class.bool(),\n min_distances,\n torch.tensor(torch.inf, dtype=min_distances.dtype, device=min_distances.device),\n )\n min_min_distances, _ = torch.min(min_distances_target, dim=1)\n cluster_loss = torch.mean(min_min_distances)\n else:\n min_min_distances, _ = torch.min(min_distances, dim=1)\n cluster_loss = torch.mean(min_min_distances)\n\n return cluster_loss" }, { "identifier": "L1ReadoutLoss", "path": "pixpnet/protonets/loss.py", "snippet": "class L1ReadoutLoss(nn.Module):\n def __init__(self, class_specific=True):\n super().__init__()\n self.class_specific = class_specific\n\n def forward(self, model: ProtoNet) -> Tensor:\n last_layer = model.last_layer\n if isinstance(last_layer, GroupedLinear):\n l1_loss = last_layer.weight.norm(p=1)\n else:\n if self.class_specific:\n l1_mask = 1 - torch.t(model.prototype_class_identity)\n l1_loss = (last_layer.weight * l1_mask).norm(p=1)\n else:\n l1_loss = last_layer.weight.norm(p=1)\n\n return l1_loss" }, { "identifier": "SeparationLoss", "path": "pixpnet/protonets/loss.py", "snippet": "class SeparationLoss(nn.Module):\n @staticmethod\n def forward(min_distances: Tensor, target: Tensor, model: ProtoNet, return_avg: bool = False):\n \"\"\"\n Here we want to maximize the minimum of all minimum proto-patch\n distances (each being some patch that is closest to a given prototype)\n for each non-class prototype. In effect, for each sample, a patch is\n selected for each non-class prototype according to minimum distance. 
So,\n we end up with one patch and one prototype per sample after taking the\n minimum of the proto-patch distances.\n \"\"\"\n # min_distances: N x P\n # prototype_class_identity: P x C\n # prototypes_of_correct_class: N x P\n prototypes_of_correct_class = torch.t(model.prototype_class_identity[:, target]).bool()\n min_distances_nontarget = torch.where(\n prototypes_of_correct_class.bool(),\n torch.tensor(torch.inf, dtype=min_distances.dtype, device=min_distances.device),\n min_distances,\n )\n dists_to_nontarget_prototypes, _ = torch.min(min_distances_nontarget, dim=1)\n separation_loss = -torch.mean(dists_to_nontarget_prototypes)\n\n if not return_avg:\n return separation_loss\n # otherwise\n min_distances_nontarget = torch.where(\n prototypes_of_correct_class.bool(),\n torch.tensor(0, dtype=min_distances.dtype, device=min_distances.device),\n min_distances,\n )\n avg_separation_cost = torch.sum(min_distances_nontarget, dim=1) / torch.sum(\n ~prototypes_of_correct_class.bool(), dim=1\n )\n avg_separation_cost = -torch.mean(avg_separation_cost)\n return separation_loss, avg_separation_cost" }, { "identifier": "ProtoNet", "path": "pixpnet/protonets/models/protonet.py", "snippet": "class ProtoNet(nn.Module):\n # Buffers\n ones: torch.Tensor\n corresponding_sample_idxs: torch.Tensor\n min_fmap_idxs: torch.Tensor\n prototype_class_identity: Optional[torch.Tensor]\n # Parameters\n prototype_vectors: torch.nn.Parameter\n\n # Constants\n prototype_layer_stride = 1\n\n def __init__(\n self,\n features: nn.Module,\n feature_layer: str,\n rf_slices: Optional[SlicesType],\n num_prototypes: int,\n prototype_dim: int,\n prototype_kernel_size: int,\n num_classes: int,\n init_weights: bool = True,\n prototype_activation: Union[str, Callable] = \"log\",\n add_on_layers_type: str = \"regular\",\n class_specific: bool = True,\n epsilon: float = 1e-6,\n learn_prototypes: bool = True,\n incorrect_strength: float = -0.5,\n correct_strength: float = 1,\n readout_type: str = \"linear\",\n distance: str = \"l2\",\n ):\n \"\"\"\"\"\"\n super().__init__()\n self.prototype_shape = (num_prototypes, prototype_dim, prototype_kernel_size, prototype_kernel_size)\n self.num_prototypes = num_prototypes\n self.prototype_dim = prototype_dim\n self.prototype_kernel_size = prototype_kernel_size\n self.num_classes = num_classes\n self.epsilon = epsilon\n self.learn_prototypes = learn_prototypes\n # prototype_activation could be 'log', 'linear',\n # or a callable that converts distance to similarity score\n self.prototype_activation = prototype_activation\n self.distance = distance\n self.feature_layer = feature_layer\n\n self.rf_slices = rf_slices\n self.rf_idxs = None\n self.rf_sizes = None\n if self.rf_slices is not None:\n Hz = len(self.rf_slices)\n Wz = len(self.rf_slices[0])\n self.rf_sizes = torch.zeros((Hz, Wz, 2), dtype=torch.int)\n self.rf_idxs = torch.zeros((Hz, Wz, 4), dtype=torch.int)\n for h in range(Hz):\n for w in range(Wz):\n # for patch h,w\n if len(self.rf_slices[h][w]) > 1:\n raise NotImplementedError\n for h_s, w_s in self.rf_slices[h][w]:\n # Start weighting approach\n h_size = h_s.stop - h_s.start\n w_size = w_s.stop - w_s.start\n self.rf_sizes[h, w] = torch.tensor([h_size, w_size], dtype=torch.int)\n self.rf_idxs[h, w] = torch.tensor([h_s.start, h_s.stop, w_s.start, w_s.stop], dtype=torch.int)\n\n self.incorrect_strength = incorrect_strength\n self.correct_strength = correct_strength\n self.class_specific = class_specific\n if self.class_specific:\n # Here we are initializing the class identities of the 
prototypes.\n # Without domain specific knowledge we allocate the same number of\n # prototypes for each class\n assert self.num_prototypes % self.num_classes == 0\n # a one-hot indication matrix for each prototype's class identity\n self.register_buffer(\n \"prototype_class_identity\", torch.zeros(self.num_prototypes, self.num_classes, dtype=torch.int)\n )\n num_prototypes_per_class = self.num_prototypes // self.num_classes\n for j in range(self.num_prototypes):\n self.prototype_class_identity[j, j // num_prototypes_per_class] = 1\n\n # this has to be named features to allow the precise loading\n self.features = features\n self._init_add_on_layers(add_on_layers_type)\n\n self.register_parameter(\n \"prototype_vectors\", nn.Parameter(torch.rand(self.prototype_shape), requires_grad=learn_prototypes)\n )\n self.register_buffer(\"ones\", torch.ones(self.prototype_shape))\n self.register_buffer(\"corresponding_sample_idxs\", torch.full((self.num_prototypes,), -1))\n self.register_buffer(\"min_fmap_idxs\", torch.full((self.num_prototypes, 4), -1))\n\n self.readout_type = readout_type\n self._init_last_layer()\n\n if init_weights:\n self._initialize_weights()\n\n def _init_last_layer(self):\n # do not use bias to aid interpretability\n if self.readout_type == \"linear\": # standard linear\n self.last_layer = nn.Linear(self.num_prototypes, self.num_classes, bias=False)\n elif self.readout_type == \"sparse\": # sparse linear\n if not self.class_specific:\n raise ValueError('`readout_type` cannot be \"sparse\" if ' \"`class_specific` is False\")\n self.last_layer = GroupedLinear(self.num_prototypes, self.num_classes, groups=self.num_classes, bias=False)\n elif self.readout_type == \"proto\": # prototype sim sums as prediction\n if not self.class_specific:\n raise ValueError('`readout_type` cannot be \"proto\" if ' \"`class_specific` is False\")\n # Note that this assumes that `prototype_class_identity` is still\n # uniform across classes when class_specific is True\n self.last_layer = GroupedSum(self.num_prototypes, self.num_classes)\n else:\n raise NotImplementedError(f\"readout_type = {self.readout_type}\")\n\n def _init_add_on_layers(self, add_on_layers_type):\n in_channels = self.features.out_channels\n\n final_act, final_act_str = nn.Sigmoid(), \"sigmoid\"\n if add_on_layers_type == \"bottleneck\":\n add_on_layers = []\n current_in_channels = in_channels\n conv_idx = 1\n while current_in_channels > self.prototype_dim or not len(add_on_layers):\n current_out_channels = max(self.prototype_dim, (current_in_channels // 2))\n if current_out_channels > self.prototype_dim:\n conv2_str, act2, act2_str = (f\"conv{conv_idx + 1}\", nn.ReLU(), f\"relu{conv_idx + 1}\")\n else:\n assert current_out_channels == self.prototype_dim\n conv2_str, act2, act2_str = (\"conv_last\", final_act, final_act_str)\n add_on_layers.extend(\n (\n (\n f\"conv{conv_idx}\",\n nn.Conv2d(\n in_channels=current_in_channels, out_channels=current_out_channels, kernel_size=1\n ),\n ),\n (f\"relu{conv_idx}\", nn.ReLU()),\n (\n conv2_str,\n nn.Conv2d(\n in_channels=current_out_channels, out_channels=current_out_channels, kernel_size=1\n ),\n ),\n (act2_str, act2),\n )\n )\n current_in_channels = current_in_channels // 2\n conv_idx += 2\n elif add_on_layers_type == \"regular\":\n add_on_layers = (\n (\"conv1\", nn.Conv2d(in_channels=in_channels, out_channels=self.prototype_dim, kernel_size=1)),\n (\"relu1\", nn.ReLU()),\n (\n \"conv_last\",\n nn.Conv2d(in_channels=self.prototype_dim, out_channels=self.prototype_dim, kernel_size=1),\n 
),\n (final_act_str, final_act),\n )\n else:\n raise ValueError(add_on_layers_type)\n add_on_layers = OrderedDict(add_on_layers)\n\n self.add_on_layers = nn.Sequential(add_on_layers)\n\n def conv_features(self, x):\n \"\"\"\n the feature input to prototype layer\n \"\"\"\n x = self.features(x)\n log_once(logger.info, f'features output shape: {(\"N\", *x.size()[1:])}')\n x = self.add_on_layers(x)\n log_once(logger.info, f'add_on_layers output shape: {(\"N\", *x.size()[1:])}')\n return x\n\n def compute_distances(self, x):\n return compute_distances(self.distance, x, self.prototype_vectors, self.ones)\n\n def prototype_distances(self, x):\n \"\"\"\n x is the raw input\n \"\"\"\n conv_features = self.conv_features(x)\n distances = self.compute_distances(conv_features)\n return conv_features, distances\n\n def dist_2_sim(self, distances):\n if self.prototype_activation == \"log\":\n # equivalent:\n # log((distances + 1) / (distances + epsilon)) # noqa: E800\n # but this one is numerically more accurate\n return torch.log(1 / (distances + self.epsilon) + 1)\n elif self.prototype_activation == \"linear\":\n if self.distance == \"cosine\":\n # dists = 1 - sim --> sim = 1 - dists\n return 1 - distances\n else:\n return -distances\n else:\n return self.prototype_activation(distances)\n\n def forward(self, x, return_features=False):\n result = self.prototype_distances(x)\n conv_features, distances = result\n outputs = self.classify_head(x, distances)\n if return_features:\n outputs[\"features\"] = conv_features\n return outputs\n\n def classify_head(self, x, distances):\n return self._classify_head_proto2patch(distances)\n\n def pixel_space_map(self, x_i, proto_dists, sigma_factor=1.0):\n # Note: one sample at a time! otherwise there will definitely be\n # memory issues on most hardware and ProtoNets\n dtype = proto_dists.dtype\n device = proto_dists.device\n\n # validate shape\n if x_i.ndim == 4:\n assert x_i.shape[0] == 1, x_i.shape\n x_i = torch.squeeze(x_i, 0)\n else:\n assert x_i.ndim == 3, x_i.shape\n\n if proto_dists.ndim == 4:\n assert proto_dists.shape[0] == 1, proto_dists.shape\n proto_dists = torch.squeeze(proto_dists, 0)\n else:\n assert proto_dists.ndim == 3, proto_dists.shape\n\n C, H, W = x_i.shape\n P, Hz, Wz = proto_dists.shape\n\n # dists --> sims\n proto_sims = self.dist_2_sim(proto_dists)\n # Sim maps\n heat_map_max = torch.zeros((P, H, W), dtype=dtype, device=device)\n heat_map_avg = torch.zeros_like(heat_map_max)\n heat_map_counts = torch.zeros_like(heat_map_avg, dtype=torch.int)\n\n rf_h = self.rf_sizes[:, :, 0].max()\n rf_w = self.rf_sizes[:, :, 1].max()\n\n do_super_rfs = rf_h >= H or rf_w >= W\n if do_super_rfs:\n # increase true rf_h/w\n where_big = torch.where((self.rf_sizes[:, :, 0] >= H) | (self.rf_sizes[:, :, 1] >= W))\n do_super_rfs = len(where_big[0]) > 1\n if do_super_rfs:\n # linear stretching assumption for super-100% RF networks\n naive_midpoints_h = torch.round((torch.arange(Hz) + 0.5) * H / Hz).int()\n naive_midpoints_w = torch.round((torch.arange(Wz) + 0.5) * W / Wz).int()\n\n im_midpoints = (H - 1) / 2, (W - 1) / 2\n\n pad_h = torch.round((im_midpoints[0] - naive_midpoints_h[where_big[0]]).abs().max()).int()\n pad_w = torch.round((im_midpoints[1] - naive_midpoints_w[where_big[1]]).abs().max()).int()\n\n # increase the RFs by the discovered padding amount\n rf_h = rf_h + 2 * pad_h\n rf_w = rf_w + 2 * pad_w\n\n k_size = max(rf_h, rf_w)\n sigma = k_size * sigma_factor\n g_kern = gaussian_kernel(k_size, sigma=sigma, device=device)\n\n for h in range(Hz):\n for w 
in range(Wz):\n # for patch h,w\n sims_hw = proto_sims[:, h, w][:, None, None] # P x 1 x 1\n h_size, w_size = self.rf_sizes[h, w] # rf_sizes: Hz x Wz x 2\n\n hs0, hs1, ws0, ws1 = self.rf_idxs[h, w]\n\n if do_super_rfs:\n mh, mw = naive_midpoints_h[h], naive_midpoints_w[w]\n\n hs0_ = mh - rf_h // 2\n hs1_ = mh + ceil(rf_h // 2)\n ws0_ = mw - rf_w // 2\n ws1_ = mw + ceil(rf_w // 2)\n\n h_pad0 = max(-hs0_, 0)\n h_pad1 = max(hs1_ - H - max(hs0_, 0), 0)\n w_pad0 = max(-ws0_, 0)\n w_pad1 = max(ws1_ - W - max(ws0_, 0), 0)\n\n if h_size < H:\n if hs0 != 0:\n h_pad0 += H - h_size\n else:\n h_pad1 += H - h_size\n if w_size < W:\n if ws0 != 0:\n w_pad0 += W - w_size\n else:\n w_pad1 += W - w_size\n\n g_kern_hw = g_kern[int(h_pad0) : k_size - ceil(h_pad1), int(w_pad0) : k_size - ceil(w_pad1)]\n else:\n h_pad0 = h_pad1 = 0\n w_pad0 = w_pad1 = 0\n if h_size < rf_h:\n if hs1 - rf_h < 0:\n h_pad0 += rf_h - h_size\n else:\n h_pad1 += rf_h - h_size\n if w_size < rf_w:\n if ws1 - rf_w < 0:\n w_pad0 += rf_w - w_size\n else:\n w_pad1 += rf_w - w_size\n g_kern_hw = g_kern[int(h_pad0) : k_size - ceil(h_pad1), int(w_pad0) : k_size - ceil(w_pad1)]\n\n sims_hw_full = sims_hw * g_kern_hw[None, :, :]\n\n heat_map_avg[:, hs0:hs1, ws0:ws1] += sims_hw_full\n heat_map_counts[:, hs0:hs1, ws0:ws1] += 1\n heat_map_max[:, hs0:hs1, ws0:ws1] = torch.maximum(sims_hw_full, heat_map_max[:, hs0:hs1, ws0:ws1])\n # take element-wise averages according to overlap tensor (counts)\n heat_map_sum = heat_map_avg.clone()\n heat_map_avg /= heat_map_counts\n\n return heat_map_max, heat_map_avg, heat_map_sum # each is P x H x W\n\n def pixel_space_upscale(self, x_i, proto_dists):\n # validate shape\n if x_i.ndim == 4:\n assert x_i.shape[0] == 1, x_i.shape\n x_i = torch.squeeze(x_i, 0)\n else:\n assert x_i.ndim == 3, x_i.shape\n\n if proto_dists.ndim == 4:\n assert proto_dists.shape[0] == 1, proto_dists.shape\n proto_dists = torch.squeeze(proto_dists, 0)\n else:\n assert proto_dists.ndim == 3, proto_dists.shape\n\n C, H, W = x_i.shape\n\n # dists --> sims\n proto_sims = self.dist_2_sim(proto_dists)\n # Sim maps\n heat_map = torch.nn.functional.interpolate(proto_sims[None], (H, W), mode=\"bicubic\")\n # 1 x P x H x W --> P x H x W\n heat_map = heat_map.squeeze(dim=0)\n\n return heat_map\n\n def pixel_space_bboxes(self, min_dist_idxs, proto_dists):\n if not (self.prototype_kernel_size == self.prototype_layer_stride == 1):\n raise NotImplementedError((self.prototype_kernel_size, self.prototype_layer_stride))\n N, P = min_dist_idxs.shape\n # N x P, N x P\n fmap_h_start, fmap_w_start = unravel_index(min_dist_idxs, proto_dists.shape[-2:])\n\n bboxes = []\n for i in range(N):\n bboxes_i = []\n for j in range(P):\n h, w = fmap_h_start[i, j], fmap_w_start[i, j]\n slices_hw = self.rf_slices[h][w]\n assert len(slices_hw) == 1, \"unsupported at the moment\"\n slice_h, slice_w = slices_hw[0]\n x1, y1 = slice_w.start, slice_h.start\n x2, y2 = slice_w.stop, slice_h.stop\n bboxes_i.append([x1, y1, x2, y2])\n bboxes.append(bboxes_i)\n bboxes = torch.tensor(bboxes)\n return bboxes # N x P x 4\n\n def pixel_space_centers_upscale(self, x, min_dist_idxs, proto_dists):\n if not (self.prototype_kernel_size == self.prototype_layer_stride == 1):\n raise NotImplementedError((self.prototype_kernel_size, self.prototype_layer_stride))\n _, _, H, W = x.shape\n Hz, Wz = proto_dists.shape[-2:]\n # N x P, N x P\n fmap_h_start, fmap_w_start = unravel_index(min_dist_idxs, [Hz, Wz])\n\n naive_midpoints_h = torch.round((torch.arange(Hz) + 0.5) * H / Hz).int()\n naive_midpoints_w 
= torch.round((torch.arange(Wz) + 0.5) * W / Wz).int()\n\n centers_x = naive_midpoints_w[fmap_w_start.cpu()]\n centers_y = naive_midpoints_h[fmap_h_start.cpu()]\n\n return centers_x, centers_y # NxP each\n\n def _classify_head_proto2patch(self, distances):\n # global min pooling (N x P x H x W --> N x P x 1 x 1)\n # I.e., the KxK patch of the latent representations z of the input\n # images that is most similar to each of the P prototypes. Output\n # indicates how present each prototype is in the image.\n min_distances, min_dist_idxs = self.global_min_pool(distances)\n # Convert distances to similarity using the log/linear function\n prototype_activations = self.dist_2_sim(min_distances)\n\n # Compute logits (N x C)\n logits = self.last_layer(prototype_activations)\n\n return {\n \"logits\": logits, # N x C\n \"min_distances\": min_distances, # N x P\n \"min_dist_idxs\": min_dist_idxs, # N x P\n \"distances\": distances, # N x P x H x W\n \"max_similarities\": prototype_activations, # N x P\n }\n\n @staticmethod\n def global_min_pool(distances):\n \"\"\"\n To gather `min_distances` using `min_dist_idxs`:\n\n ```python\n distances.flatten(start_dim=2).gather(\n dim=2, index=min_dist_idxs.flatten(start_dim=2)\n ).view_as(min_dist_idxs)\n ```\n\n :param distances:\n :return:\n \"\"\"\n with warnings.catch_warnings():\n # You'd think they would've checked for positionally passed args...\n warnings.filterwarnings(\n \"ignore\", \".*order of the arguments: ceil_mode and \" \"return_indices will change.*\", UserWarning\n )\n min_distances, min_dist_idxs = F.max_pool2d(\n -distances, kernel_size=(distances.size()[2], distances.size()[3]), return_indices=True\n )\n min_distances = -min_distances\n # N x P x 1 x 1 --> N x P\n min_distances = min_distances.view(min_distances.shape[0], min_distances.shape[1])\n min_dist_idxs = min_dist_idxs.view(min_dist_idxs.shape[0], min_dist_idxs.shape[1])\n return min_distances, min_dist_idxs\n\n def push_forward(self, x):\n \"\"\"this method is needed for the pushing operation\"\"\"\n return self.prototype_distances(x)\n\n def set_prototypes(self, new_prototype_vectors, corresponding_sample_idxs=None, min_fmap_idxs=None):\n self.prototype_vectors.data.copy_(new_prototype_vectors)\n err_msg = \"both min_fmap_idxs and corresponding_sample_idxs should be\" \" None or not None\"\n if corresponding_sample_idxs is not None:\n assert min_fmap_idxs is not None, err_msg\n self.corresponding_sample_idxs = corresponding_sample_idxs\n self.min_fmap_idxs = min_fmap_idxs\n else:\n assert min_fmap_idxs is None, err_msg\n\n def prune_prototypes(self, prototypes_to_prune):\n \"\"\"\n prototypes_to_prune: a list of indices each in\n [0, current number of prototypes - 1] that indicates the prototypes to\n be removed\n \"\"\"\n prototypes_to_keep = [*({*range(self.num_prototypes)} - {*prototypes_to_prune})]\n\n self.register_parameter(\n \"prototype_vectors\",\n nn.Parameter(self.prototype_vectors.data[prototypes_to_keep, ...], requires_grad=self.learn_prototypes),\n )\n self.corresponding_sample_idxs = self.corresponding_sample_idxs[prototypes_to_keep, ...]\n self.min_fmap_idxs = self.min_fmap_idxs[prototypes_to_keep, ...]\n\n self.prototype_shape = tuple(self.prototype_vectors.size())\n self.num_prototypes = self.prototype_shape[0]\n\n # changing self.last_layer in place\n # changing in_features and out_features make sure the numbers are\n # consistent\n if self.readout_type != \"linear\":\n raise NotImplementedError(\n f\"Removing prototypes for readout_type={self.readout_type}\" 
f\" is not implemented yet\"\n )\n self.last_layer.in_features = self.num_prototypes\n self.last_layer.out_features = self.num_classes\n self.last_layer.weight.data = self.last_layer.weight.data[:, prototypes_to_keep]\n\n # self.ones is nn.Parameter\n self.ones = self.ones[prototypes_to_keep, ...]\n\n # self.prototype_class_identity is torch tensor\n # so it does not need .data access for value update\n if self.class_specific:\n self.prototype_class_identity = self.prototype_class_identity[prototypes_to_keep, :]\n\n def set_last_layer_incorrect_connection(self):\n \"\"\"\n Initialize weight of last_layer to correct_strength if\n prototype_class_identity is 1 (i.e., the prototype is for that class),\n and to incorrect_strength if prototype_class_identity is 0 (i.e., the\n prototype is not for that class)\n \"\"\"\n positive_one_weights_locations = torch.t(self.prototype_class_identity)\n negative_one_weights_locations = 1 - positive_one_weights_locations\n\n self.last_layer.weight.data.copy_(\n self.correct_strength * positive_one_weights_locations\n + self.incorrect_strength * negative_one_weights_locations\n )\n\n def _initialize_weights(self):\n for name, m in self.add_on_layers.named_children():\n if isinstance(m, nn.Conv2d):\n if name == \"conv_last\":\n # for the sigmoid activation\n nn.init.xavier_normal_(m.weight, gain=1.0)\n else:\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n if self.class_specific and self.readout_type == \"linear\":\n # This is not needed (or valid) for sparse linear or proto\n self.set_last_layer_incorrect_connection()\n elif self.class_specific and self.readout_type == \"sparse\":\n nn.init.ones_(self.last_layer.weight)" }, { "identifier": "protonet", "path": "pixpnet/protonets/models/protonet.py", "snippet": "def protonet(\n feature_extractor,\n feature_layer=None,\n pretrained=True,\n num_prototypes=2000,\n prototype_dim=512,\n prototype_kernel_size=1,\n num_classes=200,\n input_size=224,\n init_weights=True,\n prototype_activation: Union[str, Callable] = \"log\",\n add_on_layers_type=\"regular\",\n class_specific=True,\n epsilon=1e-6,\n learn_prototypes=True,\n incorrect_strength=-0.5,\n correct_strength=1,\n readout_type=\"linear\",\n distance=\"l2\",\n):\n \"\"\"\"\"\"\n if isinstance(feature_extractor, str):\n last_module_name = []\n if feature_layer:\n last_module_name.append(feature_layer)\n if len(last_module_name) == 1:\n last_module_name = last_module_name[0]\n features = get_feature_extractor(\n feature_extractor,\n pretrained=pretrained,\n last_module_name=last_module_name or None,\n )\n _, rf_data = compute_rf_data(feature_extractor, input_size, input_size, num_classes=1)\n rf_layer = rf_data[feature_layer]\n h_z, w_z = rf_layer.shape[-2:]\n rf_slices = []\n for h in range(h_z):\n slices_h = []\n for w in range(w_z):\n rf_feat_hw = take_rf_from_bbox(\n rf_layer, h, w, prototype_kernel_size, prototype_kernel_size, ProtoNet.prototype_layer_stride\n )\n slices_hw = []\n for slice_hw in rf_feat_hw.as_slices(all_channels=True):\n _, _, h_s, w_s = slice_hw\n slices_hw.append((h_s, w_s))\n slices_h.append(slices_hw)\n rf_slices.append(slices_h)\n\n else:\n features = feature_extractor\n rf_slices = None\n\n if feature_layer is None:\n feature_layer = features.last_module_name[0] if features.multi_output else features.last_module_name\n\n return ProtoNet(\n 
features=features,\n feature_layer=feature_layer,\n rf_slices=rf_slices,\n num_prototypes=num_prototypes,\n prototype_dim=prototype_dim,\n prototype_kernel_size=prototype_kernel_size,\n num_classes=num_classes,\n init_weights=init_weights,\n prototype_activation=prototype_activation,\n add_on_layers_type=add_on_layers_type,\n class_specific=class_specific,\n epsilon=epsilon,\n learn_prototypes=learn_prototypes,\n incorrect_strength=incorrect_strength,\n correct_strength=correct_strength,\n readout_type=readout_type,\n distance=distance,\n )" }, { "identifier": "push_prototypes", "path": "pixpnet/protonets/push.py", "snippet": "def push_prototypes(\n dataloader: SubsetWithIdx, protonet, class_specific=True, preprocess_func=None, duplicate_filter=\"sample\"\n):\n \"\"\"push each prototype to the nearest patch in the training set\"\"\"\n was_training = protonet.training\n protonet.eval()\n\n prototype_shape = protonet.prototype_shape\n n_prototypes = protonet.num_prototypes\n prototype_layer_stride = protonet.prototype_layer_stride\n\n device = protonet.prototype_vectors.device\n dtype = protonet.prototype_vectors.dtype\n\n # saves the closest distance seen so far\n min_proto_dists = torch.full((n_prototypes,), torch.inf, dtype=dtype, device=device)\n # saves the patch representation that gives the current smallest distance\n min_fmap_patches = torch.zeros(prototype_shape, dtype=dtype, device=device)\n # saves the sample indices that each prototype corresponds to in dataloader\n min_sample_idxs = protonet.corresponding_sample_idxs\n # save the feature map indices\n min_fmap_idxs = protonet.min_fmap_idxs\n\n with torch.no_grad():\n # Find the closest training images to each prototype across the entire\n # data set (updates closest each batch to achieve global maximums)\n for sample_idxs, x, y in dataloader:\n x = x.to(device)\n y = y.to(device)\n\n _update_prototypes_on_batch(\n sample_idxs=sample_idxs,\n x=x,\n y=y,\n protonet=protonet,\n min_proto_dists=min_proto_dists,\n min_fmap_patches=min_fmap_patches,\n min_sample_idxs=min_sample_idxs,\n min_fmap_idxs=min_fmap_idxs,\n class_specific=class_specific,\n preprocess_func=preprocess_func,\n proto_layer_stride=prototype_layer_stride,\n duplicate_filter=duplicate_filter,\n )\n\n q = torch.tensor([0, 0.25, 0.50, 0.75, 1], dtype=dtype, device=device)\n dist_percentiles = torch.quantile(min_proto_dists, q).tolist()\n logger.info(\n f\"Prototypes pushing distances stats:\\n\"\n f' {\" / \".join(f\"{x * 100:6.2f}%\" for x in q.tolist())}\\n'\n f' {\" / \".join(f\"{x:7.4f}\" for x in dist_percentiles)}\\n'\n f\" {int(torch.isnan(min_proto_dists).sum())} / \"\n f\"{min_proto_dists.numel()} are NaN\"\n )\n\n # Executing push...\n prototype_update = torch.reshape(min_fmap_patches, prototype_shape)\n\n proto_norm_pre = torch.norm(protonet.prototype_vectors)\n proto_norm_post = torch.norm(prototype_update)\n\n logger.info(\n f\"Prototype vector Frobenius norm pre- and post-push: \" f\"{proto_norm_pre:.4f} --> {proto_norm_post:.4f}\"\n )\n\n protonet.set_prototypes(\n prototype_update, # P x D x K x K\n corresponding_sample_idxs=min_sample_idxs, # P\n min_fmap_idxs=min_fmap_idxs, # P x 4\n )\n\n if was_training:\n protonet.train()" }, { "identifier": "get_logger", "path": "pixpnet/utils.py", "snippet": "def get_logger(name):\n logging.basicConfig(\n format=\"%(asctime)s[%(process)d][%(levelname)s] %(message)s\",\n datefmt=\"%Y-%m-%dT%H:%M:%S\",\n )\n logger = logging.getLogger(name)\n logger.setLevel(os.environ.get(\"PIXPNET_LOG_LEVEL\", \"INFO\"))\n 
return logger" }, { "identifier": "intersect_func_and_kwargs", "path": "pixpnet/utils.py", "snippet": "def intersect_func_and_kwargs(func, kwargs, exclude_func_args=None, exclude_kwargs=None, return_invalid=True):\n func_args = {*get_all_func_args(func)} - (set() if exclude_func_args is None else {*exclude_func_args})\n if isinstance(kwargs, argparse.Namespace):\n kwargs = vars(kwargs)\n kwargs_keys = {*kwargs.keys()} - (set() if exclude_kwargs is None else {*exclude_kwargs})\n\n intersecting_keys = kwargs_keys & func_args\n intersected_dict = {k: kwargs[k] for k in intersecting_keys}\n if return_invalid:\n return intersected_dict, kwargs_keys - func_args\n return intersected_dict" } ]
import argparse
import torch
from typing import Tuple
from torch import nn
from torchmetrics import Accuracy
from pytorch_lightning.loops import FitLoop
from pytorch_lightning.loops.fit_loop import _FitLoop as FitLoop
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from pixpnet.data import get_metadata
from pixpnet.lightning.lightning_data import LitData
from pixpnet.lightning.lit_module import BaseLitModel
from pixpnet.optim import get_optimizer_cls, get_scheduler
from pixpnet.protonets.loss import ClusterLoss, L1ReadoutLoss, SeparationLoss
from pixpnet.protonets.models.protonet import ProtoNet, protonet
from pixpnet.protonets.push import push_prototypes
from pixpnet.utils import get_logger, intersect_func_and_kwargs
10,856
# Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL)
#
# SPDX-License-Identifier: AGPL-3.0-or-later

try:
except ImportError:

logger = get_logger(__name__)


def params_with_grad(parameters):
    return filter(lambda p: p.requires_grad, parameters)


def make_optimizers_proto(
    model: ProtoNet,
    config: argparse.Namespace,
) -> Tuple[torch.optim.Optimizer, ...]:
    """"""
# Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL)
#
# SPDX-License-Identifier: AGPL-3.0-or-later

try:
except ImportError:

logger = get_logger(__name__)


def params_with_grad(parameters):
    return filter(lambda p: p.requires_grad, parameters)


def make_optimizers_proto(
    model: ProtoNet,
    config: argparse.Namespace,
) -> Tuple[torch.optim.Optimizer, ...]:
    """"""
optimizer_cls, hparams = get_optimizer_cls(config, ignore={"fine_tune_lr", "readout_lr"})
3
2023-12-06 23:49:31+00:00
16k
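Before the next record: the ProtoNet snippets in the context above reduce an N x P x H x W prototype-patch distance map to per-prototype scores by global min pooling (implemented as max pooling over negated distances) followed by a log activation that turns small distances into large similarities. The following is a minimal standalone sketch of just that reduction; the shapes are made-up assumptions for illustration.

import torch
import torch.nn.functional as F

N, P, H, W = 2, 10, 7, 7                  # batch, prototypes, latent grid (assumed)
epsilon = 1e-6
distances = torch.rand(N, P, H, W)        # stand-in for prototype-patch distances

# global min pooling via max pooling of the negated distances
neg_min, min_idxs = F.max_pool2d(
    -distances, kernel_size=(H, W), return_indices=True)
min_distances = (-neg_min).view(N, P)     # closest patch per prototype

# log activation: similarity grows as the minimum distance shrinks
similarities = torch.log(1 / (min_distances + epsilon) + 1)
print(min_distances.shape, similarities.shape)   # torch.Size([2, 10]) twice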
open-mmlab/PIA
animatediff/pipelines/i2v_pipeline.py
[ { "identifier": "InflatedConv3d", "path": "animatediff/models/resnet.py", "snippet": "class InflatedConv3d(nn.Conv2d):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x" }, { "identifier": "UNet3DConditionModel", "path": "animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n\n # Additional\n use_motion_module = True,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # Image to Video Conv\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n 
num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n\n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n 
motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)\n\n @property\n def attn_processors(self) -> Dict[str, AttnProcessor]:\n r\"\"\"\n Returns:\n `dict` of attention processors: A dictionary containing all attention processors used in the model with\n indexed by its weight name.\n \"\"\"\n # set recursively\n processors = {}\n\n def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttnProcessor]):\n if hasattr(module, \"set_processor\"):\n processors[f\"{name}.processor\"] = module.processor\n\n for sub_name, child in module.named_children():\n fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n return processors\n\n for name, module in self.named_children():\n fn_recursive_add_processors(name, module, processors)\n\n return processors\n\n def set_attn_processor(self, processor: Union[AttnProcessor, Dict[str, AttnProcessor]]):\n r\"\"\"\n Parameters:\n `processor (`dict` of `AttnProcessor` or `AttnProcessor`):\n The instantiated processor class or a dictionary of processor classes that will be set as the processor\n of **all** `CrossAttention` layers.\n In case `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainablae attention processors.:\n\n \"\"\"\n count = len(self.attn_processors.keys())\n\n if isinstance(processor, dict) and len(processor) != count:\n raise ValueError(\n f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n f\" number of attention layers: {count}. Please make sure to pass {count} processor classes.\"\n )\n\n def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n if hasattr(module, \"set_processor\"):\n if not isinstance(processor, dict):\n print(f'Set {module}')\n module.set_processor(processor)\n else:\n print(f'Set {module}')\n module.set_processor(processor.pop(f\"{name}.processor\"))\n\n for sub_name, child in module.named_children():\n fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n for name, module in self.named_children():\n fn_recursive_attn_processor(name, module, processor)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n mask_sample: torch.FloatTensor,\n masked_sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n image_embeds: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # image to video b c f h w\n sample = torch.cat([sample, mask_sample, masked_sample], dim=1).to(sample.device)\n\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * - 10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # prepare for ip-adapter\n if image_embeds is not None:\n image_embeds = self.encoder_hid_proj(\n image_embeds).to(encoder_hidden_states.dtype)\n encoder_hidden_states = torch.cat(\n [encoder_hidden_states, image_embeds], dim=1)\n\n # pre-process\n # b c f h w\n # 2 4 16 64 64\n sample = self.conv_in(sample)\n # down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n down_block_res_samples += res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n\n from diffusers.utils import WEIGHTS_NAME\n model = 
cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n\n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n\n return model" }, { "identifier": "convert_ldm_clip_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_clip_checkpoint(checkpoint):\n keys = list(checkpoint.keys())\n\n text_model_dict = {}\n for key in keys:\n if key.startswith(\"cond_stage_model.transformer\"):\n text_model_dict[key[len(\"cond_stage_model.transformer.\") :]] = checkpoint[key]\n\n return text_model_dict" }, { "identifier": "convert_ldm_unet_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False, controlnet=False):\n \"\"\"\n Takes a state dict and a config, and returns a converted checkpoint.\n \"\"\"\n\n # extract state_dict for UNet\n unet_state_dict = {}\n keys = list(checkpoint.keys())\n\n if controlnet:\n unet_key = \"control_model.\"\n else:\n unet_key = \"model.diffusion_model.\"\n\n # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA\n if sum(k.startswith(\"model_ema\") for k in keys) > 100 and extract_ema:\n print(f\"Checkpoint {path} has both EMA and non-EMA weights.\")\n print(\n \"In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA\"\n \" weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag.\"\n )\n for key in keys:\n if key.startswith(\"model.diffusion_model\"):\n flat_ema_key = \"model_ema.\" + \"\".join(key.split(\".\")[1:])\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(flat_ema_key)\n else:\n if sum(k.startswith(\"model_ema\") for k in keys) > 100:\n print(\n \"In this conversion only the non-EMA weights are extracted. 
If you want to instead extract the EMA\"\n \" weights (usually better for inference), please make sure to add the `--extract_ema` flag.\"\n )\n\n for key in keys:\n if key.startswith(unet_key):\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"time_embedding.linear_1.weight\"] = unet_state_dict[\"time_embed.0.weight\"]\n new_checkpoint[\"time_embedding.linear_1.bias\"] = unet_state_dict[\"time_embed.0.bias\"]\n new_checkpoint[\"time_embedding.linear_2.weight\"] = unet_state_dict[\"time_embed.2.weight\"]\n new_checkpoint[\"time_embedding.linear_2.bias\"] = unet_state_dict[\"time_embed.2.bias\"]\n\n if config[\"class_embed_type\"] is None:\n # No parameters to port\n ...\n elif config[\"class_embed_type\"] == \"timestep\" or config[\"class_embed_type\"] == \"projection\":\n new_checkpoint[\"class_embedding.linear_1.weight\"] = unet_state_dict[\"label_emb.0.0.weight\"]\n new_checkpoint[\"class_embedding.linear_1.bias\"] = unet_state_dict[\"label_emb.0.0.bias\"]\n new_checkpoint[\"class_embedding.linear_2.weight\"] = unet_state_dict[\"label_emb.0.2.weight\"]\n new_checkpoint[\"class_embedding.linear_2.bias\"] = unet_state_dict[\"label_emb.0.2.bias\"]\n else:\n raise NotImplementedError(f\"Not implemented `class_embed_type`: {config['class_embed_type']}\")\n\n new_checkpoint[\"conv_in.weight\"] = unet_state_dict[\"input_blocks.0.0.weight\"]\n new_checkpoint[\"conv_in.bias\"] = unet_state_dict[\"input_blocks.0.0.bias\"]\n\n if not controlnet:\n new_checkpoint[\"conv_norm_out.weight\"] = unet_state_dict[\"out.0.weight\"]\n new_checkpoint[\"conv_norm_out.bias\"] = unet_state_dict[\"out.0.bias\"]\n new_checkpoint[\"conv_out.weight\"] = unet_state_dict[\"out.2.weight\"]\n new_checkpoint[\"conv_out.bias\"] = unet_state_dict[\"out.2.bias\"]\n\n # Retrieves the keys for the input blocks only\n num_input_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"input_blocks\" in layer})\n input_blocks = {\n layer_id: [key for key in unet_state_dict if f\"input_blocks.{layer_id}\" in key]\n for layer_id in range(num_input_blocks)\n }\n\n # Retrieves the keys for the middle blocks only\n num_middle_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"middle_block\" in layer})\n middle_blocks = {\n layer_id: [key for key in unet_state_dict if f\"middle_block.{layer_id}\" in key]\n for layer_id in range(num_middle_blocks)\n }\n\n # Retrieves the keys for the output blocks only\n num_output_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"output_blocks\" in layer})\n output_blocks = {\n layer_id: [key for key in unet_state_dict if f\"output_blocks.{layer_id}\" in key]\n for layer_id in range(num_output_blocks)\n }\n\n for i in range(1, num_input_blocks):\n block_id = (i - 1) // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = (i - 1) % (config[\"layers_per_block\"] + 1)\n\n resnets = [\n key for key in input_blocks[i] if f\"input_blocks.{i}.0\" in key and f\"input_blocks.{i}.0.op\" not in key\n ]\n attentions = [key for key in input_blocks[i] if f\"input_blocks.{i}.1\" in key]\n\n if f\"input_blocks.{i}.0.op.weight\" in unet_state_dict:\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.weight\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.weight\"\n )\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.bias\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.bias\"\n )\n\n paths = renew_resnet_paths(resnets)\n meta_path 
= {\"old\": f\"input_blocks.{i}.0\", \"new\": f\"down_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\"old\": f\"input_blocks.{i}.1\", \"new\": f\"down_blocks.{block_id}.attentions.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n resnet_0 = middle_blocks[0]\n attentions = middle_blocks[1]\n resnet_1 = middle_blocks[2]\n\n resnet_0_paths = renew_resnet_paths(resnet_0)\n assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)\n\n resnet_1_paths = renew_resnet_paths(resnet_1)\n assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)\n\n attentions_paths = renew_attention_paths(attentions)\n meta_path = {\"old\": \"middle_block.1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(\n attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n for i in range(num_output_blocks):\n block_id = i // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = i % (config[\"layers_per_block\"] + 1)\n output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]\n output_block_list = {}\n\n for layer in output_block_layers:\n layer_id, layer_name = layer.split(\".\")[0], shave_segments(layer, 1)\n if layer_id in output_block_list:\n output_block_list[layer_id].append(layer_name)\n else:\n output_block_list[layer_id] = [layer_name]\n\n if len(output_block_list) > 1:\n resnets = [key for key in output_blocks[i] if f\"output_blocks.{i}.0\" in key]\n attentions = [key for key in output_blocks[i] if f\"output_blocks.{i}.1\" in key]\n\n resnet_0_paths = renew_resnet_paths(resnets)\n paths = renew_resnet_paths(resnets)\n\n meta_path = {\"old\": f\"output_blocks.{i}.0\", \"new\": f\"up_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n output_block_list = {k: sorted(v) for k, v in output_block_list.items()}\n if [\"conv.bias\", \"conv.weight\"] in output_block_list.values():\n index = list(output_block_list.values()).index([\"conv.bias\", \"conv.weight\"])\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.weight\"] = unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.weight\"\n ]\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.bias\"] = unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.bias\"\n ]\n\n # Clear attentions as they have been attributed above.\n if len(attentions) == 2:\n attentions = []\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\n \"old\": f\"output_blocks.{i}.1\",\n \"new\": f\"up_blocks.{block_id}.attentions.{layer_in_block_id}\",\n }\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n else:\n resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)\n for path in resnet_0_paths:\n old_path = \".\".join([\"output_blocks\", str(i), path[\"old\"]])\n new_path = \".\".join([\"up_blocks\", str(block_id), \"resnets\", str(layer_in_block_id), path[\"new\"]])\n\n new_checkpoint[new_path] = unet_state_dict[old_path]\n\n if controlnet:\n # conditioning embedding\n\n orig_index = 0\n\n 
new_checkpoint[\"controlnet_cond_embedding.conv_in.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_in.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n orig_index += 2\n\n diffusers_index = 0\n\n while diffusers_index < 6:\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n diffusers_index += 1\n orig_index += 2\n\n new_checkpoint[\"controlnet_cond_embedding.conv_out.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_out.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n # down blocks\n for i in range(num_input_blocks):\n new_checkpoint[f\"controlnet_down_blocks.{i}.weight\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.weight\")\n new_checkpoint[f\"controlnet_down_blocks.{i}.bias\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.bias\")\n\n # mid block\n new_checkpoint[\"controlnet_mid_block.weight\"] = unet_state_dict.pop(\"middle_block_out.0.weight\")\n new_checkpoint[\"controlnet_mid_block.bias\"] = unet_state_dict.pop(\"middle_block_out.0.bias\")\n\n return new_checkpoint" }, { "identifier": "convert_ldm_vae_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_vae_checkpoint(checkpoint, config, only_decoder=False, only_encoder=False):\n # extract state dict for VAE\n vae_state_dict = {}\n vae_key = \"first_stage_model.\"\n keys = list(checkpoint.keys())\n for key in keys:\n if key.startswith(vae_key):\n vae_state_dict[key.replace(vae_key, \"\")] = checkpoint.get(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"encoder.conv_in.weight\"] = vae_state_dict[\"encoder.conv_in.weight\"]\n new_checkpoint[\"encoder.conv_in.bias\"] = vae_state_dict[\"encoder.conv_in.bias\"]\n new_checkpoint[\"encoder.conv_out.weight\"] = vae_state_dict[\"encoder.conv_out.weight\"]\n new_checkpoint[\"encoder.conv_out.bias\"] = vae_state_dict[\"encoder.conv_out.bias\"]\n new_checkpoint[\"encoder.conv_norm_out.weight\"] = vae_state_dict[\"encoder.norm_out.weight\"]\n new_checkpoint[\"encoder.conv_norm_out.bias\"] = vae_state_dict[\"encoder.norm_out.bias\"]\n\n new_checkpoint[\"decoder.conv_in.weight\"] = vae_state_dict[\"decoder.conv_in.weight\"]\n new_checkpoint[\"decoder.conv_in.bias\"] = vae_state_dict[\"decoder.conv_in.bias\"]\n new_checkpoint[\"decoder.conv_out.weight\"] = vae_state_dict[\"decoder.conv_out.weight\"]\n new_checkpoint[\"decoder.conv_out.bias\"] = vae_state_dict[\"decoder.conv_out.bias\"]\n new_checkpoint[\"decoder.conv_norm_out.weight\"] = vae_state_dict[\"decoder.norm_out.weight\"]\n new_checkpoint[\"decoder.conv_norm_out.bias\"] = vae_state_dict[\"decoder.norm_out.bias\"]\n\n new_checkpoint[\"quant_conv.weight\"] = vae_state_dict[\"quant_conv.weight\"]\n new_checkpoint[\"quant_conv.bias\"] = vae_state_dict[\"quant_conv.bias\"]\n new_checkpoint[\"post_quant_conv.weight\"] = vae_state_dict[\"post_quant_conv.weight\"]\n new_checkpoint[\"post_quant_conv.bias\"] = vae_state_dict[\"post_quant_conv.bias\"]\n\n # Retrieves the keys for the encoder down blocks only\n num_down_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"encoder.down\" in layer})\n down_blocks = {\n 
layer_id: [key for key in vae_state_dict if f\"down.{layer_id}\" in key] for layer_id in range(num_down_blocks)\n }\n\n # Retrieves the keys for the decoder up blocks only\n num_up_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"decoder.up\" in layer})\n up_blocks = {\n layer_id: [key for key in vae_state_dict if f\"up.{layer_id}\" in key] for layer_id in range(num_up_blocks)\n }\n\n for i in range(num_down_blocks):\n resnets = [key for key in down_blocks[i] if f\"down.{i}\" in key and f\"down.{i}.downsample\" not in key]\n\n if f\"encoder.down.{i}.downsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.weight\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.weight\"\n )\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.bias\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.bias\"\n )\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"down.{i}.block\", \"new\": f\"down_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"encoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"encoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"encoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n\n for i in range(num_up_blocks):\n block_id = num_up_blocks - 1 - i\n resnets = [\n key for key in up_blocks[block_id] if f\"up.{block_id}\" in key and f\"up.{block_id}.upsample\" not in key\n ]\n\n if f\"decoder.up.{block_id}.upsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.weight\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.weight\"\n ]\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.bias\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.bias\"\n ]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"up.{block_id}.block\", \"new\": f\"up_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"decoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"decoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"decoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, 
additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n\n if only_decoder:\n new_checkpoint = {k: v for k, v in new_checkpoint.items() if k.startswith('decoder') or k.startswith('post_quant')}\n elif only_encoder:\n new_checkpoint = {k: v for k, v in new_checkpoint.items() if k.startswith('encoder') or k.startswith('quant')}\n\n return new_checkpoint" }, { "identifier": "convert_lora_model_level", "path": "animatediff/utils/convert_lora_safetensor_to_diffusers.py", "snippet": "def convert_lora_model_level(state_dict, unet, text_encoder=None, LORA_PREFIX_UNET=\"lora_unet\", LORA_PREFIX_TEXT_ENCODER=\"lora_te\", alpha=0.6):\n \"\"\"convert lora in model level instead of pipeline leval\n \"\"\"\n\n visited = []\n\n # directly update weight in diffusers model\n for key in state_dict:\n # it is suggested to print out the key, it usually will be something like below\n # \"lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight\"\n\n # as we have set the alpha beforehand, so just skip\n if \".alpha\" in key or key in visited:\n continue\n\n if \"text\" in key:\n layer_infos = key.split(\".\")[0].split(LORA_PREFIX_TEXT_ENCODER + \"_\")[-1].split(\"_\")\n assert text_encoder is not None, (\n 'text_encoder must be passed since lora contains text encoder layers')\n curr_layer = text_encoder\n else:\n layer_infos = key.split(\".\")[0].split(LORA_PREFIX_UNET + \"_\")[-1].split(\"_\")\n curr_layer = unet\n\n # find the target layer\n temp_name = layer_infos.pop(0)\n while len(layer_infos) > -1:\n try:\n curr_layer = curr_layer.__getattr__(temp_name)\n if len(layer_infos) > 0:\n temp_name = layer_infos.pop(0)\n elif len(layer_infos) == 0:\n break\n except Exception:\n if len(temp_name) > 0:\n temp_name += \"_\" + layer_infos.pop(0)\n else:\n temp_name = layer_infos.pop(0)\n\n pair_keys = []\n if \"lora_down\" in key:\n pair_keys.append(key.replace(\"lora_down\", \"lora_up\"))\n pair_keys.append(key)\n else:\n pair_keys.append(key)\n pair_keys.append(key.replace(\"lora_up\", \"lora_down\"))\n\n # update weight\n # NOTE: load lycon, meybe have bugs :(\n if 'conv_in' in pair_keys[0]:\n weight_up = state_dict[pair_keys[0]].to(torch.float32)\n weight_down = state_dict[pair_keys[1]].to(torch.float32)\n weight_up = weight_up.view(weight_up.size(0), -1)\n weight_down = weight_down.view(weight_down.size(0), -1)\n shape = [e for e in curr_layer.weight.data.shape]\n shape[1] = 4\n curr_layer.weight.data[:, :4, ...] 
+= alpha * (weight_up @ weight_down).view(*shape)\n elif 'conv' in pair_keys[0]:\n weight_up = state_dict[pair_keys[0]].to(torch.float32)\n weight_down = state_dict[pair_keys[1]].to(torch.float32)\n weight_up = weight_up.view(weight_up.size(0), -1)\n weight_down = weight_down.view(weight_down.size(0), -1)\n shape = [e for e in curr_layer.weight.data.shape]\n curr_layer.weight.data += alpha * (weight_up @ weight_down).view(*shape)\n elif len(state_dict[pair_keys[0]].shape) == 4:\n weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)\n weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)\n curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3).to(curr_layer.weight.data.device)\n else:\n weight_up = state_dict[pair_keys[0]].to(torch.float32)\n weight_down = state_dict[pair_keys[1]].to(torch.float32)\n curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).to(curr_layer.weight.data.device)\n\n # update visited list\n for item in pair_keys:\n visited.append(item)\n\n return unet, text_encoder" }, { "identifier": "prepare_mask_coef_by_statistics", "path": "animatediff/utils/util.py", "snippet": "def prepare_mask_coef_by_statistics(video_length: int, cond_frame: int, sim_range: int):\n assert video_length > 0, \\\n 'video_length should be greater than 0'\n\n assert video_length > cond_frame,\\\n 'video_length should be greater than cond_frame'\n\n range_list = RANGE_LIST\n\n assert sim_range < len(range_list),\\\n f'sim_range type{sim_range} not implemented'\n\n coef = range_list[sim_range]\n coef = coef + ([coef[-1]] * (video_length - len(coef)))\n\n order = [abs(i - cond_frame) for i in range(video_length)]\n coef = [coef[order[i]] for i in range(video_length)]\n\n return coef" } ]
import inspect import os.path as osp import numpy as np import torch from dataclasses import dataclass from typing import Callable, List, Optional, Union from diffusers.configuration_utils import FrozenDict from diffusers.loaders import IPAdapterMixin, TextualInversionLoaderMixin from diffusers.models import AutoencoderKL from diffusers.pipelines import DiffusionPipeline from diffusers.schedulers import (DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler) from diffusers.utils import (BaseOutput, deprecate, is_accelerate_available, logging) from diffusers.utils.import_utils import is_xformers_available from einops import rearrange from omegaconf import OmegaConf from packaging import version from safetensors import safe_open from tqdm import tqdm from transformers import (CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection) from animatediff.models.resnet import InflatedConv3d from animatediff.models.unet import UNet3DConditionModel from animatediff.utils.convert_from_ckpt import (convert_ldm_clip_checkpoint, convert_ldm_unet_checkpoint, convert_ldm_vae_checkpoint) from animatediff.utils.convert_lora_safetensor_to_diffusers import \ convert_lora_model_level from animatediff.utils.util import prepare_mask_coef_by_statistics from accelerate import cpu_offload
11,990
# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py logger = logging.get_logger(__name__) # pylint: disable=invalid-name DEFAULT_N_PROMPT = ('wrong white balance, dark, sketches,worst quality,' 'low quality, deformed, distorted, disfigured, bad eyes, ' 'wrong lips,weird mouth, bad teeth, mutated hands and fingers, ' 'bad anatomy,wrong anatomy, amputation, extra limb, ' 'missing limb, floating,limbs, disconnected limbs, mutation, ' 'ugly, disgusting, bad_pictures, negative_hand-neg') @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class I2VPipeline(DiffusionPipeline, IPAdapterMixin, TextualInversionLoaderMixin): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py logger = logging.get_logger(__name__) # pylint: disable=invalid-name DEFAULT_N_PROMPT = ('wrong white balance, dark, sketches,worst quality,' 'low quality, deformed, distorted, disfigured, bad eyes, ' 'wrong lips,weird mouth, bad teeth, mutated hands and fingers, ' 'bad anatomy,wrong anatomy, amputation, extra limb, ' 'missing limb, floating,limbs, disconnected limbs, mutation, ' 'ugly, disgusting, bad_pictures, negative_hand-neg') @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class I2VPipeline(DiffusionPipeline, IPAdapterMixin, TextualInversionLoaderMixin): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
unet: UNet3DConditionModel,
1
2023-12-21 03:29:34+00:00
16k
xinghaochen/TinySAM
tinysam/hierarchical_mask_generator.py
[ { "identifier": "Sam", "path": "tinysam/modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: Union[ImageEncoderViT, TinyViT],\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x" }, { "identifier": "SamPredictor", "path": "tinysam/predictor.py", "snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. 
Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n #import pdb; pdb.set_trace()\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n return_logits: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form 1xHxW, where\n for SAM, H=W=256.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) 
before mask prediction.\")\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)\n labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n return_logits=return_logits,\n )\n\n masks_np = masks[0].detach().cpu().numpy()\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\n return masks_np, iou_predictions_np, low_res_masks_np\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n boxes (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) 
before mask prediction.\")\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) to generate an embedding.\"\n )\n assert self.features is not None, \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None" }, { "identifier": "MaskData", "path": "tinysam/utils/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an 
unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()" }, { "identifier": "area_from_rle", "path": "tinysam/utils/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])" }, { "identifier": "batch_iterator", "path": "tinysam/utils/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]" }, { "identifier": "batched_mask_to_box", "path": "tinysam/utils/amg.py", "snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out" }, { "identifier": "box_xyxy_to_xywh", "path": "tinysam/utils/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh" }, { "identifier": "build_all_layer_point_grids", "path": "tinysam/utils/amg.py", "snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n return points_by_layer" }, { "identifier": "calculate_stability_score", "path": "tinysam/utils/amg.py", "snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. 
The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions" }, { "identifier": "coco_encode_rle", "path": "tinysam/utils/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle" }, { "identifier": "generate_crop_boxes", "path": "tinysam/utils/amg.py", "snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs" }, { "identifier": "is_box_near_crop_edge", "path": "tinysam/utils/amg.py", "snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)" }, { "identifier": "mask_to_rle_pytorch", "path": "tinysam/utils/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n 
out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out" }, { "identifier": "remove_small_regions", "path": "tinysam/utils/amg.py", "snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True" }, { "identifier": "rle_to_mask", "path": "tinysam/utils/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order" }, { "identifier": "uncrop_boxes_xyxy", "path": "tinysam/utils/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset" }, { "identifier": "uncrop_masks", "path": "tinysam/utils/amg.py", "snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)" }, { "identifier": "uncrop_points", "path": "tinysam/utils/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset" } ]
import numpy as np import torch import cv2 # type: ignore # noqa: F401 from torchvision.ops.boxes import batched_nms, box_area # type: ignore from typing import Any, Dict, List, Optional, Tuple from .modeling import Sam from .predictor import SamPredictor from .utils.amg import ( MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points, ) from pycocotools import mask as mask_utils # type: ignore # noqa: F401
10,985
for sx in [-1, 0, 1]: if or_results[int(point_coords[0] + wstride * 2), int(point_coords[1] + hstride * sx)]: continue new_points.append([(point_coords[0] + wstride * 2) / iw, (point_coords[1] + hstride * sx) / ih]) if point_coords[1] + hstride * 2 < ih: for sy in [-1, 0, 1]: if or_results[int(point_coords[0] + wstride * sy), int(point_coords[1] + hstride * 2)]: continue new_points.append([(point_coords[0] + wstride * sy) / iw, (point_coords[1] + hstride * 2) / ih]) if point_coords[0] + wstride * 2 < iw and point_coords[1] + hstride * 2 < ih: if or_results[int(point_coords[0] + wstride * 2), int(point_coords[1] + hstride * 2)]: continue new_points.append([(point_coords[0] + wstride * 2) / iw, (point_coords[1] + hstride * 2) / ih]) self.set_point_grids([np.array(new_points)]) new_masks = self.generate(image, False) new_masks.cat(ori_masks) new_masks = self.post_process(image, new_masks) return new_masks @torch.no_grad() def generate(self, image: np.ndarray, need_high: bool) -> MaskData: orig_size = image.shape[:2] # Get points for this crop points_scale = np.array(orig_size)[None, ::-1] points_for_image = self.point_grids[0] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, orig_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], return_logits=True, ) # Serialize predictions and store in MaskData batch_data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks if self.pred_iou_thresh > 0.0: keep_mask = batch_data["iou_preds"] > self.pred_iou_thresh batch_data.filter(keep_mask) # Calculate stability score batch_data["stability_score"] = calculate_stability_score( batch_data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = batch_data["stability_score"] >= self.stability_score_thresh batch_data.filter(keep_mask) if need_high: batch_data["high_masks"] = batch_data["masks"] > self.high_score_thresh batch_data["masks"] = batch_data["masks"] > self.predictor.model.mask_threshold batch_data["boxes"] = batched_mask_to_box(batch_data["masks"]) keep_mask = ~is_box_near_crop_edge(batch_data["boxes"], [0, 0, orig_w, orig_h], [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): batch_data.filter(keep_mask) # Compress to RLE batch_data["rles"] = mask_to_rle_pytorch(batch_data["masks"]) data.cat(batch_data) del batch_data if need_high: high_masks = data["high_masks"] or_results = torch.zeros([high_masks.shape[1], high_masks.shape[2]]).to(high_masks.device) for mask in high_masks: or_results = torch.logical_or(or_results, mask) del data["high_masks"] or_results = or_results.permute(1, 0) del data['masks'] return data, or_results else: del data['masks'] return data @torch.no_grad() def reset_image(self): self.predictor.reset_image() @torch.no_grad() def post_process(self, image: np.ndarray, data: MaskData) -> List[Dict[str, Any]]: orig_size = image.shape[:2] orig_h, orig_w = orig_size keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories 
iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: data = self.postprocess_small_regions( data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": data["segmentations"] = [coco_encode_rle(rle) for rle in data["rles"]] elif self.output_mode == "binary_mask":
# Copyright 2023 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ class SamHierarchicalMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, high_score_thresh: float = 8.5, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. high_score_thresh (float): A filtering threshold in [-inf,inf], to find out the unmasked area for the next generation. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. 
Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_side = points_per_side self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.high_score_thresh = high_score_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode def set_point_grids(self, point_grids): self.point_grids = point_grids def set_points_per_side(self, points_per_side): self.point_grids = build_all_layer_point_grids( points_per_side, 0, 1, ) @torch.no_grad() def set_image(self, image: np.ndarray) -> MaskData: # Crop the image and calculate embeddings self.predictor.set_image(image) @torch.no_grad() def hierarchical_generate(self, image: np.ndarray) -> List[Dict[str, Any]]: self.set_image(image) self.set_points_per_side(self.points_per_side // 4) ori_masks, or_results = self.generate(image, True) ih, iw, _ = image.shape hstride = ih // self.points_per_side wstride = iw // self.points_per_side new_points = [] pass_counter = 0 full_point_grids = np.array(self.point_grids) for mask in range(full_point_grids.shape[1]): point_coords = [full_point_grids[0, mask, 0] * iw, full_point_grids[0, mask, 1] * ih] for sy in [-1, 0, 1]: for sx in [-1, 0, 1]: if (sy == 0 and sx == 0) or or_results[int(point_coords[0] + wstride * sy), int(point_coords[1] + hstride * sx)]: continue new_points.append([(point_coords[0] + wstride * sy) / iw, (point_coords[1] + hstride * sx) / ih]) if point_coords[0] + wstride * 2 < iw: for sx in [-1, 0, 1]: if or_results[int(point_coords[0] + wstride * 2), int(point_coords[1] + hstride * sx)]: continue new_points.append([(point_coords[0] + wstride * 2) / iw, (point_coords[1] + hstride * sx) / ih]) if point_coords[1] + hstride * 2 < ih: for sy in [-1, 0, 1]: if or_results[int(point_coords[0] + wstride * sy), int(point_coords[1] + hstride * 2)]: continue new_points.append([(point_coords[0] + wstride * sy) / iw, (point_coords[1] + hstride * 2) / ih]) if point_coords[0] + wstride * 2 < iw and point_coords[1] + hstride * 2 < ih: if or_results[int(point_coords[0] + wstride * 2), int(point_coords[1] + hstride * 2)]: continue new_points.append([(point_coords[0] + wstride * 2) / iw, (point_coords[1] + hstride * 2) / ih]) self.set_point_grids([np.array(new_points)]) new_masks = self.generate(image, False) new_masks.cat(ori_masks) new_masks = self.post_process(image, new_masks) return new_masks @torch.no_grad() def generate(self, image: np.ndarray, need_high: bool) -> MaskData: orig_size = 
image.shape[:2] # Get points for this crop points_scale = np.array(orig_size)[None, ::-1] points_for_image = self.point_grids[0] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, orig_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], return_logits=True, ) # Serialize predictions and store in MaskData batch_data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks if self.pred_iou_thresh > 0.0: keep_mask = batch_data["iou_preds"] > self.pred_iou_thresh batch_data.filter(keep_mask) # Calculate stability score batch_data["stability_score"] = calculate_stability_score( batch_data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = batch_data["stability_score"] >= self.stability_score_thresh batch_data.filter(keep_mask) if need_high: batch_data["high_masks"] = batch_data["masks"] > self.high_score_thresh batch_data["masks"] = batch_data["masks"] > self.predictor.model.mask_threshold batch_data["boxes"] = batched_mask_to_box(batch_data["masks"]) keep_mask = ~is_box_near_crop_edge(batch_data["boxes"], [0, 0, orig_w, orig_h], [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): batch_data.filter(keep_mask) # Compress to RLE batch_data["rles"] = mask_to_rle_pytorch(batch_data["masks"]) data.cat(batch_data) del batch_data if need_high: high_masks = data["high_masks"] or_results = torch.zeros([high_masks.shape[1], high_masks.shape[2]]).to(high_masks.device) for mask in high_masks: or_results = torch.logical_or(or_results, mask) del data["high_masks"] or_results = or_results.permute(1, 0) del data['masks'] return data, or_results else: del data['masks'] return data @torch.no_grad() def reset_image(self): self.predictor.reset_image() @torch.no_grad() def post_process(self, image: np.ndarray, data: MaskData) -> List[Dict[str, Any]]: orig_size = image.shape[:2] orig_h, orig_w = orig_size keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: data = self.postprocess_small_regions( data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": data["segmentations"] = [coco_encode_rle(rle) for rle in data["rles"]] elif self.output_mode == "binary_mask":
data["segmentations"] = [rle_to_mask(rle) for rle in data["rles"]]
14
2023-12-19 11:25:54+00:00
16k
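The row above centers on SamHierarchicalMaskGenerator.hierarchical_generate: a coarse pass at points_per_side // 4 yields masks plus an or_results map that unions every high-scoring mask, and a second pass only places new prompt points where that map is still uncovered before merging and running NMS in post_process. Below is a minimal sketch of just that point-refinement step, not the repository's implementation: it assumes only NumPy, uses plain (row, col) indexing rather than the permuted layout in the snippet, and the helper name refine_point_grid is hypothetical.

# Sketch (assumption-laden): resample prompt points only where coverage is missing.
# `coverage` is an (H, W) boolean map of pixels already explained by high-score masks,
# analogous to `or_results` in the row above; `refine_point_grid` is a hypothetical helper.
import numpy as np

def refine_point_grid(coarse_grid: np.ndarray, coverage: np.ndarray) -> np.ndarray:
    """coarse_grid: (N, 2) normalized (x, y) points in [0, 1]; coverage: (H, W) bool."""
    ih, iw = coverage.shape
    n_side = max(int(round(np.sqrt(len(coarse_grid)))), 1)
    wstride, hstride = iw // n_side, ih // n_side
    new_points = []
    for x_norm, y_norm in coarse_grid:
        cx, cy = x_norm * iw, y_norm * ih
        # Probe a 3x3 neighborhood around each coarse point, skipping the point itself.
        for sy in (-1, 0, 1):
            for sx in (-1, 0, 1):
                if sx == 0 and sy == 0:
                    continue
                px, py = cx + wstride * sx, cy + hstride * sy
                if not (0 <= px < iw and 0 <= py < ih):
                    continue
                if coverage[int(py), int(px)]:
                    continue  # already explained by a high-score mask
                new_points.append([px / iw, py / ih])
    return np.array(new_points) if new_points else np.empty((0, 2))

# Usage: a 2x2 coarse grid over a 64x64 image whose left half is already covered.
coverage = np.zeros((64, 64), dtype=bool)
coverage[:, :32] = True
coarse = np.array([[0.25, 0.25], [0.75, 0.25], [0.25, 0.75], [0.75, 0.75]])
extra = refine_point_grid(coarse, coverage)
print(len(extra), "extra prompt points, all in the uncovered right half")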
DeepWok/mase
machop/chop/passes/graph/transforms/verilog/emit_tb.py
[ { "identifier": "emit_data_in_tb_sv", "path": "machop/chop/passes/graph/transforms/verilog/emit_tb_data_in.py", "snippet": "def emit_data_in_tb_sv(data_width, load_path, out_file):\n buff = f\"\"\"\n`timescale 1 ns / 1 ps\n\nmodule AESL_autofifo_data_in_V (\n clk,\n reset,\n if_empty_n,\n if_read,\n if_dout,\n if_full_n,\n if_write,\n if_din,\n ready,\n done\n);\n\n //------------------------Parameter----------------------\n localparam TV_IN = \"{load_path}\";\n\n //------------------------Local signal-------------------\n parameter DATA_WIDTH = 32'd{data_width};\n parameter ADDR_WIDTH = 32'd1;\n parameter DEPTH = 32'd1;\n\n // Input and Output\n input clk;\n input reset;\n input if_write;\n input [DATA_WIDTH - 1 : 0] if_din;\n output if_full_n;\n input if_read;\n output [DATA_WIDTH - 1 : 0] if_dout;\n output if_empty_n;\n input ready;\n input done;\n\n // Inner signals\n reg [DATA_WIDTH - 1 : 0] mem[0 : DEPTH - 1];\n initial begin : initialize_mem\n integer i;\n for (i = 0; i < DEPTH; i = i + 1) begin\n mem[i] = 0;\n end\n end\n reg [ADDR_WIDTH : 0] mInPtr = 0;\n reg [ADDR_WIDTH : 0] mOutPtr = 0;\n reg mFlag_hint; // 0: empty hint, 1: full hint\n\n assign if_dout = (mOutPtr >= DEPTH) ? 0 : mem[mOutPtr];\n assign if_empty_n = ((mInPtr == mOutPtr) && mFlag_hint == 1'b0)? 1'b 0: 1'b 1;\n assign if_full_n = ((mInPtr == mOutPtr) && mFlag_hint == 1'b1)? 1'b 0: 1'b 1;\n\n //------------------------Task and function--------------\n task read_token;\n input integer fp;\n output reg [127 : 0] token;\n integer ret;\n begin\n token = \"\";\n ret = 0;\n ret = $fscanf(fp, \"%s\", token);\n end\n endtask\n\n //------------------------Read-only fifo-------------------\n\n // Write operation for read_only fifo\n initial begin : read_file_process\n integer fp;\n integer err;\n integer ret;\n integer transaction_idx;\n reg [127 : 0] token;\n reg [8*5 : 1] str;\n reg [DATA_WIDTH - 1 : 0] mem_tmp;\n mInPtr = 0;\n mFlag_hint = 0;\n transaction_idx = 0;\n fp = $fopen(TV_IN, \"r\");\n if (fp == 0) begin // Failed to open file\n $display(\"Failed to open file \\\\\\\"%s\\\\\\\"!\", TV_IN);\n $finish;\n end\n read_token(fp, token);\n if (token != \"[[[runtime]]]\") begin // Illegal format\n $display(\"ERROR: Simulation using HLS TB failed.\");\n $finish;\n end\n read_token(fp, token);\n while (token != \"[[[/runtime]]]\") begin\n if (token != \"[[transaction]]\") begin\n $display(\"ERROR: Simulation using HLS TB failed.\");\n $finish;\n end\n read_token(fp, token); // skip transaction number\n\n @(posedge clk);\n #0.2;\n while (ready !== 1) begin\n @(posedge clk);\n #0.2;\n end\n read_token(fp, token);\n mInPtr = 0;\n while (token != \"[[/transaction]]\") begin\n if (mInPtr >= DEPTH) begin\n $display(\"Fifo overflow!\");\n $finish;\n end\n ret = $sscanf(token, \"0x%x\", mem_tmp);\n mem[mInPtr] = mem_tmp;\n if (ret != 1) begin\n $display(\"Failed to parse token!\");\n $finish;\n end\n mInPtr = mInPtr + 1;\n read_token(fp, token);\n end\n mFlag_hint = 0;\n read_token(fp, token);\n transaction_idx = transaction_idx + 1;\n end\n $fclose(fp);\n @(posedge clk);\n #0.2;\n while (ready !== 1) begin\n @(posedge clk);\n #0.2;\n end\n mInPtr = 0;\n end\n\n // Read operation for read_only fifo\n always @(posedge clk) begin\n if (reset === 1) begin\n mOutPtr = 0;\n end else if (if_read === 1) begin\n if (mOutPtr < mInPtr) mOutPtr <= mOutPtr + 1;\n end\n end\n\n // Reset mOutPtr when done is pulled up\n initial begin : done_reset_mOutPtr_process\n while (1) begin\n @(posedge clk);\n #0.1;\n while (ready !== 1) begin\n 
@(posedge clk);\n #0.1;\n end\n mOutPtr = 0;\n end\n end\n\nendmodule\n\"\"\"\n with open(out_file, \"w\", encoding=\"utf-8\") as outf:\n outf.write(buff)\n logger.debug(f\"Input data fifo emitted to {out_file}\")\n assert os.path.isfile(out_file), \"Emitting input data fifo failed.\"\n os.system(f\"verible-verilog-format --inplace {out_file}\")" }, { "identifier": "emit_data_in_tb_dat", "path": "machop/chop/passes/graph/transforms/verilog/emit_tb_data_in.py", "snippet": "def emit_data_in_tb_dat(node, data_in, out_file):\n in_size = node.meta[\"mase\"].parameters[\"hardware\"][\"verilog_param\"][\n \"DATA_IN_0_TENSOR_SIZE_DIM_0\"\n ]\n in_width = node.meta[\"mase\"].parameters[\"common\"][\"args\"][\"data_in_0\"][\"precision\"][\n 0\n ]\n assert len(data_in[0]) % in_size == 0\n\n trans = \"\"\"[[transaction]] {}\n{}\n[[/transaction]]\n\"\"\"\n\n data = [x for trans in data_in for x in trans]\n data_buff = \"\"\n trans_count = 0\n value = 0\n for i, d in enumerate(data):\n if in_size == 1 or i % in_size == in_size - 1:\n data_buff += trans.format(trans_count, hex(value))\n trans_count += 1\n value = 0\n else:\n for _ in range(0, i % in_size):\n d = d << in_width\n value = value + d\n\n buff = f\"\"\"[[[runtime]]]\n{data_buff}[[[/runtime]]]\n\"\"\"\n\n with open(out_file, \"w\", encoding=\"utf-8\") as outf:\n outf.write(buff)\n logger.debug(f\"Input data fifo emitted to {out_file}\")\n assert os.path.isfile(out_file), \"Emitting input data fifo failed.\"" }, { "identifier": "emit_data_out_tb_sv", "path": "machop/chop/passes/graph/transforms/verilog/emit_tb_data_out.py", "snippet": "def emit_data_out_tb_sv(data_width, load_path, store_path, out_file):\n buff = f\"\"\"\n`timescale 1 ns / 1 ps\n\nmodule AESL_autofifo_data_out_V (\n clk,\n reset,\n if_empty_n,\n if_read,\n if_dout,\n if_full_n,\n if_write,\n if_din,\n ready,\n done\n);\n\n //------------------------Parameter----------------------\n localparam\n\tTV_IN\t=\t\"{load_path}\",\n\tTV_OUT\t=\t\"{store_path}\";\n\n //------------------------Local signal-------------------\n parameter DATA_WIDTH = 32'd{data_width};\n parameter ADDR_WIDTH = 32'd1;\n parameter DEPTH = 32'd1;\n\n // Input and Output\n input clk;\n input reset;\n input if_write;\n input [DATA_WIDTH - 1 : 0] if_din;\n output if_full_n;\n input if_read;\n output [DATA_WIDTH - 1 : 0] if_dout;\n output if_empty_n;\n input ready;\n input done;\n\n // Inner signals\n reg [DATA_WIDTH - 1 : 0] mem[0 : DEPTH - 1];\n initial begin : initialize_mem\n integer i;\n for (i = 0; i < DEPTH; i = i + 1) begin\n mem[i] = 0;\n end\n end\n reg [ADDR_WIDTH : 0] mInPtr = 0;\n reg [ADDR_WIDTH : 0] mOutPtr = 0;\n reg mFlag_hint; // 0: empty hint, 1: full hint\n\n assign if_dout = (mOutPtr >= DEPTH) ? 0 : mem[mOutPtr];\n assign if_empty_n = ((mInPtr == mOutPtr) && mFlag_hint == 1'b0)? 1'b 0: 1'b 1;\n assign if_full_n = ((mInPtr == mOutPtr) && mFlag_hint == 1'b1)? 
1'b 0: 1'b 1;\n\n //------------------------Task and function--------------\n task read_token;\n input integer fp;\n output reg [127 : 0] token;\n integer ret;\n begin\n token = \"\";\n ret = 0;\n ret = $fscanf(fp, \"%s\", token);\n end\n endtask\n\n //------------------------Write-only fifo-------------------\n\n // Write operation for write-only fifo\n always @(posedge clk) begin\n if (reset === 1) begin\n mInPtr = 0;\n end else if (if_write) begin\n if (mInPtr < DEPTH) begin\n mem[mInPtr] = if_din;\n mInPtr <= mInPtr + 1;\n end\n end\n end\n\n // Reset mInPtr when done is pulled up\n initial begin : done_reset_mInPtr_process\n while (1) begin\n @(posedge clk);\n #0.2;\n while (done !== 1) begin\n @(posedge clk);\n #0.2;\n end\n mInPtr = 0;\n end\n end\n\n // Read operation for write-only fifo\n initial begin : write_file_process\n integer fp;\n integer transaction_idx;\n reg [8*5 : 1] str;\n integer idx;\n transaction_idx = 0;\n mOutPtr = DEPTH;\n mFlag_hint = 1;\n while (1) begin\n @(posedge clk);\n #0.1;\n while (done !== 1) begin\n @(posedge clk);\n #0.1;\n end\n fp = $fopen(TV_OUT, \"a\");\n if (fp == 0) begin // Failed to open file\n $display(\"Failed to open file \\\\\\\"%s\\\\\\\"!\", TV_OUT);\n $finish;\n end\n $fdisplay(fp, \"[[transaction]] %d\", transaction_idx);\n for (idx = 0; idx < mInPtr; idx = idx + 1) begin\n $fdisplay(fp, \"0x%x\", mem[idx]);\n end\n $fdisplay(fp, \"[[/transaction]]\");\n transaction_idx = transaction_idx + 1;\n $fclose(fp);\n end\n end\n\nendmodule\n\"\"\"\n\n with open(out_file, \"w\", encoding=\"utf-8\") as outf:\n outf.write(buff)\n logger.debug(f\"Output data fifo emitted to {out_file}\")\n assert os.path.isfile(out_file), \"Emitting output data fifo failed.\"\n os.system(f\"verible-verilog-format --inplace {out_file}\")" }, { "identifier": "emit_data_out_tb_dat", "path": "machop/chop/passes/graph/transforms/verilog/emit_tb_data_out.py", "snippet": "def emit_data_out_tb_dat(node, data_out, out_file):\n out_size = node.meta[\"mase\"].parameters[\"hardware\"][\"verilog_param\"][\n \"DATA_OUT_0_TENSOR_SIZE_0_DIM_0\"\n ]\n out_width = node.meta[\"mase\"].parameters[\"common\"][\"results\"][\"data_out_0\"][\n \"precision\"\n ][0]\n assert (\n len(data_out[0]) % out_size == 0\n ), f\"Cannot perfectly partition: {len(data_out[0])}/{out_size}\"\n\n trans = \"\"\"[[transaction]] {}\n{}\n[[/transaction]]\n\"\"\"\n\n data = [x for trans in data_out for x in trans]\n data_buff = \"\"\n trans_count = 0\n value = 0\n for i, d in enumerate(data):\n if out_size == 1 or i % out_size == out_size - 1:\n data_buff += trans.format(trans_count, hex(value))\n trans_count += 1\n value = 0\n else:\n for _ in range(0, i % out_size):\n d = d << out_width\n value = value + d\n\n buff = f\"\"\"[[[runtime]]]\n{data_buff}[[[/runtime]]]\n\"\"\"\n\n with open(out_file, \"w\", encoding=\"utf-8\") as outf:\n outf.write(buff)\n logger.debug(f\"Input data fifo emitted to {out_file}\")\n assert os.path.isfile(out_file), \"Emitting input data fifo failed.\"" }, { "identifier": "emit_top_tb", "path": "machop/chop/passes/graph/transforms/verilog/emit_tb_testbench.py", "snippet": "def emit_top_tb(\n tv_dir,\n top_name,\n out_file,\n in_width,\n in_size,\n out_width,\n out_size,\n in_trans_num,\n out_trans_num,\n):\n sw_data_in = os.path.join(tv_dir, \"sw_data_in.dat\")\n sw_data_out = os.path.join(tv_dir, \"sw_data_out.dat\")\n hw_data_out = os.path.join(tv_dir, \"hw_data_out.dat\")\n hw_stream_size = os.path.join(tv_dir, \"data_in_stream_size.dat\")\n\n buff = f\"\"\"\n`timescale 1ns / 
1ps\n\n\n`define AUTOTB_DUT {top_name}\n`define AUTOTB_DUT_INST AESL_inst_{top_name}\n`define AUTOTB_TOP {top_name}_tb\n`define AUTOTB_LAT_RESULT_FILE \"{top_name}.result.lat.rb\"\n`define AUTOTB_PER_RESULT_TRANS_FILE \"{top_name}.performance.result.transaction.xml\"\n`define AUTOTB_TOP_INST AESL_inst_apatb_{top_name}_top\n`define AUTOTB_MAX_ALLOW_LATENCY 15000000\n`define AUTOTB_CLOCK_PERIOD_DIV2 5.00\n\n`define AESL_FIFO_data_in_V AESL_autofifo_data_in_V\n`define AESL_FIFO_INST_data_in_V AESL_autofifo_inst_data_in_V\n`define AESL_FIFO_data_out_V AESL_autofifo_data_out_V\n`define AESL_FIFO_INST_data_out_V AESL_autofifo_inst_data_out_V\n`define SW_DATA_IN_DAT \"{sw_data_in}\"\n`define SW_DATA_OUT_DAT \"{sw_data_out}\"\n`define HW_DATA_OUT_DAT \"{hw_data_out}\"\nmodule `AUTOTB_TOP;\n\n parameter IN_TRANSACTION_NUM = {in_trans_num};\n parameter OUT_TRANSACTION_NUM = {out_trans_num};\n parameter PROGRESS_TIMEOUT = 10000000;\n parameter LATENCY_ESTIMATION = 0;\n parameter LENGTH_data_in_V = 1;\n parameter LENGTH_data_out_V = 1;\n parameter TOKEN_WIDTH = {max(128, 2*out_width*out_size)+16};\n parameter IN_WIDTH = {in_width};\n parameter IN_SIZE = {in_size};\n parameter OUT_WIDTH = {out_width};\n parameter OUT_SIZE = {out_size};\n\n task read_token;\n input integer fp;\n output reg [TOKEN_WIDTH-1 : 0] token;\n integer ret;\n begin\n token = \"\";\n ret = 0;\n ret = $fscanf(fp, \"%s\", token);\n end\n endtask\n\n task post_check;\n input integer fp1;\n input integer fp2;\n reg [TOKEN_WIDTH-1 : 0] token1;\n reg [TOKEN_WIDTH-1 : 0] token2;\n reg [TOKEN_WIDTH-1 : 0] golden;\n reg [TOKEN_WIDTH-1 : 0] result;\n integer ret;\n begin\n read_token(fp1, token1);\n read_token(fp2, token2);\n if (token1 != \"[[[runtime]]]\" || token2 != \"[[[runtime]]]\") begin\n $display(\"ERROR: Simulation using HLS TB failed.\");\n $finish;\n end\n read_token(fp1, token1);\n read_token(fp2, token2);\n while (token1 != \"[[[/runtime]]]\" && token2 != \"[[[/runtime]]]\") begin\n if (token1 != \"[[transaction]]\" || token2 != \"[[transaction]]\") begin\n $display(\"ERROR: Simulation using HLS TB failed.\");\n $finish;\n end\n read_token(fp1, token1); // skip transaction number\n read_token(fp2, token2); // skip transaction number\n read_token(fp1, token1);\n read_token(fp2, token2);\n while (token1 != \"[[/transaction]]\" && token2 != \"[[/transaction]]\") begin\n ret = $sscanf(token1, \"0x%x\", golden);\n if (ret != 1) begin\n $display(\"Failed to parse token!\");\n $display(\"ERROR: Simulation using HLS TB failed.\");\n $finish;\n end\n ret = $sscanf(token2, \"0x%x\", result);\n if (ret != 1) begin\n $display(\"Failed to parse token!\");\n $display(\"ERROR: Simulation using HLS TB failed.\");\n $finish;\n end\n if (golden != result) begin\n $display(\"%x (expected) vs. 
%x (actual) - mismatch\", golden, result);\n $display(\"ERROR: Simulation using HLS TB failed.\");\n $finish;\n end\n read_token(fp1, token1);\n read_token(fp2, token2);\n end\n read_token(fp1, token1);\n read_token(fp2, token2);\n end\n end\n endtask\n\n reg AESL_clock;\n reg rst;\n reg dut_rst;\n reg start;\n reg ce;\n reg tb_continue;\n wire AESL_start;\n wire AESL_reset;\n wire AESL_ce;\n wire AESL_ready;\n wire AESL_idle;\n wire AESL_continue;\n wire AESL_done;\n reg AESL_done_delay = 0;\n reg AESL_done_delay2 = 0;\n reg AESL_ready_delay = 0;\n wire ready;\n wire ready_wire;\n wire ap_start;\n wire ap_done;\n wire ap_idle;\n wire ap_ready;\n wire [IN_WIDTH*IN_SIZE-1 : 0] data_in_V_dout;\n wire data_in_V_empty_n;\n wire data_in_V_read;\n wire [OUT_WIDTH*OUT_SIZE-1 : 0] data_out_V_din;\n wire data_out_V_full_n;\n wire data_out_V_write;\n integer done_cnt = 0;\n integer AESL_ready_cnt = 0;\n integer ready_cnt = 0;\n reg ready_initial;\n reg ready_initial_n;\n reg ready_last_n;\n reg ready_delay_last_n;\n reg done_delay_last_n;\n reg interface_done = 0;\n\n wire ap_clk;\n wire ap_rst;\n wire ap_rst_n;\n\n wire [IN_WIDTH-1:0] data_in[IN_SIZE-1:0];\n wire [OUT_WIDTH-1:0] data_out[OUT_SIZE-1:0];\n for (genvar i = 0; i < IN_SIZE; i++)\n assign data_in[i] = data_in_V_dout[i*IN_WIDTH+IN_WIDTH-1:i*IN_WIDTH];\n for (genvar i = 0; i < OUT_SIZE; i++)\n assign data_out_V_din[i*OUT_WIDTH+OUT_WIDTH-1:i*OUT_WIDTH] = data_out[i];\n\n `AUTOTB_DUT `AUTOTB_DUT_INST(\n .clk(ap_clk),\n .rst(ap_rst),\n .data_in(data_in),\n .data_in_valid(data_in_V_empty_n),\n .data_in_ready(data_in_V_read),\n .data_out(data_out),\n .data_out_ready(data_out_V_full_n),\n .data_out_valid(data_out_V_write));\n\n assign ap_done = data_out_V_write;\n assign ap_ready = data_out_V_write;\n assign ap_idle = ~ap_start;\n\n // Assignment for control signal\n assign ap_clk = AESL_clock;\n assign ap_rst = dut_rst;\n assign ap_rst_n = ~dut_rst;\n assign AESL_reset = rst;\n assign ap_start = AESL_start;\n assign AESL_start = start;\n assign AESL_done = ap_done;\n assign AESL_idle = ap_idle;\n assign AESL_ready = ap_ready;\n assign AESL_ce = ce;\n assign AESL_continue = tb_continue;\n always @(posedge AESL_clock) begin\n if (AESL_reset) begin\n end else begin\n if (AESL_done !== 1 && AESL_done !== 0) begin\n $display(\"ERROR: Control signal AESL_done is invalid!\");\n $finish;\n end\n end\n end\n always @(posedge AESL_clock) begin\n if (AESL_reset) begin\n end else begin\n if (AESL_ready !== 1 && AESL_ready !== 0) begin\n $display(\"ERROR: Control signal AESL_ready is invalid!\");\n $finish;\n end\n end\n end\n // Fifo Instantiation data_in_V\n\n wire fifodata_in_V_rd;\n wire [IN_WIDTH*IN_SIZE-1 : 0] fifodata_in_V_dout;\n wire fifodata_in_V_empty_n;\n wire fifodata_in_V_ready;\n wire fifodata_in_V_done;\n reg [31:0] ap_c_n_tvin_trans_num_data_in_V;\n reg data_in_V_ready_reg;\n\n `AESL_FIFO_data_in_V `AESL_FIFO_INST_data_in_V(\n .clk (AESL_clock),\n .reset (AESL_reset),\n .if_write (),\n .if_din (),\n .if_full_n (),\n .if_read (fifodata_in_V_rd),\n .if_dout (fifodata_in_V_dout),\n .if_empty_n (fifodata_in_V_empty_n),\n .ready (fifodata_in_V_ready),\n .done (fifodata_in_V_done));\n\n // Assignment between dut and fifodata_in_V\n\n // Assign input of fifodata_in_V\n assign fifodata_in_V_rd = data_in_V_read & data_in_V_empty_n;\n assign fifodata_in_V_ready = data_in_V_ready_reg | ready_initial;\n assign fifodata_in_V_done = 0;\n // Assign input of dut\n assign data_in_V_dout = fifodata_in_V_dout;\n reg reg_fifodata_in_V_empty_n;\n initial begin 
: gen_reg_fifodata_in_V_empty_n_process\n integer proc_rand;\n reg_fifodata_in_V_empty_n = fifodata_in_V_empty_n;\n while (1) begin\n @(fifodata_in_V_empty_n);\n reg_fifodata_in_V_empty_n = fifodata_in_V_empty_n;\n end\n end\n\n assign data_in_V_empty_n = reg_fifodata_in_V_empty_n;\n\n\n //------------------------Fifodata_out_V Instantiation--------------\n\n // The input and output of fifodata_out_V\n wire fifodata_out_V_wr;\n wire [OUT_SIZE*OUT_WIDTH-1 : 0] fifodata_out_V_din;\n wire fifodata_out_V_full_n;\n wire fifodata_out_V_ready;\n wire fifodata_out_V_done;\n\n `AESL_FIFO_data_out_V `AESL_FIFO_INST_data_out_V(\n .clk (AESL_clock),\n .reset (AESL_reset),\n .if_write (fifodata_out_V_wr),\n .if_din (fifodata_out_V_din),\n .if_full_n (fifodata_out_V_full_n),\n .if_read (),\n .if_dout (),\n .if_empty_n (),\n .ready (fifodata_out_V_ready),\n .done (fifodata_out_V_done));\n\n // Assignment between dut and fifodata_out_V\n\n // Assign input of fifodata_out_V\n assign fifodata_out_V_wr = data_out_V_write & data_out_V_full_n;\n assign fifodata_out_V_din = data_out_V_din;\n assign fifodata_out_V_ready = 0; //ready_initial | AESL_done_delay;\n assign fifodata_out_V_done = AESL_done_delay;\n // Assign input of dut\n reg reg_fifodata_out_V_full_n;\n initial begin : gen_reg_fifodata_out_V_full_n_process\n integer proc_rand;\n reg_fifodata_out_V_full_n = fifodata_out_V_full_n;\n while (1) begin\n @(fifodata_out_V_full_n);\n reg_fifodata_out_V_full_n = fifodata_out_V_full_n;\n end\n end\n\n assign data_out_V_full_n = reg_fifodata_out_V_full_n;\n\n\n initial begin : generate_AESL_ready_cnt_proc\n AESL_ready_cnt = 0;\n wait (AESL_reset === 0);\n while (AESL_ready_cnt != OUT_TRANSACTION_NUM) begin\n while (AESL_ready !== 1) begin\n @(posedge AESL_clock);\n #0.4;\n end\n @(negedge AESL_clock);\n AESL_ready_cnt = AESL_ready_cnt + 1;\n @(posedge AESL_clock);\n #0.4;\n end\n end\n\n event next_trigger_ready_cnt;\n\n initial begin : gen_ready_cnt\n ready_cnt = 0;\n wait (AESL_reset === 0);\n forever begin\n @(posedge AESL_clock);\n if (ready == 1) begin\n if (ready_cnt < OUT_TRANSACTION_NUM) begin\n ready_cnt = ready_cnt + 1;\n end\n end\n ->next_trigger_ready_cnt;\n end\n end\n\n wire all_finish = (done_cnt == OUT_TRANSACTION_NUM);\n\n // done_cnt\n always @(posedge AESL_clock) begin\n if (AESL_reset) begin\n done_cnt <= 0;\n end else begin\n if (AESL_done == 1) begin\n if (done_cnt < OUT_TRANSACTION_NUM) begin\n done_cnt <= done_cnt + 1;\n end\n end\n end\n end\n\n initial begin : finish_simulation\n integer fp1;\n integer fp2;\n wait (all_finish == 1);\n // last transaction is saved at negedge right after last done\n @(posedge AESL_clock);\n @(posedge AESL_clock);\n @(posedge AESL_clock);\n @(posedge AESL_clock);\n fp1 = $fopen(`SW_DATA_OUT_DAT, \"r\");\n fp2 = $fopen(`HW_DATA_OUT_DAT, \"r\");\n if (fp1 == 0) // Failed to open file\n $display(\"Failed to open file \\\\\\\"%s\\\\\\\"\", `SW_DATA_OUT_DAT);\n else if (fp2 == 0) $display(\"Failed to open file \\\\\\\"%s\\\\\\\"\", `HW_DATA_OUT_DAT);\n else begin\n $display(\n \"Comparing \\\\\\\"%s\\\\\\\" with \\\\\\\"%s\\\\\\\"\", `SW_DATA_OUT_DAT, `HW_DATA_OUT_DAT);\n post_check(fp1, fp2);\n end\n $fclose(fp1);\n $fclose(fp2);\n $display(\"Simulation PASS.\");\n $finish;\n end\n\n initial begin\n AESL_clock = 0;\n forever #`AUTOTB_CLOCK_PERIOD_DIV2 AESL_clock = ~AESL_clock;\n end\n\n\n reg end_data_in_V;\n reg [31:0] size_data_in_V;\n reg [31:0] size_data_in_V_backup;\n reg end_data_out_V;\n reg [31:0] size_data_out_V;\n reg [31:0] 
size_data_out_V_backup;\n\n initial begin : initial_process\n integer proc_rand;\n rst = 1;\n #100;\n repeat (0 + 3) @(posedge AESL_clock);\n rst = 0;\n end\n initial begin : initial_process_for_dut_rst\n integer proc_rand;\n dut_rst = 1;\n #100;\n repeat (3) @(posedge AESL_clock);\n dut_rst = 0;\n end\n initial begin : start_process\n integer proc_rand;\n reg [31:0] start_cnt;\n ce = 1;\n start = 0;\n start_cnt = 0;\n wait (AESL_reset === 0);\n @(posedge AESL_clock);\n #0 start = 1;\n start_cnt = start_cnt + 1;\n forever begin\n if (start_cnt >= OUT_TRANSACTION_NUM + 1) begin\n #0 start = 0;\n end\n @(posedge AESL_clock);\n if (AESL_ready) begin\n start_cnt = start_cnt + 1;\n end\n end\n end\n\n always @(AESL_done) begin\n tb_continue = AESL_done;\n end\n\n initial begin : ready_initial_process\n ready_initial = 0;\n wait (AESL_start === 1);\n ready_initial = 1;\n @(posedge AESL_clock);\n ready_initial = 0;\n end\n\n always @(posedge AESL_clock) begin\n if (AESL_reset) AESL_ready_delay = 0;\n else AESL_ready_delay = AESL_ready;\n end\n initial begin : ready_last_n_process\n ready_last_n = 1;\n wait (ready_cnt == OUT_TRANSACTION_NUM) @(posedge AESL_clock);\n ready_last_n <= 0;\n end\n\n always @(posedge AESL_clock) begin\n if (AESL_reset) ready_delay_last_n = 0;\n else ready_delay_last_n <= ready_last_n;\n end\n assign ready = (ready_initial | AESL_ready_delay);\n assign ready_wire = ready_initial | AESL_ready_delay;\n initial begin : done_delay_last_n_process\n done_delay_last_n = 1;\n while (done_cnt < OUT_TRANSACTION_NUM) @(posedge AESL_clock);\n #0.1;\n done_delay_last_n = 0;\n end\n\n always @(posedge AESL_clock) begin\n if (AESL_reset) begin\n AESL_done_delay <= 0;\n AESL_done_delay2 <= 0;\n end else begin\n AESL_done_delay <= AESL_done & done_delay_last_n;\n AESL_done_delay2 <= AESL_done_delay;\n end\n end\n always @(posedge AESL_clock) begin\n if (AESL_reset) interface_done = 0;\n else begin\n #0.01;\n if (ready === 1 && ready_cnt > 0 && ready_cnt < OUT_TRANSACTION_NUM) interface_done = 1;\n else if (AESL_done_delay === 1 && done_cnt == OUT_TRANSACTION_NUM) interface_done = 1;\n else interface_done = 0;\n end\n end\n initial begin : proc_gen_data_in_V_internal_ready\n integer internal_trans_num;\n wait (AESL_reset === 0);\n wait (ready_initial === 1);\n data_in_V_ready_reg <= 0;\n @(posedge AESL_clock);\n internal_trans_num = 1;\n while (internal_trans_num != IN_TRANSACTION_NUM + 1) begin\n if (ap_c_n_tvin_trans_num_data_in_V > internal_trans_num) begin\n data_in_V_ready_reg <= 1;\n @(posedge AESL_clock);\n data_in_V_ready_reg <= 0;\n internal_trans_num = internal_trans_num + 1;\n end else begin\n @(posedge AESL_clock);\n end\n end\n data_in_V_ready_reg <= 0;\n end\n\n `define STREAM_SIZE_IN_data_in_V \"{hw_stream_size}\"\n\n initial begin : gen_ap_c_n_tvin_trans_num_data_in_V\n integer fp_data_in_V;\n reg [TOKEN_WIDTH-1:0] token_data_in_V;\n integer ret;\n\n ap_c_n_tvin_trans_num_data_in_V = 0;\n end_data_in_V = 0;\n wait (AESL_reset === 0);\n\n fp_data_in_V = $fopen(`STREAM_SIZE_IN_data_in_V, \"r\");\n if (fp_data_in_V == 0) begin\n $display(\"Failed to open file \\\\\\\"%s\\\\\\\"!\", `STREAM_SIZE_IN_data_in_V);\n $finish;\n end\n read_token(fp_data_in_V, token_data_in_V); // should be [[[runtime]]]\n if (token_data_in_V != \"[[[runtime]]]\") begin\n $display(\"ERROR: token_data_in_V != \\\\\\\"[[[runtime]]]\\\\\\\"\");\n $finish;\n end\n size_data_in_V = 0;\n size_data_in_V_backup = 0;\n while (size_data_in_V == 0 && end_data_in_V == 0) begin\n ap_c_n_tvin_trans_num_data_in_V = 
ap_c_n_tvin_trans_num_data_in_V + 1;\n read_token(fp_data_in_V, token_data_in_V); // should be [[transaction]] or [[[/runtime]]]\n if (token_data_in_V == \"[[transaction]]\") begin\n read_token(fp_data_in_V, token_data_in_V); // should be transaction number\n read_token(fp_data_in_V, token_data_in_V); // should be size for hls::stream\n ret = $sscanf(token_data_in_V, \"%d\", size_data_in_V);\n if (size_data_in_V > 0) begin\n size_data_in_V_backup = size_data_in_V;\n end\n read_token(fp_data_in_V, token_data_in_V); // should be [[/transaction]]\n end else if (token_data_in_V == \"[[[/runtime]]]\") begin\n $fclose(fp_data_in_V);\n end_data_in_V = 1;\n end else begin\n $display(\"ERROR: unknown token_data_in_V\");\n $finish;\n end\n end\n forever begin\n @(posedge AESL_clock);\n if (end_data_in_V == 0) begin\n if (data_in_V_read == 1 && data_in_V_empty_n == 1) begin\n if (size_data_in_V > 0) begin\n size_data_in_V = size_data_in_V - 1;\n while (size_data_in_V == 0 && end_data_in_V == 0) begin\n ap_c_n_tvin_trans_num_data_in_V = ap_c_n_tvin_trans_num_data_in_V + 1;\n read_token(fp_data_in_V,\n token_data_in_V); // should be [[transaction]] or [[[/runtime]]]\n if (token_data_in_V == \"[[transaction]]\") begin\n read_token(fp_data_in_V, token_data_in_V); // should be transaction number\n read_token(fp_data_in_V, token_data_in_V); // should be size for hls::stream\n ret = $sscanf(token_data_in_V, \"%d\", size_data_in_V);\n if (size_data_in_V > 0) begin\n size_data_in_V_backup = size_data_in_V;\n end\n read_token(fp_data_in_V, token_data_in_V); // should be [[/transaction]]\n end else if (token_data_in_V == \"[[[/runtime]]]\") begin\n size_data_in_V = size_data_in_V_backup;\n $fclose(fp_data_in_V);\n end_data_in_V = 1;\n end else begin\n $display(\"ERROR: unknown token_data_in_V\");\n $finish;\n end\n end\n end\n end\n end else begin\n if (data_in_V_read == 1 && data_in_V_empty_n == 1) begin\n if (size_data_in_V > 0) begin\n size_data_in_V = size_data_in_V - 1;\n if (size_data_in_V == 0) begin\n ap_c_n_tvin_trans_num_data_in_V = ap_c_n_tvin_trans_num_data_in_V + 1;\n size_data_in_V = size_data_in_V_backup;\n end\n end\n end\n end\n end\n end\n\n\n reg dump_tvout_finish_data_out_V;\n\n initial begin : dump_tvout_runtime_sign_data_out_V\n integer fp;\n dump_tvout_finish_data_out_V = 0;\n fp = $fopen(`HW_DATA_OUT_DAT, \"w\");\n if (fp == 0) begin\n $display(\"Failed to open file \\\\\\\"%s\\\\\\\"!\", `HW_DATA_OUT_DAT);\n $display(\"ERROR: Simulation using HLS TB failed.\");\n $finish;\n end\n $fdisplay(fp, \"[[[runtime]]]\");\n $fclose(fp);\n wait (done_cnt == OUT_TRANSACTION_NUM);\n // last transaction is saved at negedge right after last done\n @(posedge AESL_clock);\n @(posedge AESL_clock);\n @(posedge AESL_clock);\n fp = $fopen(`HW_DATA_OUT_DAT, \"a\");\n if (fp == 0) begin\n $display(\"Failed to open file \\\\\\\"%s\\\\\\\"!\", `HW_DATA_OUT_DAT);\n $display(\"ERROR: Simulation using HLS TB failed.\");\n $finish;\n end\n $fdisplay(fp, \"[[[/runtime]]]\");\n $fclose(fp);\n dump_tvout_finish_data_out_V = 1;\n end\n\n\n ////////////////////////////////////////////\n // progress and performance\n ////////////////////////////////////////////\n\n task wait_start();\n while (~AESL_start) begin\n @(posedge AESL_clock);\n end\n endtask\n\n reg [31:0] clk_cnt = 0;\n reg AESL_ready_p1;\n reg AESL_start_p1;\n\n always @(posedge AESL_clock) begin\n if (AESL_reset == 1) begin\n clk_cnt <= 32'h0;\n AESL_ready_p1 <= 1'b0;\n AESL_start_p1 <= 1'b0;\n end else begin\n clk_cnt <= clk_cnt + 1;\n AESL_ready_p1 <= 
AESL_ready;\n AESL_start_p1 <= AESL_start;\n end\n end\n\n reg [31:0] start_timestamp[0:OUT_TRANSACTION_NUM - 1];\n reg [31:0] start_cnt;\n reg [31:0] ready_timestamp[0:OUT_TRANSACTION_NUM - 1];\n reg [31:0] ap_ready_cnt;\n reg [31:0] finish_timestamp[0:OUT_TRANSACTION_NUM - 1];\n reg [31:0] finish_cnt;\n reg [31:0] lat_total;\n event report_progress;\n\n always @(posedge AESL_clock) begin\n if (finish_cnt == OUT_TRANSACTION_NUM - 1 && AESL_done == 1'b1)\n lat_total = clk_cnt - start_timestamp[0];\n end\n\n initial begin\n start_cnt = 0;\n finish_cnt = 0;\n ap_ready_cnt = 0;\n wait (AESL_reset == 0);\n wait_start();\n start_timestamp[start_cnt] = clk_cnt;\n start_cnt = start_cnt + 1;\n if (AESL_done) begin\n finish_timestamp[finish_cnt] = clk_cnt;\n finish_cnt = finish_cnt + 1;\n end\n ->report_progress;\n forever begin\n @(posedge AESL_clock);\n if (start_cnt < OUT_TRANSACTION_NUM) begin\n if ((AESL_start && AESL_ready_p1) || (AESL_start && ~AESL_start_p1)) begin\n start_timestamp[start_cnt] = clk_cnt;\n start_cnt = start_cnt + 1;\n end\n end\n if (ap_ready_cnt < OUT_TRANSACTION_NUM) begin\n if (AESL_start_p1 && AESL_ready_p1) begin\n ready_timestamp[ap_ready_cnt] = clk_cnt;\n ap_ready_cnt = ap_ready_cnt + 1;\n end\n end\n if (finish_cnt < OUT_TRANSACTION_NUM) begin\n if (AESL_done) begin\n finish_timestamp[finish_cnt] = clk_cnt;\n finish_cnt = finish_cnt + 1;\n end\n end\n ->report_progress;\n end\n end\n\n reg [31:0] progress_timeout;\n\n initial begin : simulation_progress\n real intra_progress;\n wait (AESL_reset == 0);\n progress_timeout = PROGRESS_TIMEOUT;\n $display(\n \"////////////////////////////////////////////////////////////////////////////////////\");\n $display(\"// Inter-Transaction Progress: Completed Transaction / Total Transaction\");\n $display(\"// Intra-Transaction Progress: Measured Latency / Latency Estimation * 100%%\");\n $display(\"//\");\n $display(\n \"// RTL Simulation : \\\\\\\"Inter-Transaction Progress\\\\\\\" [\\\\\\\"Intra-Transaction Progress\\\\\\\"] @ \\\\\\\"Simulation Time\\\\\\\"\");\n $display(\n \"////////////////////////////////////////////////////////////////////////////////////\");\n print_progress();\n while (finish_cnt < OUT_TRANSACTION_NUM) begin\n @(report_progress);\n if (finish_cnt < OUT_TRANSACTION_NUM) begin\n if (AESL_done) begin\n print_progress();\n progress_timeout = PROGRESS_TIMEOUT;\n end else begin\n if (progress_timeout == 0) begin\n print_progress();\n progress_timeout = PROGRESS_TIMEOUT;\n end else begin\n progress_timeout = progress_timeout - 1;\n end\n end\n end\n end\n print_progress();\n $display(\n \"////////////////////////////////////////////////////////////////////////////////////\");\n calculate_performance();\n end\n\n task get_intra_progress(output real intra_progress);\n begin\n if (start_cnt > finish_cnt) begin\n intra_progress = clk_cnt - start_timestamp[finish_cnt];\n end else if (finish_cnt > 0) begin\n intra_progress = LATENCY_ESTIMATION;\n end else begin\n intra_progress = 0;\n end\n intra_progress = intra_progress / LATENCY_ESTIMATION;\n end\n endtask\n\n task print_progress();\n real intra_progress;\n begin\n if (LATENCY_ESTIMATION > 0) begin\n get_intra_progress(intra_progress);\n $display(\"// RTL Simulation : %0d / %0d [%2.2f%%] @ \\\\\\\"%0t\\\\\\\"\", finish_cnt,\n OUT_TRANSACTION_NUM, intra_progress * 100, $time);\n end else begin\n $display(\"// RTL Simulation : %0d / %0d [n/a] @ \\\\\\\"%0t\\\\\\\"\", finish_cnt,\n OUT_TRANSACTION_NUM, $time);\n end\n end\n endtask\n\n task 
calculate_performance();\n integer i;\n integer fp;\n reg [31:0] latency[0:OUT_TRANSACTION_NUM - 1];\n reg [31:0] latency_min;\n reg [31:0] latency_max;\n reg [31:0] latency_total;\n reg [31:0] latency_average;\n reg [31:0] interval[0:OUT_TRANSACTION_NUM - 2];\n reg [31:0] interval_min;\n reg [31:0] interval_max;\n reg [31:0] interval_total;\n reg [31:0] interval_average;\n reg [31:0] total_execute_time;\n begin\n latency_min = -1;\n latency_max = 0;\n latency_total = 0;\n interval_min = -1;\n interval_max = 0;\n interval_total = 0;\n total_execute_time = lat_total;\n\n for (i = 0; i < OUT_TRANSACTION_NUM; i = i + 1) begin\n // calculate latency\n latency[i] = finish_timestamp[i] - start_timestamp[i];\n if (latency[i] > latency_max) latency_max = latency[i];\n if (latency[i] < latency_min) latency_min = latency[i];\n latency_total = latency_total + latency[i];\n // calculate interval\n if (OUT_TRANSACTION_NUM == 1) begin\n interval[i] = 0;\n interval_max = 0;\n interval_min = 0;\n interval_total = 0;\n end else if (i < OUT_TRANSACTION_NUM - 1) begin\n interval[i] = start_timestamp[i+1] - start_timestamp[i];\n if (interval[i] > interval_max) interval_max = interval[i];\n if (interval[i] < interval_min) interval_min = interval[i];\n interval_total = interval_total + interval[i];\n end\n end\n\n latency_average = latency_total / OUT_TRANSACTION_NUM;\n if (OUT_TRANSACTION_NUM == 1) begin\n interval_average = 0;\n end else begin\n interval_average = interval_total / (OUT_TRANSACTION_NUM - 1);\n end\n\n fp = $fopen(`AUTOTB_LAT_RESULT_FILE, \"w\");\n\n $fdisplay(fp, \"$MAX_LATENCY = \\\\\\\"%0d\\\\\\\"\", latency_max);\n $fdisplay(fp, \"$MIN_LATENCY = \\\\\\\"%0d\\\\\\\"\", latency_min);\n $fdisplay(fp, \"$AVER_LATENCY = \\\\\\\"%0d\\\\\\\"\", latency_average);\n $fdisplay(fp, \"$MAX_THROUGHPUT = \\\\\\\"%0d\\\\\\\"\", interval_max);\n $fdisplay(fp, \"$MIN_THROUGHPUT = \\\\\\\"%0d\\\\\\\"\", interval_min);\n $fdisplay(fp, \"$AVER_THROUGHPUT = \\\\\\\"%0d\\\\\\\"\", interval_average);\n $fdisplay(fp, \"$TOTAL_EXECUTE_TIME = \\\\\\\"%0d\\\\\\\"\", total_execute_time);\n\n $fclose(fp);\n\n fp = $fopen(`AUTOTB_PER_RESULT_TRANS_FILE, \"w\");\n\n $fdisplay(fp, \"%20s%16s%16s\", \"\", \"latency\", \"interval\");\n if (OUT_TRANSACTION_NUM == 1) begin\n i = 0;\n $fdisplay(fp, \"transaction%8d:%16d%16d\", i, latency[i], interval[i]);\n end else begin\n for (i = 0; i < OUT_TRANSACTION_NUM; i = i + 1) begin\n if (i < OUT_TRANSACTION_NUM - 1) begin\n $fdisplay(fp, \"transaction%8d:%16d%16d\", i, latency[i], interval[i]);\n end else begin\n $fdisplay(fp, \"transaction%8d:%16d x\", i, latency[i]);\n end\n end\n end\n\n $fclose(fp);\n end\n endtask\n\n\n ////////////////////////////////////////////\n // Dependence Check\n ////////////////////////////////////////////\n\n`ifndef POST_SYN\n\n`endif\n ///////////////////////////////////////////////////////\n // dataflow status monitor\n ///////////////////////////////////////////////////////\n // dataflow_monitor U_dataflow_monitor (\n // .clock (AESL_clock),\n // .reset (rst),\n // .finish(all_finish)\n // );\n\n // `include \"fifo_para.v\"\n\nendmodule\n\"\"\"\n\n with open(out_file, \"w\", encoding=\"utf-8\") as outf:\n outf.write(buff)\n logger.debug(f\"Top-level test bench emitted to {out_file}\")\n assert os.path.isfile(out_file), \"Emitting top-level test bench failed.\"\n os.system(f\"verible-verilog-format --inplace {out_file}\")" } ]
import math, time, os, logging, torch, glob, shutil from chop.passes.graph.utils import vf, v2p, init_project from chop.passes.graph.transforms.quantize.quantizers import integer_quantizer_for_hw from .emit_tb_data_in import emit_data_in_tb_sv, emit_data_in_tb_dat from .emit_tb_data_out import emit_data_out_tb_sv, emit_data_out_tb_dat from .emit_tb_testbench import emit_top_tb from pathlib import Path
13,095
w_in_param = graph.nodes_in[0].meta["mase"].parameters["common"]["args"] in_width = w_in_param["data_in_0"]["precision"][0] in_size = v_in_param["DATA_IN_0_TENSOR_SIZE_DIM_0"] data_width = in_width * in_size # TODO : need to check addr_width = 1 depth = 1 load_path = os.path.join(tv_dir, f"sw_data_in.dat") out_file = os.path.join(v_dir, f"top_data_in_fifo.sv") emit_data_in_tb_sv(data_width, load_path, out_file) v_out_param = ( graph.nodes_out[0].meta["mase"].parameters["hardware"]["verilog_param"] ) w_out_param = graph.nodes_in[0].meta["mase"].parameters["common"]["results"] out_width = w_out_param["data_out_0"]["precision"][0] out_size = v_out_param["DATA_OUT_0_TENSOR_SIZE_0_DIM_0"] data_width = out_width * out_size # TODO : need to check addr_width = 1 depth = 1 load_path = os.path.join(tv_dir, f"sw_data_out.dat") store_path = os.path.join(tv_dir, f"hw_data_out.dat") out_file = os.path.join(v_dir, f"top_data_out_fifo.sv") emit_data_out_tb_sv(data_width, load_path, store_path, out_file) out_file = os.path.join(v_dir, f"top_tb.sv") # in_trans_num = v_in_param["DATA_IN_0_DEPTH"] * trans_num in_trans_num = trans_num out_trans_num = trans_num emit_top_tb( tv_dir, "top", out_file, in_width, in_size, out_width, out_size, in_trans_num, out_trans_num, ) out_file = os.path.join(v_dir, f"fifo_para.v") with open(out_file, "w", encoding="utf-8") as outf: outf.write("// initial a empty file") # Copy testbench components dut_dir = os.path.join(project_dir, "hardware", "rtl") for svfile in glob.glob(os.path.join(dut_dir, "*.sv")): shutil.copy(svfile, v_dir) def emit_tb_dat(graph, trans_num=1, project_dir="top", test_inputs=None): """ Emit the test vectors in dat files for simulation """ sim_dir = os.path.join(project_dir, "hardware", "sim") tv_dir = os.path.join(sim_dir, "tv") if not os.path.exists(tv_dir): os.mkdir(tv_dir) in_type = ( graph.nodes_in[0].meta["mase"].parameters["common"]["args"]["data_in_0"]["type"] ) in_width = ( graph.nodes_in[0] .meta["mase"] .parameters["common"]["args"]["data_in_0"]["precision"][0] ) in_frac_width = ( graph.nodes_in[0] .meta["mase"] .parameters["common"]["args"]["data_in_0"]["precision"][1] ) sw_data_out = [graph.model(trans) for trans in test_inputs] out_type = ( graph.nodes_out[0] .meta["mase"] .parameters["common"]["results"]["data_out_0"]["type"] ) prec = ( graph.nodes_out[0] .meta["mase"] .parameters["common"]["results"]["data_out_0"]["precision"] ) out_width = ( graph.nodes_out[0] .meta["mase"] .parameters["common"]["results"]["data_out_0"]["precision"][0] ) if len(prec) > 1: out_frac_width = ( graph.nodes_out[0] .meta["mase"] .parameters["common"]["results"]["data_out_0"]["precision"][1] ) else: out_frac_width = 0 # TODO: Make out_type as input to support casting to any type hw_data_out = [ integer_quantizer_for_hw(trans, width=out_width, frac_width=out_frac_width) .squeeze(0) .to(torch.int) for trans in sw_data_out ] # TODO: for now for i, trans in enumerate(test_inputs): test_inputs[i] = torch.flatten(trans).tolist() for i, trans in enumerate(hw_data_out): hw_data_out[i] = torch.flatten(trans).tolist() load_path = os.path.join(tv_dir, "sw_data_in.dat")
logger = logging.getLogger(__name__) def emit_tb_verilog(graph, trans_num=1, project_dir="top"): sim_dir = os.path.join(project_dir, "hardware", "sim") tv_dir = os.path.join(sim_dir, "tv") if not os.path.exists(tv_dir): os.mkdir(tv_dir) v_dir = os.path.join(sim_dir, "verilog") if not os.path.exists(v_dir): os.mkdir(v_dir) # TODO : need to emit all the inputs v_in_param = graph.nodes_in[0].meta["mase"].parameters["hardware"]["verilog_param"] w_in_param = graph.nodes_in[0].meta["mase"].parameters["common"]["args"] in_width = w_in_param["data_in_0"]["precision"][0] in_size = v_in_param["DATA_IN_0_TENSOR_SIZE_DIM_0"] data_width = in_width * in_size # TODO : need to check addr_width = 1 depth = 1 load_path = os.path.join(tv_dir, f"sw_data_in.dat") out_file = os.path.join(v_dir, f"top_data_in_fifo.sv") emit_data_in_tb_sv(data_width, load_path, out_file) v_out_param = ( graph.nodes_out[0].meta["mase"].parameters["hardware"]["verilog_param"] ) w_out_param = graph.nodes_in[0].meta["mase"].parameters["common"]["results"] out_width = w_out_param["data_out_0"]["precision"][0] out_size = v_out_param["DATA_OUT_0_TENSOR_SIZE_0_DIM_0"] data_width = out_width * out_size # TODO : need to check addr_width = 1 depth = 1 load_path = os.path.join(tv_dir, f"sw_data_out.dat") store_path = os.path.join(tv_dir, f"hw_data_out.dat") out_file = os.path.join(v_dir, f"top_data_out_fifo.sv") emit_data_out_tb_sv(data_width, load_path, store_path, out_file) out_file = os.path.join(v_dir, f"top_tb.sv") # in_trans_num = v_in_param["DATA_IN_0_DEPTH"] * trans_num in_trans_num = trans_num out_trans_num = trans_num emit_top_tb( tv_dir, "top", out_file, in_width, in_size, out_width, out_size, in_trans_num, out_trans_num, ) out_file = os.path.join(v_dir, f"fifo_para.v") with open(out_file, "w", encoding="utf-8") as outf: outf.write("// initial a empty file") # Copy testbench components dut_dir = os.path.join(project_dir, "hardware", "rtl") for svfile in glob.glob(os.path.join(dut_dir, "*.sv")): shutil.copy(svfile, v_dir) def emit_tb_dat(graph, trans_num=1, project_dir="top", test_inputs=None): """ Emit the test vectors in dat files for simulation """ sim_dir = os.path.join(project_dir, "hardware", "sim") tv_dir = os.path.join(sim_dir, "tv") if not os.path.exists(tv_dir): os.mkdir(tv_dir) in_type = ( graph.nodes_in[0].meta["mase"].parameters["common"]["args"]["data_in_0"]["type"] ) in_width = ( graph.nodes_in[0] .meta["mase"] .parameters["common"]["args"]["data_in_0"]["precision"][0] ) in_frac_width = ( graph.nodes_in[0] .meta["mase"] .parameters["common"]["args"]["data_in_0"]["precision"][1] ) sw_data_out = [graph.model(trans) for trans in test_inputs] out_type = ( graph.nodes_out[0] .meta["mase"] .parameters["common"]["results"]["data_out_0"]["type"] ) prec = ( graph.nodes_out[0] .meta["mase"] .parameters["common"]["results"]["data_out_0"]["precision"] ) out_width = ( graph.nodes_out[0] .meta["mase"] .parameters["common"]["results"]["data_out_0"]["precision"][0] ) if len(prec) > 1: out_frac_width = ( graph.nodes_out[0] .meta["mase"] .parameters["common"]["results"]["data_out_0"]["precision"][1] ) else: out_frac_width = 0 # TODO: Make out_type as input to support casting to any type hw_data_out = [ integer_quantizer_for_hw(trans, width=out_width, frac_width=out_frac_width) .squeeze(0) .to(torch.int) for trans in sw_data_out ] # TODO: for now for i, trans in enumerate(test_inputs): test_inputs[i] = torch.flatten(trans).tolist() for i, trans in enumerate(hw_data_out): hw_data_out[i] = torch.flatten(trans).tolist() load_path = 
os.path.join(tv_dir, "sw_data_in.dat")
emit_data_in_tb_dat(graph.nodes_in[0], test_inputs, load_path)
1
2023-12-18 12:50:53+00:00
16k
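The row above revolves around one interchange format: emit_data_in_tb_dat / emit_data_out_tb_dat pack each transaction's fixed-point words into a single wide hex token inside a [[[runtime]]] ... [[[/runtime]]] wrapper, and the emitted Verilog FIFOs $fscanf those tokens back during simulation. Below is a minimal, self-contained sketch of that packing and wrapper; the function name write_runtime_dat and the element-0-in-low-bits packing order are assumptions for illustration, not necessarily the exact mase layout.

# Sketch with assumptions: pack each transaction's words into one hex token and wrap it
# in the [[transaction]] / [[[runtime]]] format that the generated Verilog testbench parses.
# write_runtime_dat is a hypothetical helper; the packing order (element 0 in the low bits)
# is an assumption, not necessarily the layout produced by emit_data_in_tb_dat.
from typing import List

def pack_words(words: List[int], width: int) -> int:
    """Concatenate fixed-point words into one integer, element 0 in the low bits."""
    value = 0
    for i, w in enumerate(words):
        value |= (w & ((1 << width) - 1)) << (i * width)
    return value

def write_runtime_dat(transactions: List[List[int]], width: int, out_file: str) -> None:
    lines = ["[[[runtime]]]"]
    for idx, words in enumerate(transactions):
        lines.append(f"[[transaction]] {idx}")
        lines.append(hex(pack_words(words, width)))
        lines.append("[[/transaction]]")
    lines.append("[[[/runtime]]]")
    with open(out_file, "w", encoding="utf-8") as f:
        f.write("\n".join(lines) + "\n")

# Usage: two transactions of four 8-bit words each -> tokens 0x4030201 and 0x201000ff.
write_runtime_dat([[1, 2, 3, 4], [255, 0, 16, 32]], width=8, out_file="sw_data_in.dat")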
OPPOMKLab/u-LLaVA
models/GroundingDINO/groundingdino/models/GroundingDINO/groundingdino.py
[ { "identifier": "box_ops", "path": "models/GroundingDINO/groundingdino/util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef box_iou_pairwise(boxes1, boxes2):\ndef generalized_box_iou_pairwise(boxes1, boxes2):\ndef masks_to_boxes(masks):" }, { "identifier": "get_tokenlizer", "path": "models/GroundingDINO/groundingdino/util/get_tokenlizer.py", "snippet": "def get_tokenlizer(text_encoder_type):\n if not isinstance(text_encoder_type, str):\n # print(\"text_encoder_type is not a str\")\n if hasattr(text_encoder_type, \"text_encoder_type\"):\n text_encoder_type = text_encoder_type.text_encoder_type\n elif text_encoder_type.get(\"text_encoder_type\", False):\n text_encoder_type = text_encoder_type.get(\"text_encoder_type\")\n elif os.path.isdir(text_encoder_type) and os.path.exists(text_encoder_type):\n pass\n else:\n raise ValueError(\n \"Unknown type of text_encoder_type: {}\".format(type(text_encoder_type))\n )\n print(\"final text_encoder_type: {}\".format(text_encoder_type))\n\n tokenizer = AutoTokenizer.from_pretrained(text_encoder_type)\n return tokenizer" }, { "identifier": "NestedTensor", "path": "models/GroundingDINO/groundingdino/util/misc.py", "snippet": "class NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n if mask == \"auto\":\n self.mask = torch.zeros_like(tensors).to(tensors.device)\n if self.mask.dim() == 3:\n self.mask = self.mask.sum(0).to(bool)\n elif self.mask.dim() == 4:\n self.mask = self.mask.sum(1).to(bool)\n else:\n raise ValueError(\n \"tensors dim must be 3 or 4 but {}({})\".format(\n self.tensors.dim(), self.tensors.shape\n )\n )\n\n def imgsize(self):\n res = []\n for i in range(self.tensors.shape[0]):\n mask = self.mask[i]\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n res.append(torch.Tensor([maxH, maxW]))\n return res\n\n def to(self, device):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def to_img_list_single(self, tensor, mask):\n assert tensor.dim() == 3, \"dim of tensor should be 3 but {}\".format(tensor.dim())\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n img = tensor[:, :maxH, :maxW]\n return img\n\n def to_img_list(self):\n \"\"\"remove the padding and convert to img list\n\n Returns:\n [type]: [description]\n \"\"\"\n if self.tensors.dim() == 3:\n return self.to_img_list_single(self.tensors, self.mask)\n else:\n res = []\n for i in range(self.tensors.shape[0]):\n tensor_i = self.tensors[i]\n mask_i = self.mask[i]\n res.append(self.to_img_list_single(tensor_i, mask_i))\n return res\n\n @property\n def device(self):\n return self.tensors.device\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)\n\n @property\n def shape(self):\n return {\"tensors.shape\": self.tensors.shape, \"mask.shape\": self.mask.shape}" }, { "identifier": "accuracy", "path": "models/GroundingDINO/groundingdino/util/misc.py", "snippet": "@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n if target.numel() == 0:\n return [torch.zeros([], device=output.device)]\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = 
output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res" }, { "identifier": "get_world_size", "path": "models/GroundingDINO/groundingdino/util/misc.py", "snippet": "def get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "interpolate", "path": "models/GroundingDINO/groundingdino/util/misc.py", "snippet": "def interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if __torchvision_need_compat_flag < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)" }, { "identifier": "inverse_sigmoid", "path": "models/GroundingDINO/groundingdino/util/misc.py", "snippet": "def inverse_sigmoid(x, eps=1e-3):\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1 / x2)" }, { "identifier": "is_dist_avail_and_initialized", "path": "models/GroundingDINO/groundingdino/util/misc.py", "snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True" }, { "identifier": "nested_tensor_from_tensor_list", "path": "models/GroundingDINO/groundingdino/util/misc.py", "snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n if torchvision._is_tracing():\n # nested_tensor_from_tensor_list() does not export well to ONNX\n # call _onnx_nested_tensor_from_tensor_list() instead\n return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], : img.shape[2]] = False\n else:\n raise ValueError(\"not supported\")\n return NestedTensor(tensor, mask)" }, { "identifier": "get_phrases_from_posmap", "path": "models/GroundingDINO/groundingdino/util/utils.py", "snippet": "def get_phrases_from_posmap(\n posmap: torch.BoolTensor, tokenized: Dict, tokenizer: AutoTokenizer, left_idx: int = 0, right_idx: int = 255\n):\n assert isinstance(posmap, torch.Tensor), \"posmap must be torch.Tensor\"\n if posmap.dim() == 1:\n posmap[0: left_idx + 1] = False\n posmap[right_idx:] = False\n non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()\n 
token_ids = [tokenized[\"input_ids\"][i] for i in non_zero_idx]\n return tokenizer.decode(token_ids)\n else:\n raise NotImplementedError(\"posmap must be 1-dim\")" }, { "identifier": "COCOVisualizer", "path": "models/GroundingDINO/groundingdino/util/visualizer.py", "snippet": "class COCOVisualizer:\n def __init__(self, coco=None, tokenlizer=None) -> None:\n self.coco = coco\n\n def visualize(self, img, tgt, caption=None, dpi=180, savedir=\"vis\"):\n \"\"\"\n img: tensor(3, H, W)\n tgt: make sure they are all on cpu.\n must have items: 'image_id', 'boxes', 'size'\n \"\"\"\n plt.figure(dpi=dpi)\n plt.rcParams[\"font.size\"] = \"5\"\n ax = plt.gca()\n img = renorm(img).permute(1, 2, 0)\n # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':\n # import ipdb; ipdb.set_trace()\n ax.imshow(img)\n\n self.addtgt(tgt)\n\n if tgt is None:\n image_id = 0\n elif \"image_id\" not in tgt:\n image_id = 0\n else:\n image_id = tgt[\"image_id\"]\n\n if caption is None:\n savename = \"{}/{}-{}.png\".format(\n savedir, int(image_id), str(datetime.datetime.now()).replace(\" \", \"-\")\n )\n else:\n savename = \"{}/{}-{}-{}.png\".format(\n savedir, caption, int(image_id), str(datetime.datetime.now()).replace(\" \", \"-\")\n )\n print(\"savename: {}\".format(savename))\n os.makedirs(os.path.dirname(savename), exist_ok=True)\n plt.savefig(savename)\n plt.close()\n\n def addtgt(self, tgt):\n \"\"\" \"\"\"\n if tgt is None or not \"boxes\" in tgt:\n ax = plt.gca()\n\n if \"caption\" in tgt:\n ax.set_title(tgt[\"caption\"], wrap=True)\n\n ax.set_axis_off()\n return\n\n ax = plt.gca()\n H, W = tgt[\"size\"]\n numbox = tgt[\"boxes\"].shape[0]\n\n color = []\n polygons = []\n boxes = []\n for box in tgt[\"boxes\"].cpu():\n unnormbbox = box * torch.Tensor([W, H, W, H])\n unnormbbox[:2] -= unnormbbox[2:] / 2\n [bbox_x, bbox_y, bbox_w, bbox_h] = unnormbbox.tolist()\n boxes.append([bbox_x, bbox_y, bbox_w, bbox_h])\n poly = [\n [bbox_x, bbox_y],\n [bbox_x, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y],\n ]\n np_poly = np.array(poly).reshape((4, 2))\n polygons.append(Polygon(np_poly))\n c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]\n color.append(c)\n\n p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.1)\n ax.add_collection(p)\n p = PatchCollection(polygons, facecolor=\"none\", edgecolors=color, linewidths=2)\n ax.add_collection(p)\n\n if \"strings_positive\" in tgt and len(tgt[\"strings_positive\"]) > 0:\n assert (\n len(tgt[\"strings_positive\"]) == numbox\n ), f\"{len(tgt['strings_positive'])} = {numbox}, \"\n for idx, strlist in enumerate(tgt[\"strings_positive\"]):\n cate_id = int(tgt[\"labels\"][idx])\n _string = str(cate_id) + \":\" + \" \".join(strlist)\n bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]\n # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1})\n ax.text(\n bbox_x,\n bbox_y,\n _string,\n color=\"black\",\n bbox={\"facecolor\": color[idx], \"alpha\": 0.6, \"pad\": 1},\n )\n\n if \"box_label\" in tgt:\n assert len(tgt[\"box_label\"]) == numbox, f\"{len(tgt['box_label'])} = {numbox}, \"\n for idx, bl in enumerate(tgt[\"box_label\"]):\n _string = str(bl)\n bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]\n # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1})\n ax.text(\n bbox_x,\n bbox_y,\n _string,\n color=\"black\",\n bbox={\"facecolor\": color[idx], \"alpha\": 0.6, \"pad\": 1},\n )\n\n if \"caption\" in tgt:\n ax.set_title(tgt[\"caption\"], 
wrap=True)\n # plt.figure()\n # rainbow_text(0.0,0.0,\"all unicorns poop rainbows ! ! !\".split(),\n # ['red', 'orange', 'brown', 'green', 'blue', 'purple', 'black'])\n\n if \"attn\" in tgt:\n # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':\n # import ipdb; ipdb.set_trace()\n if isinstance(tgt[\"attn\"], tuple):\n tgt[\"attn\"] = [tgt[\"attn\"]]\n for item in tgt[\"attn\"]:\n attn_map, basergb = item\n attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min() + 1e-3)\n attn_map = (attn_map * 255).astype(np.uint8)\n cm = ColorMap(basergb)\n heatmap = cm(attn_map)\n ax.imshow(heatmap)\n ax.set_axis_off()\n\n def showAnns(self, anns, draw_bbox=False):\n \"\"\"\n Display the specified annotations.\n :param anns (array of object): annotations to display\n :return: None\n \"\"\"\n if len(anns) == 0:\n return 0\n if \"segmentation\" in anns[0] or \"keypoints\" in anns[0]:\n datasetType = \"instances\"\n elif \"caption\" in anns[0]:\n datasetType = \"captions\"\n else:\n raise Exception(\"datasetType not supported\")\n if datasetType == \"instances\":\n ax = plt.gca()\n ax.set_autoscale_on(False)\n polygons = []\n color = []\n for ann in anns:\n c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]\n if \"segmentation\" in ann:\n if type(ann[\"segmentation\"]) == list:\n # polygon\n for seg in ann[\"segmentation\"]:\n poly = np.array(seg).reshape((int(len(seg) / 2), 2))\n polygons.append(Polygon(poly))\n color.append(c)\n else:\n # mask\n t = self.imgs[ann[\"image_id\"]]\n if type(ann[\"segmentation\"][\"counts\"]) == list:\n rle = maskUtils.frPyObjects(\n [ann[\"segmentation\"]], t[\"height\"], t[\"width\"]\n )\n else:\n rle = [ann[\"segmentation\"]]\n m = maskUtils.decode(rle)\n img = np.ones((m.shape[0], m.shape[1], 3))\n if ann[\"iscrowd\"] == 1:\n color_mask = np.array([2.0, 166.0, 101.0]) / 255\n if ann[\"iscrowd\"] == 0:\n color_mask = np.random.random((1, 3)).tolist()[0]\n for i in range(3):\n img[:, :, i] = color_mask[i]\n ax.imshow(np.dstack((img, m * 0.5)))\n if \"keypoints\" in ann and type(ann[\"keypoints\"]) == list:\n # turn skeleton into zero-based index\n sks = np.array(self.loadCats(ann[\"category_id\"])[0][\"skeleton\"]) - 1\n kp = np.array(ann[\"keypoints\"])\n x = kp[0::3]\n y = kp[1::3]\n v = kp[2::3]\n for sk in sks:\n if np.all(v[sk] > 0):\n plt.plot(x[sk], y[sk], linewidth=3, color=c)\n plt.plot(\n x[v > 0],\n y[v > 0],\n \"o\",\n markersize=8,\n markerfacecolor=c,\n markeredgecolor=\"k\",\n markeredgewidth=2,\n )\n plt.plot(\n x[v > 1],\n y[v > 1],\n \"o\",\n markersize=8,\n markerfacecolor=c,\n markeredgecolor=c,\n markeredgewidth=2,\n )\n\n if draw_bbox:\n [bbox_x, bbox_y, bbox_w, bbox_h] = ann[\"bbox\"]\n poly = [\n [bbox_x, bbox_y],\n [bbox_x, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y + bbox_h],\n [bbox_x + bbox_w, bbox_y],\n ]\n np_poly = np.array(poly).reshape((4, 2))\n polygons.append(Polygon(np_poly))\n color.append(c)\n\n # p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)\n # ax.add_collection(p)\n p = PatchCollection(polygons, facecolor=\"none\", edgecolors=color, linewidths=2)\n ax.add_collection(p)\n elif datasetType == \"captions\":\n for ann in anns:\n print(ann[\"caption\"])" }, { "identifier": "create_positive_map_from_span", "path": "models/GroundingDINO/groundingdino/util/vl_utils.py", "snippet": "def create_positive_map_from_span(tokenized, token_span, max_text_len=256):\n \"\"\"construct a map such that positive_map[i,j] = True iff box i is associated to token j\n Input:\n - tokenized:\n - 
input_ids: Tensor[1, ntokens]\n - attention_mask: Tensor[1, ntokens]\n - token_span: list with length num_boxes.\n - each item: [start_idx, end_idx]\n \"\"\"\n positive_map = torch.zeros((len(token_span), max_text_len), dtype=torch.float)\n for j, tok_list in enumerate(token_span):\n for (beg, end) in tok_list:\n beg_pos = tokenized.char_to_token(beg)\n end_pos = tokenized.char_to_token(end - 1)\n if beg_pos is None:\n try:\n beg_pos = tokenized.char_to_token(beg + 1)\n if beg_pos is None:\n beg_pos = tokenized.char_to_token(beg + 2)\n except:\n beg_pos = None\n if end_pos is None:\n try:\n end_pos = tokenized.char_to_token(end - 2)\n if end_pos is None:\n end_pos = tokenized.char_to_token(end - 3)\n except:\n end_pos = None\n if beg_pos is None or end_pos is None:\n continue\n\n assert beg_pos is not None and end_pos is not None\n if os.environ.get(\"SHILONG_DEBUG_ONLY_ONE_POS\", None) == \"TRUE\":\n positive_map[j, beg_pos] = 1\n break\n else:\n positive_map[j, beg_pos : end_pos + 1].fill_(1)\n\n return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)" }, { "identifier": "MODULE_BUILD_FUNCS", "path": "models/GroundingDINO/groundingdino/models/registry.py", "snippet": "MODULE_BUILD_FUNCS = Registry(\"model build functions\")" }, { "identifier": "build_backbone", "path": "models/GroundingDINO/groundingdino/models/GroundingDINO/backbone/backbone.py", "snippet": "def build_backbone(args):\n \"\"\"\n Useful args:\n - backbone: backbone name\n - lr_backbone:\n - dilation\n - return_interm_indices: available: [0,1,2,3], [1,2,3], [3]\n - backbone_freeze_keywords:\n - use_checkpoint: for swin only for now\n\n \"\"\"\n position_embedding = build_position_encoding(args)\n train_backbone = True\n if not train_backbone:\n raise ValueError(\"Please set lr_backbone > 0\")\n return_interm_indices = args.return_interm_indices\n assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]\n args.backbone_freeze_keywords\n use_checkpoint = getattr(args, \"use_checkpoint\", False)\n\n if args.backbone in [\"resnet50\", \"resnet101\"]:\n backbone = Backbone(\n args.backbone,\n train_backbone,\n args.dilation,\n return_interm_indices,\n batch_norm=FrozenBatchNorm2d,\n )\n bb_num_channels = backbone.num_channels\n elif args.backbone in [\n \"swin_T_224_1k\",\n \"swin_B_224_22k\",\n \"swin_B_384_22k\",\n \"swin_L_224_22k\",\n \"swin_L_384_22k\",\n ]:\n pretrain_img_size = int(args.backbone.split(\"_\")[-2])\n backbone = build_swin_transformer(\n args.backbone,\n pretrain_img_size=pretrain_img_size,\n out_indices=tuple(return_interm_indices),\n dilation=False,\n use_checkpoint=use_checkpoint,\n )\n\n bb_num_channels = backbone.num_features[4 - len(return_interm_indices) :]\n else:\n raise NotImplementedError(\"Unknown backbone {}\".format(args.backbone))\n\n assert len(bb_num_channels) == len(\n return_interm_indices\n ), f\"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}\"\n\n model = Joiner(backbone, position_embedding)\n model.num_channels = bb_num_channels\n assert isinstance(\n bb_num_channels, List\n ), \"bb_num_channels is expected to be a List but {}\".format(type(bb_num_channels))\n # import ipdb; ipdb.set_trace()\n return model" }, { "identifier": "BertModelWarper", "path": "models/GroundingDINO/groundingdino/models/GroundingDINO/bertwarper.py", "snippet": "class BertModelWarper(nn.Module):\n def __init__(self, bert_model):\n super().__init__()\n # self.bert = bert_modelc\n\n self.config = bert_model.config\n self.embeddings = 
bert_model.embeddings\n self.encoder = bert_model.encoder\n self.pooler = bert_model.pooler\n\n self.get_extended_attention_mask = bert_model.get_extended_attention_mask\n self.invert_attention_mask = bert_model.invert_attention_mask\n self.get_head_mask = bert_model.get_head_mask\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = (\n output_attentions if output_attentions is not None else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n batch_size, seq_length = input_shape\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size, seq_length = input_shape\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = (\n past_key_values[0][0].shape[2] if past_key_values is not None else 0\n )\n\n if attention_mask is None:\n attention_mask = torch.ones(\n ((batch_size, seq_length + past_key_values_length)), device=device\n )\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions 
[batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(\n attention_mask, input_shape, device\n )\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':\n # import ipdb; ipdb.set_trace()\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )" }, { "identifier": "generate_masks_with_special_tokens", "path": "models/GroundingDINO/groundingdino/models/GroundingDINO/bertwarper.py", "snippet": "def generate_masks_with_special_tokens(tokenized, special_tokens_list, tokenizer):\n \"\"\"Generate attention mask between each pair of special tokens\n Args:\n input_ids (torch.Tensor): input ids. Shape: [bs, num_token]\n special_tokens_mask (list): special tokens mask.\n Returns:\n torch.Tensor: attention mask between each special tokens.\n \"\"\"\n input_ids = tokenized[\"input_ids\"]\n bs, num_token = input_ids.shape\n # special_tokens_mask: bs, num_token. 1 for special tokens. 
0 for normal tokens\n special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()\n for special_token in special_tokens_list:\n special_tokens_mask |= input_ids == special_token\n\n # idxs: each row is a list of indices of special tokens\n idxs = torch.nonzero(special_tokens_mask)\n\n # generate attention mask and positional ids\n attention_mask = (\n torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)\n )\n position_ids = torch.zeros((bs, num_token), device=input_ids.device)\n previous_col = 0\n for i in range(idxs.shape[0]):\n row, col = idxs[i]\n if (col == 0) or (col == num_token - 1):\n attention_mask[row, col, col] = True\n position_ids[row, col] = 0\n else:\n attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True\n position_ids[row, previous_col + 1 : col + 1] = torch.arange(\n 0, col - previous_col, device=input_ids.device\n )\n\n previous_col = col\n\n # # padding mask\n # padding_mask = tokenized['attention_mask']\n # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()\n\n return attention_mask, position_ids.to(torch.long)" }, { "identifier": "generate_masks_with_special_tokens_and_transfer_map", "path": "models/GroundingDINO/groundingdino/models/GroundingDINO/bertwarper.py", "snippet": "def generate_masks_with_special_tokens_and_transfer_map(tokenized, special_tokens_list, tokenizer):\n \"\"\"Generate attention mask between each pair of special tokens\n Args:\n input_ids (torch.Tensor): input ids. Shape: [bs, num_token]\n special_tokens_mask (list): special tokens mask.\n Returns:\n torch.Tensor: attention mask between each special tokens.\n \"\"\"\n input_ids = tokenized[\"input_ids\"]\n bs, num_token = input_ids.shape\n # special_tokens_mask: bs, num_token. 1 for special tokens. 
0 for normal tokens\n special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()\n for special_token in special_tokens_list:\n special_tokens_mask |= input_ids == special_token\n\n # idxs: each row is a list of indices of special tokens\n idxs = torch.nonzero(special_tokens_mask)\n\n # generate attention mask and positional ids\n attention_mask = (\n torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)\n )\n position_ids = torch.zeros((bs, num_token), device=input_ids.device)\n cate_to_token_mask_list = [[] for _ in range(bs)]\n previous_col = 0\n for i in range(idxs.shape[0]):\n row, col = idxs[i]\n if (col == 0) or (col == num_token - 1):\n attention_mask[row, col, col] = True\n position_ids[row, col] = 0\n else:\n attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True\n position_ids[row, previous_col + 1 : col + 1] = torch.arange(\n 0, col - previous_col, device=input_ids.device\n )\n c2t_maski = torch.zeros((num_token), device=input_ids.device).bool()\n c2t_maski[previous_col + 1 : col] = True\n cate_to_token_mask_list[row].append(c2t_maski)\n previous_col = col\n\n cate_to_token_mask_list = [\n torch.stack(cate_to_token_mask_listi, dim=0)\n for cate_to_token_mask_listi in cate_to_token_mask_list\n ]\n\n # # padding mask\n # padding_mask = tokenized['attention_mask']\n # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()\n\n return attention_mask, position_ids.to(torch.long), cate_to_token_mask_list" }, { "identifier": "build_transformer", "path": "models/GroundingDINO/groundingdino/models/GroundingDINO/transformer.py", "snippet": "def build_transformer(args):\n return Transformer(\n d_model=args.hidden_dim,\n dropout=args.dropout,\n nhead=args.nheads,\n num_queries=args.num_queries,\n dim_feedforward=args.dim_feedforward,\n num_encoder_layers=args.enc_layers,\n num_decoder_layers=args.dec_layers,\n normalize_before=args.pre_norm,\n return_intermediate_dec=True,\n query_dim=args.query_dim,\n activation=args.transformer_activation,\n num_patterns=args.num_patterns,\n num_feature_levels=args.num_feature_levels,\n enc_n_points=args.enc_n_points,\n dec_n_points=args.dec_n_points,\n learnable_tgt_init=True,\n # two stage\n two_stage_type=args.two_stage_type, # ['no', 'standard', 'early']\n embed_init_tgt=args.embed_init_tgt,\n use_text_enhancer=args.use_text_enhancer,\n use_fusion_layer=args.use_fusion_layer,\n use_checkpoint=args.use_checkpoint,\n use_transformer_ckpt=args.use_transformer_ckpt,\n use_text_cross_attention=args.use_text_cross_attention,\n text_dropout=args.text_dropout,\n fusion_dropout=args.fusion_dropout,\n fusion_droppath=args.fusion_droppath,\n )" }, { "identifier": "MLP", "path": "models/GroundingDINO/groundingdino/models/GroundingDINO/utils.py", "snippet": "class MLP(nn.Module):\n \"\"\"Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(\n nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])\n )\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x" }, { "identifier": "ContrastiveEmbed", "path": "models/GroundingDINO/groundingdino/models/GroundingDINO/utils.py", "snippet": "class ContrastiveEmbed(nn.Module):\n def __init__(self, 
max_text_len=256):\n \"\"\"\n Args:\n max_text_len: max length of text.\n \"\"\"\n super().__init__()\n self.max_text_len = max_text_len\n\n def forward(self, x, text_dict):\n \"\"\"_summary_\n\n Args:\n x (_type_): _description_\n text_dict (_type_): _description_\n {\n 'encoded_text': encoded_text, # bs, 195, d_model\n 'text_token_mask': text_token_mask, # bs, 195\n # True for used tokens. False for padding tokens\n }\n Returns:\n _type_: _description_\n \"\"\"\n assert isinstance(text_dict, dict)\n\n y = text_dict[\"encoded_text\"]\n text_token_mask = text_dict[\"text_token_mask\"]\n\n res = x @ y.transpose(-1, -2)\n res.masked_fill_(~text_token_mask[:, None, :], float(\"-inf\"))\n\n # padding to max_text_len\n new_res = torch.full((*res.shape[:-1], self.max_text_len), float(\"-inf\"), device=res.device)\n new_res[..., : res.shape[-1]] = res\n\n return new_res" }, { "identifier": "sigmoid_focal_loss", "path": "models/GroundingDINO/groundingdino/models/GroundingDINO/utils.py", "snippet": "def sigmoid_focal_loss(\n inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2, no_reduction=False\n):\n \"\"\"\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples. Default = -1 (no weighting).\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n Returns:\n Loss tensor\n \"\"\"\n prob = inputs.sigmoid()\n ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n p_t = prob * targets + (1 - prob) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n if no_reduction:\n return loss\n\n return loss.mean(1).sum() / num_boxes" } ]
import copy import torch import torch.nn.functional as F from typing import List from torch import nn from torchvision.ops.boxes import nms from transformers import AutoTokenizer, BertModel, BertTokenizer, RobertaModel, RobertaTokenizerFast from models.GroundingDINO.groundingdino.util import box_ops, get_tokenlizer from models.GroundingDINO.groundingdino.util.misc import ( NestedTensor, accuracy, get_world_size, interpolate, inverse_sigmoid, is_dist_avail_and_initialized, nested_tensor_from_tensor_list, ) from models.GroundingDINO.groundingdino.util.utils import get_phrases_from_posmap from models.GroundingDINO.groundingdino.util.visualizer import COCOVisualizer from models.GroundingDINO.groundingdino.util.vl_utils import create_positive_map_from_span from ..registry import MODULE_BUILD_FUNCS from .backbone import build_backbone from .bertwarper import ( BertModelWarper, generate_masks_with_special_tokens, generate_masks_with_special_tokens_and_transfer_map, ) from .transformer import build_transformer from .utils import MLP, ContrastiveEmbed, sigmoid_focal_loss
11,415
self.nheads = nheads self.max_text_len = 256 self.sub_sentence_present = sub_sentence_present # setting query dim self.query_dim = query_dim assert query_dim == 4 # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # bert self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type) self.bert = get_tokenlizer.get_pretrained_language_model(text_encoder_type) self.bert.pooler.dense.weight.requires_grad_(False) self.bert.pooler.dense.bias.requires_grad_(False) self.bert = BertModelWarper(bert_model=self.bert) self.feat_map = nn.Linear(self.bert.config.hidden_size, self.hidden_dim, bias=True) nn.init.constant_(self.feat_map.bias.data, 0) nn.init.xavier_uniform_(self.feat_map.weight.data) # freeze # special tokens self.specical_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"]) # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), ) ) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == "no", "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList( [ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ] ) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" 
# prepare pred layers self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed _class_embed = ContrastiveEmbed() _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [ copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers) ] class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format( two_stage_type ) if two_stage_type != "no": if two_stage_bbox_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def set_image_tensor(self, samples: NestedTensor): if isinstance(samples, (list, torch.Tensor)):
# ------------------------------------------------------------------------ # Grounding DINO # url: https://github.com/IDEA-Research/GroundingDINO # Copyright (c) 2023 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR model and criterion classes. # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ # Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) # Copyright (c) 2020 SenseTime. All Rights Reserved. # ------------------------------------------------------------------------ class GroundingDINO(nn.Module): """This is the Cross-Attention Detector module that performs object detection""" def __init__( self, backbone, transformer, num_queries, aux_loss=False, iter_update=False, query_dim=2, num_feature_levels=1, nheads=8, # two stage two_stage_type="no", # ['no', 'standard'] dec_pred_bbox_embed_share=True, two_stage_class_embed_share=True, two_stage_bbox_embed_share=True, num_patterns=0, dn_number=100, dn_box_noise_scale=0.4, dn_label_noise_ratio=0.5, dn_labelbook_size=100, text_encoder_type="bert-base-uncased", sub_sentence_present=True, max_text_len=256, ): """Initializes the model. Parameters: backbone: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py num_queries: number of object queries, ie detection slot. This is the maximal number of objects Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
""" super().__init__() self.num_queries = num_queries self.transformer = transformer self.hidden_dim = hidden_dim = transformer.d_model self.num_feature_levels = num_feature_levels self.nheads = nheads self.max_text_len = 256 self.sub_sentence_present = sub_sentence_present # setting query dim self.query_dim = query_dim assert query_dim == 4 # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # bert self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type) self.bert = get_tokenlizer.get_pretrained_language_model(text_encoder_type) self.bert.pooler.dense.weight.requires_grad_(False) self.bert.pooler.dense.bias.requires_grad_(False) self.bert = BertModelWarper(bert_model=self.bert) self.feat_map = nn.Linear(self.bert.config.hidden_size, self.hidden_dim, bias=True) nn.init.constant_(self.feat_map.bias.data, 0) nn.init.xavier_uniform_(self.feat_map.weight.data) # freeze # special tokens self.specical_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"]) # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), ) ) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == "no", "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList( [ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ] ) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" 
# prepare pred layers self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed _class_embed = ContrastiveEmbed() _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [ copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers) ] class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format( two_stage_type ) if two_stage_type != "no": if two_stage_bbox_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def set_image_tensor(self, samples: NestedTensor): if isinstance(samples, (list, torch.Tensor)):
samples = nested_tensor_from_tensor_list(samples)
8
2023-12-21 08:10:23+00:00
16k
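(Editorial aside; the lines above and below are verbatim dataset records and are left untouched.) The GroundingDINO record above quotes a ContrastiveEmbed head whose forward pass scores decoder queries against encoded text tokens, masks out padding tokens, and pads the result to a fixed max_text_len. The minimal Python sketch below replays that step on toy tensors; every shape and value here is an assumption made purely for illustration and is not taken from the record.

import torch

bs, num_queries, d_model = 2, 5, 16            # assumed toy sizes
num_text_tokens, max_text_len = 7, 12

x = torch.randn(bs, num_queries, d_model)      # stand-in for decoder query features
encoded_text = torch.randn(bs, num_text_tokens, d_model)
text_token_mask = torch.ones(bs, num_text_tokens, dtype=torch.bool)
text_token_mask[:, -2:] = False                # pretend the last two tokens are padding

# Dot-product similarity between queries and text tokens, then mask padded tokens to -inf,
# mirroring the quoted ContrastiveEmbed.forward.
res = x @ encoded_text.transpose(-1, -2)       # (bs, num_queries, num_text_tokens)
res = res.masked_fill(~text_token_mask[:, None, :], float("-inf"))

# Pad the token dimension out to max_text_len with -inf, as the quoted module does.
logits = torch.full((bs, num_queries, max_text_len), float("-inf"))
logits[..., :num_text_tokens] = res
print(logits.shape)                            # torch.Size([2, 5, 12])

Logits of this padded shape are the kind of input a focal-style criterion such as the quoted sigmoid_focal_loss would consume during training; that pairing is a reading of the snippets, not something the record states explicitly.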
chinhsuanwu/ifusion
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config, **kwargs):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**kwargs, **config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = 
dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, 
ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, 
**kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = 
lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n # iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n # for i, step in enumerate(iterator):\n for i, step in enumerate(time_range):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, 
i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), 
desc=\"Encoding Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec" }, { "identifier": "CrossAttention", "path": "ldm/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = 
self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)" } ]
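The CrossAttention snippet closing the context list above projects queries, keys and values, splits them into heads, and applies softmax(q k^T * scale) to the values. A minimal standalone sketch of just that attention arithmetic, with the learned projections omitted and purely illustrative shapes (this is a paraphrase of the snippet, not the repo's API):

import torch
from torch import einsum
from einops import rearrange

def attention_step(q, k, v, heads, scale):
    # Split heads, attend, merge heads back, mirroring CrossAttention.forward above.
    q, k, v = (rearrange(t, "b n (h d) -> (b h) n d", h=heads) for t in (q, k, v))
    sim = einsum("b i d, b j d -> b i j", q, k) * scale   # (b*h, n_q, n_k)
    attn = sim.softmax(dim=-1)
    out = einsum("b i j, b j d -> b i d", attn, v)
    return rearrange(out, "(b h) n d -> b n (h d)", h=heads)

# Illustrative only: batch 2, 16 query tokens, 77 context tokens, 8 heads of dim 64.
q = torch.randn(2, 16, 8 * 64)
k = v = torch.randn(2, 77, 8 * 64)
out = attention_step(q, k, v, heads=8, scale=64 ** -0.5)   # -> (2, 16, 512)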
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities import rank_zero_only from omegaconf import ListConfig from ldm.util import ( log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config, ) from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import ( normal_kl, DiagonalGaussianDistribution, ) from ldm.models.autoencoder import ( VQModelInterface, IdentityFirstStage, AutoencoderKL, ) from ldm.modules.diffusionmodules.util import ( make_beta_schedule, extract_into_tensor, noise_like, ) from ldm.models.diffusion.ddim import DDIMSampler from ldm.modules.attention import CrossAttention
12198
opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image_cond", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config):
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image_target", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") elif self.loss_type == "smooth_l1": if mean: loss = torch.nn.functional.smooth_l1_loss(target, pred) else: loss = torch.nn.functional.smooth_l1_loss( target, pred, reduction="none" ) else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def 
p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) 
diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image_cond", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting 
self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config):
model = instantiate_from_config(config)
7
2023-12-17 12:45:38+00:00
16k
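This record's context also carries the DDIM sampler: inside p_sample_ddim the update predicts x_0 from the current latent and the noise estimate, forms a direction term toward x_t, and optionally adds sigma-scaled noise. A minimal numeric sketch of that step with guidance, quantization and thresholding stripped out (the alpha values below are made up rather than taken from a real schedule, and sqrt(1 - a_t) is recomputed here instead of read from the precomputed table used in the snippet):

import torch

def ddim_step(x, e_t, a_t, a_prev, sigma_t=0.0):
    # Same arithmetic as p_sample_ddim: predict x_0, form the direction to x_t,
    # then combine; sigma_t = 0 gives the deterministic DDIM update.
    pred_x0 = (x - (1.0 - a_t).sqrt() * e_t) / a_t.sqrt()
    dir_xt = (1.0 - a_prev - sigma_t ** 2).sqrt() * e_t
    noise = sigma_t * torch.randn_like(x)
    return a_prev.sqrt() * pred_x0 + dir_xt + noise, pred_x0

# Illustrative only: one 4x64x64 latent, e_t standing in for the model's noise prediction.
x = torch.randn(1, 4, 64, 64)
e_t = torch.randn_like(x)
x_prev, pred_x0 = ddim_step(x, e_t, a_t=torch.tensor(0.5), a_prev=torch.tensor(0.6))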
wangzhecheng/SkyScript
customized_train_and_test.py
[ { "identifier": "create_model_and_transforms", "path": "src/open_clip/factory.py", "snippet": "def create_model_and_transforms(\n model_name: str,\n pretrained: Optional[str] = None,\n precision: str = 'fp32',\n device: Union[str, torch.device] = 'cpu',\n jit: bool = False,\n force_quick_gelu: bool = False,\n force_custom_text: bool = False,\n force_patch_dropout: Optional[float] = None,\n force_image_size: Optional[Union[int, Tuple[int, int]]] = None,\n pretrained_image: bool = False,\n pretrained_hf: bool = True,\n image_mean: Optional[Tuple[float, ...]] = None,\n image_std: Optional[Tuple[float, ...]] = None,\n aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,\n cache_dir: Optional[str] = None,\n output_dict: Optional[bool] = None,\n):\n model = create_model(\n model_name,\n pretrained,\n precision=precision,\n device=device,\n jit=jit,\n force_quick_gelu=force_quick_gelu,\n force_custom_text=force_custom_text,\n force_patch_dropout=force_patch_dropout,\n force_image_size=force_image_size,\n pretrained_image=pretrained_image,\n pretrained_hf=pretrained_hf,\n cache_dir=cache_dir,\n output_dict=output_dict,\n )\n\n image_mean = image_mean or getattr(model.visual, 'image_mean', None)\n image_std = image_std or getattr(model.visual, 'image_std', None)\n preprocess_train = image_transform(\n model.visual.image_size,\n is_train=True,\n mean=image_mean,\n std=image_std,\n aug_cfg=aug_cfg,\n )\n preprocess_val = image_transform(\n model.visual.image_size,\n is_train=False,\n mean=image_mean,\n std=image_std,\n )\n\n return model, preprocess_train, preprocess_val" }, { "identifier": "get_tokenizer", "path": "src/open_clip/factory.py", "snippet": "def get_tokenizer(model_name):\n if model_name.startswith(HF_HUB_PREFIX):\n tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):])\n else:\n config = get_model_config(model_name)\n tokenizer = HFTokenizer(\n config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize\n return tokenizer" }, { "identifier": "create_loss", "path": "src/open_clip/factory.py", "snippet": "def create_loss(args):\n if args.distill:\n return DistillClipLoss(\n local_loss=args.local_loss,\n gather_with_grad=args.gather_with_grad,\n cache_labels=True,\n rank=args.rank,\n world_size=args.world_size,\n use_horovod=args.horovod,\n )\n elif \"coca\" in args.model.lower():\n return CoCaLoss(\n caption_loss_weight=args.coca_caption_loss_weight,\n clip_loss_weight=args.coca_contrastive_loss_weight,\n local_loss=args.local_loss,\n gather_with_grad=args.gather_with_grad,\n cache_labels=True,\n rank=args.rank,\n world_size=args.world_size,\n use_horovod=args.horovod,\n )\n return ClipLoss(\n local_loss=args.local_loss,\n gather_with_grad=args.gather_with_grad,\n cache_labels=True,\n rank=args.rank,\n world_size=args.world_size,\n use_horovod=args.horovod,\n )" }, { "identifier": "trace_model", "path": "src/open_clip/model.py", "snippet": "def trace_model(model, batch_size=256, device=torch.device('cpu')):\n model.eval()\n image_size = model.visual.image_size\n example_images = torch.ones((batch_size, 3, image_size, image_size), device=device)\n example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device)\n model = torch.jit.trace_module(\n model,\n inputs=dict(\n forward=(example_images, example_text),\n encode_text=(example_text,),\n encode_image=(example_images,)\n ))\n model.visual.image_size = image_size\n return model" }, { "identifier": "get_data", "path": "src/training/data.py", "snippet": 
"def get_data(args, preprocess_fns, epoch=0, tokenizer=None):\n preprocess_train, preprocess_val = preprocess_fns\n data = {}\n\n if args.train_data or args.dataset_type == \"synthetic\":\n data[\"train\"] = get_dataset_fn(args.train_data, args.dataset_type)(\n args, preprocess_train, is_train=True, epoch=epoch, tokenizer=tokenizer)\n\n if args.val_data:\n data[\"val\"] = get_dataset_fn(args.val_data, args.dataset_type)(\n args, preprocess_val, is_train=False, tokenizer=tokenizer)\n\n if args.imagenet_val is not None:\n data[\"imagenet-val\"] = get_imagenet(args, preprocess_fns, \"val\")\n\n if args.imagenet_v2 is not None:\n data[\"imagenet-v2\"] = get_imagenet(args, preprocess_fns, \"v2\")\n\n return data" }, { "identifier": "is_master", "path": "src/training/distributed.py", "snippet": "def is_master(args, local=False):\n return is_local_master(args) if local else is_global_master(args)" }, { "identifier": "init_distributed_device", "path": "src/training/distributed.py", "snippet": "def init_distributed_device(args):\n # Distributed training = training on more than one GPU.\n # Works in both single and multi-node scenarios.\n args.distributed = False\n args.world_size = 1\n args.rank = 0 # global rank\n args.local_rank = 0\n if args.horovod:\n assert hvd is not None, \"Horovod is not installed\"\n hvd.init()\n args.local_rank = int(hvd.local_rank())\n args.rank = hvd.rank()\n args.world_size = hvd.size()\n args.distributed = True\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n os.environ['RANK'] = str(args.rank)\n os.environ['WORLD_SIZE'] = str(args.world_size)\n elif is_using_distributed():\n if 'SLURM_PROCID' in os.environ:\n # DDP via SLURM\n args.local_rank, args.rank, args.world_size = world_info_from_env()\n # SLURM var -> torch.distributed vars in case needed\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n os.environ['RANK'] = str(args.rank)\n os.environ['WORLD_SIZE'] = str(args.world_size)\n torch.distributed.init_process_group(\n backend=args.dist_backend,\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.rank,\n )\n else:\n # DDP via torchrun, torch.distributed.launch\n args.local_rank, _, _ = world_info_from_env()\n torch.distributed.init_process_group(\n backend=args.dist_backend,\n init_method=args.dist_url)\n args.world_size = torch.distributed.get_world_size()\n args.rank = torch.distributed.get_rank()\n args.distributed = True\n\n if torch.cuda.is_available():\n if args.distributed and not args.no_set_device_rank:\n device = 'cuda:%d' % args.local_rank\n else:\n device = 'cuda:0'\n torch.cuda.set_device(device)\n else:\n device = 'cpu'\n args.device = device\n device = torch.device(device)\n return device" }, { "identifier": "broadcast_object", "path": "src/training/distributed.py", "snippet": "def broadcast_object(args, obj, src=0):\n # broadcast a pickle-able python object from rank-0 to all ranks\n if args.horovod:\n return hvd.broadcast_object(obj, root_rank=src)\n else:\n if args.rank == src:\n objects = [obj]\n else:\n objects = [None]\n dist.broadcast_object_list(objects, src=src)\n return objects[0]" }, { "identifier": "setup_logging", "path": "src/training/logger.py", "snippet": "def setup_logging(log_file, level, include_host=False):\n if include_host:\n import socket\n hostname = socket.gethostname()\n formatter = logging.Formatter(\n f'%(asctime)s | {hostname} | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S')\n else:\n formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s', 
datefmt='%Y-%m-%d,%H:%M:%S')\n\n logging.root.setLevel(level)\n loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]\n for logger in loggers:\n logger.setLevel(level)\n\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logging.root.addHandler(stream_handler)\n\n if log_file:\n file_handler = logging.FileHandler(filename=log_file)\n file_handler.setFormatter(formatter)\n logging.root.addHandler(file_handler)" }, { "identifier": "cosine_lr", "path": "src/training/scheduler.py", "snippet": "def cosine_lr(optimizer, base_lr, warmup_length, steps):\n def _lr_adjuster(step):\n if step < warmup_length:\n lr = _warmup_lr(base_lr, warmup_length, step)\n else:\n e = step - warmup_length\n es = steps - warmup_length\n lr = 0.5 * (1 + np.cos(np.pi * e / es)) * base_lr\n assign_learning_rate(optimizer, lr)\n return lr\n return _lr_adjuster" }, { "identifier": "const_lr", "path": "src/training/scheduler.py", "snippet": "def const_lr(optimizer, base_lr, warmup_length, steps):\n def _lr_adjuster(step):\n if step < warmup_length:\n lr = _warmup_lr(base_lr, warmup_length, step)\n else:\n lr = base_lr\n assign_learning_rate(optimizer, lr)\n return lr\n return _lr_adjuster" }, { "identifier": "const_lr_cooldown", "path": "src/training/scheduler.py", "snippet": "def const_lr_cooldown(optimizer, base_lr, warmup_length, steps, cooldown_steps, cooldown_power=1.0, cooldown_end_lr=0.):\n def _lr_adjuster(step):\n start_cooldown_step = steps - cooldown_steps\n if step < warmup_length:\n lr = _warmup_lr(base_lr, warmup_length, step)\n else:\n if step < start_cooldown_step:\n lr = base_lr\n else:\n e = step - start_cooldown_step\n es = steps - start_cooldown_step\n # linear decay if power == 1; polynomial decay otherwise;\n decay = (1 - (e/es)) ** cooldown_power\n lr = decay * (base_lr - cooldown_end_lr) + cooldown_end_lr\n assign_learning_rate(optimizer, lr)\n return lr\n return _lr_adjuster" }, { "identifier": "train_one_epoch", "path": "src/training/train.py", "snippet": "def train_one_epoch(model, data, loss, epoch, optimizer, scaler, scheduler, dist_model, args, tb_writer=None):\n device = torch.device(args.device)\n autocast = get_autocast(args.precision)\n input_dtype = get_input_dtype(args.precision)\n\n\n model.train()\n if args.distill:\n dist_model.eval()\n\n data['train'].set_epoch(epoch) # set epoch in process safe manner via sampler or shared_epoch\n dataloader = data['train'].dataloader\n num_batches_per_epoch = dataloader.num_batches // args.accum_freq\n sample_digits = math.ceil(math.log(dataloader.num_samples + 1, 10))\n\n if args.accum_freq > 1:\n accum_images, accum_texts, accum_features = [], [], {}\n\n losses_m = {}\n batch_time_m = AverageMeter()\n data_time_m = AverageMeter()\n end = time.time()\n for i, batch in enumerate(dataloader):\n i_accum = i // args.accum_freq\n step = num_batches_per_epoch * epoch + i_accum\n\n if not args.skip_scheduler:\n scheduler(step)\n\n images, texts = batch\n images = images.to(device=device, dtype=input_dtype, non_blocking=True)\n texts = texts.to(device=device, non_blocking=True)\n\n data_time_m.update(time.time() - end)\n optimizer.zero_grad()\n\n if args.accum_freq == 1:\n with autocast():\n model_out = model(images, texts)\n logit_scale = model_out[\"logit_scale\"]\n if args.distill:\n with torch.no_grad():\n dist_model_out = dist_model(images, texts)\n model_out.update({f'dist_{k}' : v for k, v in dist_model_out.items()})\n losses = loss(**model_out, output_dict=True)\n\n total_loss = 
sum(losses.values())\n losses[\"loss\"] = total_loss\n\n backward(total_loss, scaler)\n else:\n # First, cache the features without any gradient tracking.\n with torch.no_grad():\n with autocast():\n model_out = model(images, texts)\n model_out.pop(\"logit_scale\")\n for key, val in model_out.items():\n if key in accum_features:\n accum_features[key].append(val)\n else:\n accum_features[key] = [val]\n\n accum_images.append(images)\n accum_texts.append(texts)\n\n # If (i + 1) % accum_freq is not zero, move on to the next batch.\n if ((i + 1) % args.accum_freq) > 0:\n # FIXME this makes data time logging unreliable when accumulating\n continue\n\n # Now, ready to take gradients for the last accum_freq batches.\n # Re-do the forward pass for those batches, and use the cached features from the other batches as negatives.\n # Call backwards each time, but only step optimizer at the end.\n optimizer.zero_grad()\n for j in range(args.accum_freq):\n images = accum_images[j]\n texts = accum_texts[j]\n with autocast():\n model_out = model(images, texts)\n logit_scale = model_out.pop(\"logit_scale\")\n inputs = {}\n for key, val in accum_features.items():\n accumulated = accum_features[key]\n inputs[key] = torch.cat(accumulated[:j] + [model_out[key]] + accumulated[j + 1:])\n losses = loss(**inputs, logit_scale=logit_scale, output_dict=True)\n del inputs\n total_loss = sum(losses.values())\n losses[\"loss\"] = total_loss\n backward(total_loss, scaler)\n\n if scaler is not None:\n if args.horovod:\n optimizer.synchronize()\n scaler.unscale_(optimizer)\n if args.grad_clip_norm is not None:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_norm, norm_type=2.0)\n with optimizer.skip_synchronize():\n scaler.step(optimizer)\n else:\n if args.grad_clip_norm is not None:\n scaler.unscale_(optimizer)\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_norm, norm_type=2.0)\n scaler.step(optimizer)\n scaler.update()\n else:\n if args.grad_clip_norm is not None:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_norm, norm_type=2.0)\n optimizer.step()\n\n # reset gradient accum, if enabled\n if args.accum_freq > 1:\n accum_images, accum_texts, accum_features = [], [], {}\n\n # Note: we clamp to 4.6052 = ln(100), as in the original paper.\n with torch.no_grad():\n unwrap_model(model).logit_scale.clamp_(0, math.log(100))\n\n batch_time_m.update(time.time() - end)\n end = time.time()\n batch_count = i_accum + 1\n if is_master(args) and (i_accum % args.log_every_n_steps == 0 or batch_count == num_batches_per_epoch):\n batch_size = len(images)\n num_samples = batch_count * batch_size * args.accum_freq * args.world_size\n samples_per_epoch = dataloader.num_samples\n percent_complete = 100.0 * batch_count / num_batches_per_epoch\n\n # NOTE loss is coarsely sampled, just master node and per log update\n for key, val in losses.items():\n if key not in losses_m:\n losses_m[key] = AverageMeter()\n losses_m[key].update(val.item(), batch_size)\n\n logit_scale_scalar = logit_scale.item()\n loss_log = \" \".join(\n [\n f\"{loss_name.capitalize()}: {loss_m.val:#.5g} ({loss_m.avg:#.5g})\" \n for loss_name, loss_m in losses_m.items()\n ]\n )\n samples_per_second = args.accum_freq * args.batch_size * args.world_size / batch_time_m.val\n samples_per_second_per_gpu = args.accum_freq * args.batch_size / batch_time_m.val\n logging.info(\n f\"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] \"\n f\"Data (t): {data_time_m.avg:.3f} \"\n 
f\"Batch (t): {batch_time_m.avg:.3f}, {samples_per_second:#g}/s, {samples_per_second_per_gpu:#g}/s/gpu \"\n f\"LR: {optimizer.param_groups[0]['lr']:5f} \"\n f\"Logit Scale: {logit_scale_scalar:.3f} \" + loss_log\n )\n\n # Save train loss / etc. Using non avg meter values as loggers have their own smoothing\n log_data = {\n \"data_time\": data_time_m.val,\n \"batch_time\": batch_time_m.val,\n \"samples_per_second\": samples_per_second,\n \"samples_per_second_per_gpu\": samples_per_second_per_gpu,\n \"scale\": logit_scale_scalar,\n \"lr\": optimizer.param_groups[0][\"lr\"]\n } \n log_data.update({name:val.val for name,val in losses_m.items()})\n\n for name, val in log_data.items():\n name = \"train/\" + name\n if tb_writer is not None:\n tb_writer.add_scalar(name, val, step)\n if args.wandb:\n assert wandb is not None, 'Please install wandb.'\n wandb.log({name: val, 'step': step})\n\n # resetting batch / data time meters per log window\n batch_time_m.reset()\n data_time_m.reset()\n # end for" }, { "identifier": "evaluate", "path": "src/training/train.py", "snippet": "def evaluate(model, data, epoch, args, tb_writer=None):\n metrics = {}\n if not is_master(args):\n return metrics\n device = torch.device(args.device)\n model.eval()\n\n zero_shot_metrics = zero_shot_eval(model, data, epoch, args)\n metrics.update(zero_shot_metrics)\n\n autocast = get_autocast(args.precision)\n input_dtype = get_input_dtype(args.precision)\n\n if 'val' in data and (args.val_frequency and ((epoch % args.val_frequency) == 0 or epoch == args.epochs)):\n dataloader = data['val'].dataloader\n num_samples = 0\n samples_per_val = dataloader.num_samples\n\n # FIXME this does not scale past small eval datasets\n # all_image_features @ all_text_features will blow up memory and compute very quickly\n cumulative_loss = 0.0\n cumulative_gen_loss = 0.0\n all_image_features, all_text_features = [], []\n with torch.no_grad():\n for i, batch in enumerate(dataloader):\n images, texts = batch\n images = images.to(device=device, dtype=input_dtype, non_blocking=True)\n texts = texts.to(device=device, non_blocking=True)\n\n with autocast():\n model_out = model(images, texts)\n image_features = model_out[\"image_features\"]\n text_features = model_out[\"text_features\"]\n logit_scale = model_out[\"logit_scale\"]\n # features are accumulated in CPU tensors, otherwise GPU memory exhausted quickly\n # however, system RAM is easily exceeded and compute time becomes problematic\n all_image_features.append(image_features.cpu())\n all_text_features.append(text_features.cpu())\n logit_scale = logit_scale.mean()\n logits_per_image = logit_scale * image_features @ text_features.t()\n logits_per_text = logits_per_image.t()\n\n batch_size = images.shape[0]\n labels = torch.arange(batch_size, device=device).long()\n total_loss = (\n F.cross_entropy(logits_per_image, labels) +\n F.cross_entropy(logits_per_text, labels)\n ) / 2\n\n gen_loss = maybe_compute_generative_loss(model_out)\n\n cumulative_loss += total_loss * batch_size\n num_samples += batch_size\n if is_master(args) and (i % 100) == 0:\n logging.info(\n f\"Eval Epoch: {epoch} [{num_samples} / {samples_per_val}]\\t\"\n f\"Clip Loss: {cumulative_loss / num_samples:.6f}\\t\")\n\n if gen_loss is not None:\n cumulative_gen_loss += gen_loss * batch_size\n logging.info(\n f\"Generative Loss: {cumulative_gen_loss / num_samples:.6f}\\t\")\n\n val_metrics = get_clip_metrics(\n image_features=torch.cat(all_image_features),\n text_features=torch.cat(all_text_features),\n 
logit_scale=logit_scale.cpu(),\n )\n loss = cumulative_loss / num_samples\n metrics.update(\n {**val_metrics, \"clip_val_loss\": loss.item(), \"epoch\": epoch, \"num_samples\": num_samples}\n )\n if gen_loss is not None:\n gen_loss = cumulative_gen_loss / num_samples\n metrics.update({\"val_generative_loss\": gen_loss.item()})\n\n if not metrics:\n return metrics\n\n logging.info(\n f\"Eval Epoch: {epoch} \"\n + \"\\t\".join([f\"{k}: {round(v, 4):.4f}\" for k, v in metrics.items()])\n )\n\n if args.save_logs:\n for name, val in metrics.items():\n if tb_writer is not None:\n tb_writer.add_scalar(f\"val/{name}\", val, epoch)\n\n with open(os.path.join(args.checkpoint_path, \"results.jsonl\"), \"a+\") as f:\n f.write(json.dumps(metrics))\n f.write(\"\\n\")\n\n if args.wandb:\n assert wandb is not None, 'Please install wandb.'\n for name, val in metrics.items():\n wandb.log({f\"val/{name}\": val, 'epoch': epoch})\n\n return metrics" }, { "identifier": "pt_load", "path": "src/training/file_utils.py", "snippet": "def pt_load(file_path, map_location=None):\n if file_path.startswith('s3'):\n logging.info('Loading remote checkpoint, which may take a bit.')\n of = fsspec.open(file_path, \"rb\")\n with of as f:\n out = torch.load(f, map_location=map_location)\n return out" }, { "identifier": "check_exists", "path": "src/training/file_utils.py", "snippet": "def check_exists(file_path):\n try:\n with fsspec.open(file_path):\n pass\n except FileNotFoundError:\n return False\n return True" }, { "identifier": "start_sync_process", "path": "src/training/file_utils.py", "snippet": "def start_sync_process(sync_every, local_dir, remote_dir, protocol):\n p = multiprocessing.Process(target=keep_running_remote_sync, args=(sync_every, local_dir, remote_dir, protocol))\n return p" }, { "identifier": "remote_sync", "path": "src/training/file_utils.py", "snippet": "def remote_sync(local_dir, remote_dir, protocol):\n logging.info('Starting remote sync.')\n if protocol == 's3':\n return remote_sync_s3(local_dir, remote_dir)\n elif protocol == 'fsspec':\n return remote_sync_fsspec(local_dir, remote_dir)\n else:\n logging.error('Remote protocol not known')\n return False" }, { "identifier": "natural_key", "path": "src/training/main.py", "snippet": "def natural_key(string_):\n \"\"\"See http://www.codinghorror.com/blog/archives/001018.html\"\"\"\n return [int(s) if s.isdigit() else s for s in re.split(r'(\\d+)', string_.lower())]" }, { "identifier": "get_latest_checkpoint", "path": "src/training/main.py", "snippet": "def get_latest_checkpoint(path: str, remote : bool):\n # as writen, this glob recurses, so can pick up checkpoints across multiple sub-folders\n if remote:\n result = subprocess.run([\"aws\", \"s3\", \"ls\", path + \"/\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n print(result)\n if result.returncode == 1:\n return None\n checkpoints = [os.path.join(path, x.split(' ')[-1]) for x in result.stdout.decode().split('\\n')[:-1]]\n else:\n checkpoints = glob.glob(path + '**/*.pt', recursive=True)\n if checkpoints:\n checkpoints = sorted(checkpoints, key=natural_key)\n return checkpoints[-1]\n return None" }, { "identifier": "copy_codebase", "path": "src/training/main.py", "snippet": "def copy_codebase(args):\n from shutil import copytree, ignore_patterns\n new_code_path = os.path.join(args.logs, args.name, \"code\")\n if os.path.exists(new_code_path):\n print(\n f\"Error. Experiment already exists at {new_code_path}. 
Use --name to specify a new experiment.\"\n )\n return -1\n print(f\"Copying codebase to {new_code_path}\")\n current_code_path = os.path.realpath(__file__)\n for _ in range(3):\n current_code_path = os.path.dirname(current_code_path)\n copytree(current_code_path, new_code_path, ignore=ignore_patterns('log', 'logs', 'wandb'))\n print(\"Done copying code.\")\n return 1" }, { "identifier": "parse_args", "path": "params.py", "snippet": "def parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--root-data-dir\",\n type=str,\n default=None,\n help=\"Root directory to datasets\",\n )\n parser.add_argument(\n \"--train-data\",\n type=str,\n default=None,\n help=\"Path to file(s) with training data. When using webdataset, multiple datasources can be combined using the `::` separator.\",\n )\n parser.add_argument(\n \"--train-data-upsampling-factors\",\n type=str,\n default=None,\n help=(\n \"When using multiple data sources with webdataset and sampling with replacement, this can be used to upsample specific data sources. \"\n \"Similar to --train-data, this should be a string with as many numbers as there are data sources, separated by `::` (e.g. 1::2::0.5) \"\n \"By default, datapoints are sampled uniformly regardless of the dataset sizes.\"\n )\n )\n parser.add_argument(\n \"--val-data\",\n type=str,\n default=None,\n help=\"Path to file(s) with validation data\",\n )\n parser.add_argument(\n \"--train-num-samples\",\n type=int,\n default=None,\n help=\"Number of samples in dataset. Required for webdataset if not available in info file.\",\n )\n parser.add_argument(\n \"--val-num-samples\",\n type=int,\n default=None,\n help=\"Number of samples in dataset. Useful for webdataset if not available in info file.\",\n )\n parser.add_argument(\n \"--dataset-type\",\n choices=[\"webdataset\", \"csv\", \"synthetic\", \"auto\"],\n default=\"auto\",\n help=\"Which type of dataset to process.\"\n )\n parser.add_argument(\n \"--dataset-resampled\",\n default=False,\n action=\"store_true\",\n help=\"Whether to use sampling with replacement for webdataset shard selection.\"\n )\n parser.add_argument(\n \"--csv-separator\",\n type=str,\n default=\"\\t\",\n help=\"For csv-like datasets, which separator to use.\"\n )\n parser.add_argument(\n \"--csv-img-key\",\n type=str,\n default=\"filepath\",\n help=\"For csv-like datasets, the name of the key for the image paths.\"\n )\n parser.add_argument(\n \"--csv-caption-key\",\n type=str,\n default=\"title\",\n help=\"For csv-like datasets, the name of the key for the captions.\"\n )\n parser.add_argument(\n \"--imagenet-val\",\n type=str,\n default=None,\n help=\"Path to imagenet val set for conducting zero shot evaluation.\",\n )\n parser.add_argument(\n \"--imagenet-v2\",\n type=str,\n default=None,\n help=\"Path to imagenet v2 for conducting zero shot evaluation.\",\n )\n parser.add_argument(\n \"--logs\",\n type=str,\n default=\"./logs/\",\n help=\"Where to store tensorboard logs. Use None to avoid storing logs.\",\n )\n parser.add_argument(\n \"--log-local\",\n action=\"store_true\",\n default=False,\n help=\"log files on local master, otherwise global master only.\",\n )\n parser.add_argument(\n \"--name\",\n type=str,\n default=None,\n help=\"Optional identifier for the experiment when storing logs. 
Otherwise use current time.\",\n )\n parser.add_argument(\n \"--workers\", type=int, default=1, help=\"Number of dataloader workers per GPU.\"\n )\n parser.add_argument(\n \"--batch-size\", type=int, default=64, help=\"Batch size per GPU.\"\n )\n parser.add_argument(\n \"--epochs\", type=int, default=32, help=\"Number of epochs to train for.\"\n )\n parser.add_argument(\n \"--epochs-cooldown\", type=int, default=None,\n help=\"When scheduler w/ cooldown used, perform cooldown from total_epochs - cooldown_epochs onwards.\"\n )\n parser.add_argument(\"--lr\", type=float, default=None, help=\"Learning rate.\")\n parser.add_argument(\"--beta1\", type=float, default=None, help=\"Adam beta 1.\")\n parser.add_argument(\"--beta2\", type=float, default=None, help=\"Adam beta 2.\")\n parser.add_argument(\"--eps\", type=float, default=None, help=\"Adam epsilon.\")\n parser.add_argument(\"--wd\", type=float, default=0.2, help=\"Weight decay.\")\n parser.add_argument(\n \"--warmup\", type=int, default=10000, help=\"Number of steps to warmup for.\"\n )\n parser.add_argument(\n \"--use-bn-sync\",\n default=False,\n action=\"store_true\",\n help=\"Whether to use batch norm sync.\")\n parser.add_argument(\n \"--skip-scheduler\",\n action=\"store_true\",\n default=False,\n help=\"Use this flag to skip the learning rate decay.\",\n )\n parser.add_argument(\n \"--lr-scheduler\",\n type=str,\n default='cosine',\n help=\"LR scheduler. One of: 'cosine', 'const' (constant), 'const-cooldown' (constant w/ cooldown). Default: cosine\",\n )\n parser.add_argument(\n \"--lr-cooldown-end\", type=float, default=0.0,\n help=\"End learning rate for cooldown schedule. Default: 0\"\n )\n parser.add_argument(\n \"--lr-cooldown-power\", type=float, default=1.0,\n help=\"Power for polynomial cooldown schedule. 
Default: 1.0 (linear decay)\"\n )\n parser.add_argument(\n \"--save-frequency\", type=int, default=1, help=\"How often to save checkpoints.\"\n )\n parser.add_argument(\n \"--save-most-recent\",\n action=\"store_true\",\n default=False,\n help=\"Always save the most recent model trained to epoch_latest.pt.\",\n )\n parser.add_argument(\n \"--zeroshot-frequency\", type=int, default=2, help=\"How often to run zero shot.\"\n )\n parser.add_argument(\n \"--val-frequency\", type=int, default=1, help=\"How often to run evaluation with val data.\"\n )\n parser.add_argument(\n \"--resume\",\n default=None,\n type=str,\n help=\"path to latest checkpoint (default: none)\",\n )\n parser.add_argument(\n \"--precision\",\n choices=[\"amp\", \"amp_bf16\", \"amp_bfloat16\", \"bf16\", \"fp16\", \"fp32\"],\n default=\"amp\",\n help=\"Floating point precision.\"\n )\n parser.add_argument(\n \"--model\",\n type=str,\n default=\"RN50\",\n help=\"Name of the vision backbone to use.\",\n )\n parser.add_argument(\n \"--pretrained\",\n default='',\n type=str,\n help=\"Use a pretrained CLIP model weights with the specified tag or file path.\",\n )\n parser.add_argument(\n \"--pretrained-image\",\n default=False,\n action='store_true',\n help=\"Load imagenet pretrained weights for image tower backbone if available.\",\n )\n parser.add_argument(\n \"--lock-image\",\n default=False,\n action='store_true',\n help=\"Lock full image tower by disabling gradients.\",\n )\n parser.add_argument(\n \"--lock-image-unlocked-groups\",\n type=int,\n default=0,\n help=\"Leave last n image tower layer groups unlocked.\",\n )\n parser.add_argument(\n \"--lock-image-freeze-bn-stats\",\n default=False,\n action='store_true',\n help=\"Freeze BatchNorm running stats in image tower for any locked layers.\",\n )\n parser.add_argument(\n '--image-mean', type=float, nargs='+', default=None, metavar='MEAN',\n help='Override default image mean value of dataset')\n parser.add_argument(\n '--image-std', type=float, nargs='+', default=None, metavar='STD',\n help='Override default image std deviation of of dataset')\n parser.add_argument('--aug-cfg', nargs='*', default={}, action=ParseKwargs)\n parser.add_argument(\n \"--grad-checkpointing\",\n default=False,\n action='store_true',\n help=\"Enable gradient checkpointing.\",\n )\n parser.add_argument(\n \"--local-loss\",\n default=False,\n action=\"store_true\",\n help=\"calculate loss w/ local features @ global (instead of realizing full global @ global matrix)\"\n )\n parser.add_argument(\n \"--gather-with-grad\",\n default=False,\n action=\"store_true\",\n help=\"enable full distributed gradient for feature gather\"\n )\n parser.add_argument(\n '--force-image-size', type=int, nargs='+', default=None,\n help='Override default image size'\n )\n parser.add_argument(\n \"--force-quick-gelu\",\n default=False,\n action='store_true',\n help=\"Force use of QuickGELU activation for non-OpenAI transformer models.\",\n )\n parser.add_argument(\n \"--force-patch-dropout\",\n default=None,\n type=float,\n help=\"Override the patch dropout during training, for fine tuning with no dropout near the end as in the paper\",\n )\n parser.add_argument(\n \"--force-custom-text\",\n default=False,\n action='store_true',\n help=\"Force use of CustomTextCLIP model (separate text-tower).\",\n )\n parser.add_argument(\n \"--torchscript\",\n default=False,\n action='store_true',\n help=\"torch.jit.script the model, also uses jit version of OpenAI models if pretrained=='openai'\",\n )\n parser.add_argument(\n 
\"--trace\",\n default=False,\n action='store_true',\n help=\"torch.jit.trace the model for inference / eval only\",\n )\n parser.add_argument(\n \"--accum-freq\", type=int, default=1, help=\"Update the model every --acum-freq steps.\"\n )\n # arguments for distributed training\n parser.add_argument(\n \"--dist-url\",\n default=\"env://\",\n type=str,\n help=\"url used to set up distributed training\",\n )\n parser.add_argument(\n \"--dist-backend\", default=\"nccl\", type=str, help=\"distributed backend\"\n )\n parser.add_argument(\n \"--report-to\",\n default='',\n type=str,\n help=\"Options are ['wandb', 'tensorboard', 'wandb,tensorboard']\"\n )\n parser.add_argument(\n \"--wandb-notes\",\n default='',\n type=str,\n help=\"Notes if logging with wandb\"\n )\n parser.add_argument(\n \"--wandb-project-name\",\n type=str,\n default='open-clip',\n help=\"Name of the project if logging with wandb.\",\n )\n parser.add_argument(\n \"--debug\",\n default=False,\n action=\"store_true\",\n help=\"If true, more information is logged.\"\n )\n parser.add_argument(\n \"--copy-codebase\",\n default=False,\n action=\"store_true\",\n help=\"If true, we copy the entire base on the log directory, and execute from there.\"\n )\n parser.add_argument(\n \"--horovod\",\n default=False,\n action=\"store_true\",\n help=\"Use horovod for distributed training.\"\n )\n parser.add_argument(\n \"--ddp-static-graph\",\n default=False,\n action='store_true',\n help=\"Enable static graph optimization for DDP in PyTorch >= 1.11.\",\n )\n parser.add_argument(\n \"--no-set-device-rank\",\n default=False,\n action=\"store_true\",\n help=\"Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc).\"\n )\n parser.add_argument(\n \"--seed\", type=int, default=0, help=\"Default random seed.\"\n )\n parser.add_argument(\n \"--grad-clip-norm\", type=float, default=None, help=\"Gradient clip.\"\n )\n parser.add_argument(\n \"--lock-text\",\n default=False,\n action='store_true',\n help=\"Lock full text tower by disabling gradients.\",\n )\n parser.add_argument(\n \"--lock-text-unlocked-layers\",\n type=int,\n default=0,\n help=\"Leave last n image tower layer groups unlocked.\",\n )\n parser.add_argument(\n \"--lock-text-freeze-layer-norm\",\n default=False,\n action='store_true',\n help=\"Freeze BatchNorm running stats in image tower for any locked layers.\",\n )\n parser.add_argument(\n \"--log-every-n-steps\",\n type=int,\n default=100,\n help=\"Log every n steps to tensorboard/console/wandb.\",\n )\n parser.add_argument(\n \"--coca-caption-loss-weight\",\n type=float,\n default=2.0,\n help=\"Weight assigned to caption loss in CoCa.\"\n )\n parser.add_argument(\n \"--coca-contrastive-loss-weight\",\n type=float,\n default=1.0,\n help=\"Weight assigned to contrastive loss when training CoCa.\"\n )\n parser.add_argument(\n \"--remote-sync\",\n type=str,\n default=None,\n help=\"Optinoally sync with a remote path specified by this arg\",\n )\n parser.add_argument(\n \"--remote-sync-frequency\",\n type=int,\n default=300,\n help=\"How frequently to sync to a remote directly if --remote-sync is not None.\",\n )\n parser.add_argument(\n \"--remote-sync-protocol\",\n choices=[\"s3\", \"fsspec\"],\n default=\"s3\",\n help=\"How to do the remote sync backup if --remote-sync is not None.\",\n )\n parser.add_argument(\n \"--delete-previous-checkpoint\",\n default=False,\n action=\"store_true\",\n help=\"If true, delete previous checkpoint after storing a new one.\"\n )\n parser.add_argument(\n 
\"--distill-model\",\n default=None,\n help='Which model arch to distill from, if any.'\n )\n parser.add_argument(\n \"--distill-pretrained\",\n default=None,\n help='Which pre-trained weights to distill from, if any.'\n )\n # newly added flag for adding random rotation into data augmentation\n parser.add_argument(\n \"--random-rotation\",\n action=\"store_true\",\n default=False,\n help=\"If True, add random rotation into image transform for data augmentation (only for training).\"\n )\n # newly added for testing zero-shot and linear probe classification (custom dataset)\n parser.add_argument(\n \"--datasets-for-testing\",\n nargs='*',\n type=str,\n default=None,\n help=\"A list of names of datasets for testing zero-shot classification testing\",\n )\n parser.add_argument(\n \"--classification-mode\",\n type=str,\n default=\"multiclass\",\n help=\"Choose either binary or multiclass\",\n )\n parser.add_argument(\n \"--test-data\",\n type=str,\n default=None,\n help=\"Path to file(s) with test data (e.g., for testing zero-shot classification)\",\n )\n parser.add_argument(\n \"--classnames\",\n type=str,\n default=None,\n help=\"Path to txt file containing class names\",\n )\n parser.add_argument(\n \"--test-data-name\",\n type=str,\n default=None,\n help=\"The name of the test data (e.g., RSICD, EuroSat)\",\n )\n parser.add_argument(\n \"--csv-class-key\",\n type=str,\n default=\"label\",\n help=\"For csv-like datasets, the name of the key for image labels (for classification).\"\n )\n parser.add_argument(\n \"--csv-actual-label-key\",\n type=str,\n default=\"binary\",\n help=\"If classification_model=binary, then specify the name of the key for actual binary labels (i.e., 0/1).\"\n )\n parser.add_argument(\n \"--alpha\",\n type=float,\n default=None,\n help=\"The regularization multiplier of logistic regression to try for linear probing. If None, do a search.\"\n )\n parser.add_argument(\n \"--samples-per-class\",\n type=str,\n default=None,\n help=\"Numbers of samples per class to train logistic regression for linear probing. If None, use full dataset.\"\n )\n parser.add_argument(\n \"--test-result-save-path\",\n type=str,\n default=None,\n help=\"The path to save test results as a pickle file.\"\n )\n parser.add_argument(\n \"--debugging\",\n action=\"store_true\",\n default=False,\n help=\"Whether to use debugging mode, which will return more information.\"\n )\n \n args = parser.parse_args(args)\n\n # If some params are not passed, we use the default values based on model name.\n default_params = get_default_params(args.model)\n for name, val in default_params.items():\n if getattr(args, name) is None:\n setattr(args, name, val)\n\n return args" } ]
import glob import json import logging import os import re import subprocess import sys import random import numpy as np import torch import wandb import torch.utils.tensorboard as tensorboard import horovod.torch as hvd from datetime import datetime from torch import optim from torch.cuda.amp import GradScaler from torchvision import transforms from src.open_clip.factory import create_model_and_transforms, get_tokenizer, create_loss from src.open_clip.model import trace_model from src.training.data import get_data from src.training.distributed import is_master, init_distributed_device, broadcast_object from src.training.logger import setup_logging from src.training.scheduler import cosine_lr, const_lr, const_lr_cooldown from src.training.train import train_one_epoch, evaluate from src.training.file_utils import pt_load, check_exists, start_sync_process, remote_sync from src.training.main import natural_key, get_latest_checkpoint, copy_codebase from test_zero_shot_classification import * from params import parse_args
11,284
""" Adapted from https://github.com/mlfoundations/open_clip. Copyright (c) 2012-2021 Gabriel Ilharco, Mitchell Wortsman, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, John Miller, Hongseok Namkoong, Hannaneh Hajishirzi, Ali Farhadi, Ludwig Schmidt """ try: except ImportError: wandb = None try: except ImportError: tensorboard = None try: except ImportError: hvd = None # from src.open_clip import create_model_and_transforms, trace_model, get_tokenizer, create_loss LATEST_CHECKPOINT_NAME = "epoch_latest.pt" def RandomRotationNew(image): angle = random.choice([0, 90, 180, 270]) image = transforms.functional.rotate(image, angle) return image def zero_shot_eval_during_training(model, test_dataloaders, epoch, args, tb_writer=None): logging.info('Starting zero-shot evaluation.') zero_shot_metrics = {} for dataset_name in test_dataloaders: logging.info(f'Evaluating zero-shot classification for dataset {dataset_name}') results = test_zero_shot_classification(model, test_dataloaders[dataset_name]['dataloader'], test_dataloaders[dataset_name]['labels'], test_dataloaders[dataset_name]['is_binary'], args, dataset_name=dataset_name, debugging=args.debugging) for k, v in results.items(): if type(v) in [float, int, np.float16, np.float32, np.float64, np.int8, np.int16, np.int32, np.int64]: zero_shot_metrics[k] = v logging.info( f"Zero-Shot Eval Epoch: {epoch} " + "\t".join([f"{k}: {round(v, 4):.4f}" for k, v in zero_shot_metrics.items()]) ) if args.save_logs: for name, val in zero_shot_metrics.items(): if tb_writer is not None: tb_writer.add_scalar(f"val/{name}", val, epoch) with open(os.path.join(args.checkpoint_path, "results.jsonl"), "a+") as f: f.write(json.dumps(zero_shot_metrics)) f.write("\n") # if args.wandb: # assert wandb is not None, 'Please install wandb.' # for name, val in zero_shot_metrics.items(): # wandb.log({f"val/{name}": val, 'epoch': epoch}) logging.info('Finished zero-shot evaluation.') return zero_shot_metrics def train_and_test(args): args = parse_args(args) if torch.cuda.is_available(): # This enables tf32 on Ampere GPUs which is only 8% slower than # float16 and almost as accurate as float32 # This was a default in pytorch until 1.12 torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.benchmark = True torch.backends.cudnn.deterministic = False # fully initialize distributed device environment device = init_distributed_device(args) # get the name of the experiments if args.name is None: # sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule? model_name_safe = args.model.replace('/', '-') date_str = datetime.now().strftime("%Y_%m_%d-%H_%M_%S") if args.distributed: # sync date_str from master to all ranks date_str = broadcast_object(args, date_str) args.name = '-'.join([ date_str, f"model_{model_name_safe}", f"lr_{args.lr}", f"b_{args.batch_size}", f"j_{args.workers}", f"p_{args.precision}", ]) resume_latest = args.resume == 'latest' log_base_path = os.path.join(args.logs, args.name) args.log_path = None if is_master(args, local=args.log_local): os.makedirs(log_base_path, exist_ok=True) log_filename = f'out-{args.rank}' if args.log_local else 'out.log' args.log_path = os.path.join(log_base_path, log_filename) if os.path.exists(args.log_path) and not resume_latest: print( "Error. Experiment already exists. Use --name {} to specify a new experiment." ) return -1 # Setup text logger args.log_level = logging.DEBUG if args.debug else logging.INFO
""" Adapted from https://github.com/mlfoundations/open_clip. Copyright (c) 2012-2021 Gabriel Ilharco, Mitchell Wortsman, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, John Miller, Hongseok Namkoong, Hannaneh Hajishirzi, Ali Farhadi, Ludwig Schmidt """ try: except ImportError: wandb = None try: except ImportError: tensorboard = None try: except ImportError: hvd = None # from src.open_clip import create_model_and_transforms, trace_model, get_tokenizer, create_loss LATEST_CHECKPOINT_NAME = "epoch_latest.pt" def RandomRotationNew(image): angle = random.choice([0, 90, 180, 270]) image = transforms.functional.rotate(image, angle) return image def zero_shot_eval_during_training(model, test_dataloaders, epoch, args, tb_writer=None): logging.info('Starting zero-shot evaluation.') zero_shot_metrics = {} for dataset_name in test_dataloaders: logging.info(f'Evaluating zero-shot classification for dataset {dataset_name}') results = test_zero_shot_classification(model, test_dataloaders[dataset_name]['dataloader'], test_dataloaders[dataset_name]['labels'], test_dataloaders[dataset_name]['is_binary'], args, dataset_name=dataset_name, debugging=args.debugging) for k, v in results.items(): if type(v) in [float, int, np.float16, np.float32, np.float64, np.int8, np.int16, np.int32, np.int64]: zero_shot_metrics[k] = v logging.info( f"Zero-Shot Eval Epoch: {epoch} " + "\t".join([f"{k}: {round(v, 4):.4f}" for k, v in zero_shot_metrics.items()]) ) if args.save_logs: for name, val in zero_shot_metrics.items(): if tb_writer is not None: tb_writer.add_scalar(f"val/{name}", val, epoch) with open(os.path.join(args.checkpoint_path, "results.jsonl"), "a+") as f: f.write(json.dumps(zero_shot_metrics)) f.write("\n") # if args.wandb: # assert wandb is not None, 'Please install wandb.' # for name, val in zero_shot_metrics.items(): # wandb.log({f"val/{name}": val, 'epoch': epoch}) logging.info('Finished zero-shot evaluation.') return zero_shot_metrics def train_and_test(args): args = parse_args(args) if torch.cuda.is_available(): # This enables tf32 on Ampere GPUs which is only 8% slower than # float16 and almost as accurate as float32 # This was a default in pytorch until 1.12 torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.benchmark = True torch.backends.cudnn.deterministic = False # fully initialize distributed device environment device = init_distributed_device(args) # get the name of the experiments if args.name is None: # sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule? model_name_safe = args.model.replace('/', '-') date_str = datetime.now().strftime("%Y_%m_%d-%H_%M_%S") if args.distributed: # sync date_str from master to all ranks date_str = broadcast_object(args, date_str) args.name = '-'.join([ date_str, f"model_{model_name_safe}", f"lr_{args.lr}", f"b_{args.batch_size}", f"j_{args.workers}", f"p_{args.precision}", ]) resume_latest = args.resume == 'latest' log_base_path = os.path.join(args.logs, args.name) args.log_path = None if is_master(args, local=args.log_local): os.makedirs(log_base_path, exist_ok=True) log_filename = f'out-{args.rank}' if args.log_local else 'out.log' args.log_path = os.path.join(log_base_path, log_filename) if os.path.exists(args.log_path) and not resume_latest: print( "Error. Experiment already exists. Use --name {} to specify a new experiment." ) return -1 # Setup text logger args.log_level = logging.DEBUG if args.debug else logging.INFO
setup_logging(args.log_path, args.log_level)
8
2023-12-19 11:50:56+00:00
16k
penghao-wu/vstar
VisualSearch/utils/dataset.py
[ { "identifier": "conversation", "path": "VisualSearch/model/llava/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):" }, { "identifier": "DEFAULT_IMAGE_TOKEN", "path": "VisualSearch/model/llava/constants.py", "snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\"" }, { "identifier": "IGNORE_INDEX", "path": "VisualSearch/model/llava/constants.py", "snippet": "IGNORE_INDEX = -100" }, { "identifier": "IMAGE_TOKEN_INDEX", "path": "VisualSearch/model/llava/constants.py", "snippet": "IMAGE_TOKEN_INDEX = -200" }, { "identifier": "tokenizer_image_token", "path": "VisualSearch/model/llava/mm_utils.py", "snippet": "def tokenizer_image_token(\n prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None\n):\n prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split(\"<image>\")]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep] * len(X)) for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if (\n len(prompt_chunks) > 0\n and len(prompt_chunks[0]) > 0\n and prompt_chunks[0][0] == tokenizer.bos_token_id\n ):\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n\n for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):\n input_ids.extend(x[offset:])\n\n if return_tensors is not None:\n if return_tensors == \"pt\":\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f\"Unsupported tensor type: {return_tensors}\")\n return input_ids" }, { "identifier": "get_mask_from_json", "path": "VisualSearch/utils/data_processing.py", "snippet": "def get_mask_from_json(json_path, img):\n try:\n with open(json_path, \"r\") as r:\n anno = json.loads(r.read())\n except:\n with open(json_path, \"r\", encoding=\"cp1252\") as r:\n anno = json.loads(r.read())\n\n inform = anno[\"shapes\"]\n comments = anno[\"text\"]\n is_sentence = anno[\"is_sentence\"]\n\n height, width = img.shape[:2]\n\n ### sort polies by area\n area_list = []\n valid_poly_list = []\n for i in inform:\n label_id = i[\"label\"]\n points = i[\"points\"]\n if \"flag\" == label_id.lower(): ## meaningless deprecated annotations\n continue\n\n tmp_mask = np.zeros((height, width), dtype=np.uint8)\n cv2.polylines(tmp_mask, np.array([points], dtype=np.int32), True, 1, 1)\n cv2.fillPoly(tmp_mask, np.array([points], dtype=np.int32), 1)\n tmp_area = tmp_mask.sum()\n\n area_list.append(tmp_area)\n valid_poly_list.append(i)\n\n ### ground-truth mask\n sort_index = np.argsort(area_list)[::-1].astype(np.int32)\n sort_index = list(sort_index)\n sort_inform = []\n for s_idx in sort_index:\n sort_inform.append(valid_poly_list[s_idx])\n\n mask = np.zeros((height, width), dtype=np.uint8)\n for i in sort_inform:\n label_id = i[\"label\"]\n points = i[\"points\"]\n\n if \"ignore\" in label_id.lower():\n label_value = 255 # ignored during evaluation\n else:\n label_value = 1 # target\n\n cv2.polylines(mask, np.array([points], dtype=np.int32), True, label_value, 1)\n cv2.fillPoly(mask, np.array([points], dtype=np.int32), label_value)\n\n return mask, 
comments, is_sentence" }, { "identifier": "REFER", "path": "VisualSearch/utils/refer.py", "snippet": "class REFER:\n def __init__(self, data_root, dataset=\"refcoco\", splitBy=\"unc\"):\n # provide data_root folder which contains refclef, refcoco, refcoco+ and refcocog\n # also provide dataset name and splitBy information\n # e.g., dataset = 'refcoco', splitBy = 'unc'\n print(\"loading dataset %s into memory...\" % dataset)\n self.ROOT_DIR = osp.abspath(osp.dirname(__file__))\n self.DATA_DIR = osp.join(data_root, dataset)\n if dataset in [\"refcoco\", \"refcoco+\", \"refcocog\"]:\n self.IMAGE_DIR = osp.join(data_root, \"images/mscoco/images/train2014\")\n elif dataset == \"refclef\":\n self.IMAGE_DIR = osp.join(data_root, \"images/saiapr_tc-12\")\n else:\n print(\"No refer dataset is called [%s]\" % dataset)\n sys.exit()\n\n self.dataset = dataset\n\n # load refs from data/dataset/refs(dataset).json\n tic = time.time()\n\n ref_file = osp.join(self.DATA_DIR, \"refs(\" + splitBy + \").p\")\n print(\"ref_file: \", ref_file)\n self.data = {}\n self.data[\"dataset\"] = dataset\n self.data[\"refs\"] = pickle.load(open(ref_file, \"rb\"))\n\n # load annotations from data/dataset/instances.json\n instances_file = osp.join(self.DATA_DIR, \"instances.json\")\n instances = json.load(open(instances_file, \"rb\"))\n self.data[\"images\"] = instances[\"images\"]\n self.data[\"annotations\"] = instances[\"annotations\"]\n self.data[\"categories\"] = instances[\"categories\"]\n\n # create index\n self.createIndex()\n print(\"DONE (t=%.2fs)\" % (time.time() - tic))\n\n def createIndex(self):\n # create sets of mapping\n # 1) Refs: \t \t{ref_id: ref}\n # 2) Anns: \t \t{ann_id: ann}\n # 3) Imgs:\t\t \t{image_id: image}\n # 4) Cats: \t \t{category_id: category_name}\n # 5) Sents: \t{sent_id: sent}\n # 6) imgToRefs: \t{image_id: refs}\n # 7) imgToAnns: \t{image_id: anns}\n # 8) refToAnn: \t{ref_id: ann}\n # 9) annToRef: \t{ann_id: ref}\n # 10) catToRefs: \t{category_id: refs}\n # 11) sentToRef: \t{sent_id: ref}\n # 12) sentToTokens: {sent_id: tokens}\n print(\"creating index...\")\n # fetch info from instances\n Anns, Imgs, Cats, imgToAnns = {}, {}, {}, {}\n for ann in self.data[\"annotations\"]:\n Anns[ann[\"id\"]] = ann\n imgToAnns[ann[\"image_id\"]] = imgToAnns.get(ann[\"image_id\"], []) + [ann]\n for img in self.data[\"images\"]:\n Imgs[img[\"id\"]] = img\n for cat in self.data[\"categories\"]:\n Cats[cat[\"id\"]] = cat[\"name\"]\n\n # fetch info from refs\n Refs, imgToRefs, refToAnn, annToRef, catToRefs = {}, {}, {}, {}, {}\n Sents, sentToRef, sentToTokens = {}, {}, {}\n for ref in self.data[\"refs\"]:\n # ids\n ref_id = ref[\"ref_id\"]\n ann_id = ref[\"ann_id\"]\n category_id = ref[\"category_id\"]\n image_id = ref[\"image_id\"]\n\n # add mapping related to ref\n Refs[ref_id] = ref\n imgToRefs[image_id] = imgToRefs.get(image_id, []) + [ref]\n catToRefs[category_id] = catToRefs.get(category_id, []) + [ref]\n refToAnn[ref_id] = Anns[ann_id]\n annToRef[ann_id] = ref\n\n # add mapping of sent\n for sent in ref[\"sentences\"]:\n Sents[sent[\"sent_id\"]] = sent\n sentToRef[sent[\"sent_id\"]] = ref\n sentToTokens[sent[\"sent_id\"]] = sent[\"tokens\"]\n\n # create class members\n self.Refs = Refs\n self.Anns = Anns\n self.Imgs = Imgs\n self.Cats = Cats\n self.Sents = Sents\n self.imgToRefs = imgToRefs\n self.imgToAnns = imgToAnns\n self.refToAnn = refToAnn\n self.annToRef = annToRef\n self.catToRefs = catToRefs\n self.sentToRef = sentToRef\n self.sentToTokens = sentToTokens\n print(\"index created.\")\n\n def 
getRefIds(self, image_ids=[], cat_ids=[], ref_ids=[], split=\"\"):\n image_ids = image_ids if type(image_ids) == list else [image_ids]\n cat_ids = cat_ids if type(cat_ids) == list else [cat_ids]\n ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]\n\n if len(image_ids) == len(cat_ids) == len(ref_ids) == len(split) == 0:\n refs = self.data[\"refs\"]\n else:\n if not len(image_ids) == 0:\n refs = [self.imgToRefs[image_id] for image_id in image_ids]\n else:\n refs = self.data[\"refs\"]\n if not len(cat_ids) == 0:\n refs = [ref for ref in refs if ref[\"category_id\"] in cat_ids]\n if not len(ref_ids) == 0:\n refs = [ref for ref in refs if ref[\"ref_id\"] in ref_ids]\n if not len(split) == 0:\n if split in [\"testA\", \"testB\", \"testC\"]:\n refs = [\n ref for ref in refs if split[-1] in ref[\"split\"]\n ] # we also consider testAB, testBC, ...\n elif split in [\"testAB\", \"testBC\", \"testAC\"]:\n refs = [\n ref for ref in refs if ref[\"split\"] == split\n ] # rarely used I guess...\n elif split == \"test\":\n refs = [ref for ref in refs if \"test\" in ref[\"split\"]]\n elif split == \"train\" or split == \"val\":\n refs = [ref for ref in refs if ref[\"split\"] == split]\n else:\n print(\"No such split [%s]\" % split)\n sys.exit()\n ref_ids = [ref[\"ref_id\"] for ref in refs]\n return ref_ids\n\n def getAnnIds(self, image_ids=[], cat_ids=[], ref_ids=[]):\n image_ids = image_ids if type(image_ids) == list else [image_ids]\n cat_ids = cat_ids if type(cat_ids) == list else [cat_ids]\n ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]\n\n if len(image_ids) == len(cat_ids) == len(ref_ids) == 0:\n ann_ids = [ann[\"id\"] for ann in self.data[\"annotations\"]]\n else:\n if not len(image_ids) == 0:\n lists = [\n self.imgToAnns[image_id]\n for image_id in image_ids\n if image_id in self.imgToAnns\n ] # list of [anns]\n anns = list(itertools.chain.from_iterable(lists))\n else:\n anns = self.data[\"annotations\"]\n if not len(cat_ids) == 0:\n anns = [ann for ann in anns if ann[\"category_id\"] in cat_ids]\n ann_ids = [ann[\"id\"] for ann in anns]\n if not len(ref_ids) == 0:\n ids = set(ann_ids).intersection(\n set([self.Refs[ref_id][\"ann_id\"] for ref_id in ref_ids])\n )\n return ann_ids\n\n def getImgIds(self, ref_ids=[]):\n ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]\n\n if not len(ref_ids) == 0:\n image_ids = list(set([self.Refs[ref_id][\"image_id\"] for ref_id in ref_ids]))\n else:\n image_ids = self.Imgs.keys()\n return image_ids\n\n def getCatIds(self):\n return self.Cats.keys()\n\n def loadRefs(self, ref_ids=[]):\n if type(ref_ids) == list:\n return [self.Refs[ref_id] for ref_id in ref_ids]\n elif type(ref_ids) == int:\n return [self.Refs[ref_ids]]\n\n def loadAnns(self, ann_ids=[]):\n if type(ann_ids) == list:\n return [self.Anns[ann_id] for ann_id in ann_ids]\n elif type(ann_ids) == int or type(ann_ids) == unicode:\n return [self.Anns[ann_ids]]\n\n def loadImgs(self, image_ids=[]):\n if type(image_ids) == list:\n return [self.Imgs[image_id] for image_id in image_ids]\n elif type(image_ids) == int:\n return [self.Imgs[image_ids]]\n\n def loadCats(self, cat_ids=[]):\n if type(cat_ids) == list:\n return [self.Cats[cat_id] for cat_id in cat_ids]\n elif type(cat_ids) == int:\n return [self.Cats[cat_ids]]\n\n def getRefBox(self, ref_id):\n ref = self.Refs[ref_id]\n ann = self.refToAnn[ref_id]\n return ann[\"bbox\"] # [x, y, w, h]\n\n def showRef(self, ref, seg_box=\"seg\"):\n ax = plt.gca()\n # show image\n image = self.Imgs[ref[\"image_id\"]]\n I = 
io.imread(osp.join(self.IMAGE_DIR, image[\"file_name\"]))\n ax.imshow(I)\n # show refer expression\n for sid, sent in enumerate(ref[\"sentences\"]):\n print(\"%s. %s\" % (sid + 1, sent[\"sent\"]))\n # show segmentations\n if seg_box == \"seg\":\n ann_id = ref[\"ann_id\"]\n ann = self.Anns[ann_id]\n polygons = []\n color = []\n c = \"none\"\n if type(ann[\"segmentation\"][0]) == list:\n # polygon used for refcoco*\n for seg in ann[\"segmentation\"]:\n poly = np.array(seg).reshape((len(seg) / 2, 2))\n polygons.append(Polygon(poly, True, alpha=0.4))\n color.append(c)\n p = PatchCollection(\n polygons,\n facecolors=color,\n edgecolors=(1, 1, 0, 0),\n linewidths=3,\n alpha=1,\n )\n ax.add_collection(p) # thick yellow polygon\n p = PatchCollection(\n polygons,\n facecolors=color,\n edgecolors=(1, 0, 0, 0),\n linewidths=1,\n alpha=1,\n )\n ax.add_collection(p) # thin red polygon\n else:\n # mask used for refclef\n rle = ann[\"segmentation\"]\n m = mask.decode(rle)\n img = np.ones((m.shape[0], m.shape[1], 3))\n color_mask = np.array([2.0, 166.0, 101.0]) / 255\n for i in range(3):\n img[:, :, i] = color_mask[i]\n ax.imshow(np.dstack((img, m * 0.5)))\n # show bounding-box\n elif seg_box == \"box\":\n ann_id = ref[\"ann_id\"]\n ann = self.Anns[ann_id]\n bbox = self.getRefBox(ref[\"ref_id\"])\n box_plot = Rectangle(\n (bbox[0], bbox[1]),\n bbox[2],\n bbox[3],\n fill=False,\n edgecolor=\"green\",\n linewidth=3,\n )\n ax.add_patch(box_plot)\n\n def getMask(self, ref):\n # return mask, area and mask-center\n ann = self.refToAnn[ref[\"ref_id\"]]\n image = self.Imgs[ref[\"image_id\"]]\n if type(ann[\"segmentation\"][0]) == list: # polygon\n rle = mask.frPyObjects(ann[\"segmentation\"], image[\"height\"], image[\"width\"])\n else:\n rle = ann[\"segmentation\"]\n m = mask.decode(rle)\n m = np.sum(\n m, axis=2\n ) # sometimes there are multiple binary map (corresponding to multiple segs)\n m = m.astype(np.uint8) # convert to np.uint8\n # compute area\n area = sum(mask.area(rle)) # should be close to ann['area']\n return {\"mask\": m, \"area\": area}\n # # position\n # position_x = np.mean(np.where(m==1)[1]) # [1] means columns (matlab style) -> x (c style)\n # position_y = np.mean(np.where(m==1)[0]) # [0] means rows (matlab style) -> y (c style)\n # # mass position (if there were multiple regions, we use the largest one.)\n # label_m = label(m, connectivity=m.ndim)\n # regions = regionprops(label_m)\n # if len(regions) > 0:\n # \tlargest_id = np.argmax(np.array([props.filled_area for props in regions]))\n # \tlargest_props = regions[largest_id]\n # \tmass_y, mass_x = largest_props.centroid\n # else:\n # \tmass_x, mass_y = position_x, position_y\n # # if centroid is not in mask, we find the closest point to it from mask\n # if m[mass_y, mass_x] != 1:\n # \tprint('Finding closes mask point ...')\n # \tkernel = np.ones((10, 10),np.uint8)\n # \tme = cv2.erode(m, kernel, iterations = 1)\n # \tpoints = zip(np.where(me == 1)[0].tolist(), np.where(me == 1)[1].tolist()) # row, col style\n # \tpoints = np.array(points)\n # \tdist = np.sum((points - (mass_y, mass_x))**2, axis=1)\n # \tid = np.argsort(dist)[0]\n # \tmass_y, mass_x = points[id]\n # \t# return\n # return {'mask': m, 'area': area, 'position_x': position_x, 'position_y': position_y, 'mass_x': mass_x, 'mass_y': mass_y}\n # # show image and mask\n # I = io.imread(osp.join(self.IMAGE_DIR, image['file_name']))\n # plt.figure()\n # plt.imshow(I)\n # ax = plt.gca()\n # img = np.ones( (m.shape[0], m.shape[1], 3) )\n # color_mask = np.array([2.0,166.0,101.0])/255\n 
# for i in range(3):\n # img[:,:,i] = color_mask[i]\n # ax.imshow(np.dstack( (img, m*0.5) ))\n # plt.show()\n\n def showMask(self, ref):\n M = self.getMask(ref)\n msk = M[\"mask\"]\n ax = plt.gca()\n ax.imshow(msk)" }, { "identifier": "ReferSegDataset", "path": "VisualSearch/utils/refer_seg_dataset.py", "snippet": "class ReferSegDataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n refer_seg_data=\"refclef||refcoco||refcoco+||refcocog\",\n ):\n self.exclude_val = exclude_val\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_dir = base_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n self.short_question_list = SHORT_QUESTION_LIST\n self.answer_list = ANSWER_LIST\n\n DATA_DIR = os.path.join(base_dir, \"refer_seg\")\n self.refer_seg_ds_list = refer_seg_data.split(\n \"||\"\n ) # ['refclef', 'refcoco', 'refcoco+', 'refcocog']\n self.refer_seg_data = {}\n for ds in self.refer_seg_ds_list:\n if ds == \"refcocog\":\n splitBy = \"umd\"\n else:\n splitBy = \"unc\"\n\n if ds == \"grefcoco\":\n refer_api = G_REFER(DATA_DIR, ds, splitBy)\n else:\n refer_api = REFER(DATA_DIR, ds, splitBy)\n ref_ids_train = refer_api.getRefIds(split=\"train\")\n images_ids_train = refer_api.getImgIds(ref_ids=ref_ids_train)\n refs_train = refer_api.loadRefs(ref_ids=ref_ids_train)\n\n refer_seg_ds = {}\n refer_seg_ds[\"images\"] = []\n loaded_images = refer_api.loadImgs(image_ids=images_ids_train)\n\n for item in loaded_images:\n item = item.copy()\n if ds == \"refclef\":\n item[\"file_name\"] = os.path.join(\n DATA_DIR, \"images/saiapr_tc-12\", item[\"file_name\"]\n )\n else:\n item[\"file_name\"] = os.path.join(\n DATA_DIR, \"images/mscoco/images/train2014\", item[\"file_name\"]\n )\n refer_seg_ds[\"images\"].append(item)\n refer_seg_ds[\"annotations\"] = refer_api.Anns # anns_train\n\n print(\n \"dataset {} (refs {}) (train split) has {} images and {} annotations.\".format(\n ds,\n splitBy,\n len(refer_seg_ds[\"images\"]),\n len(refer_seg_ds[\"annotations\"]),\n )\n )\n\n img2refs = {}\n for ref in refs_train:\n image_id = ref[\"image_id\"]\n img2refs[image_id] = img2refs.get(image_id, []) + [\n ref,\n ]\n refer_seg_ds[\"img2refs\"] = img2refs\n self.refer_seg_data[ds] = refer_seg_ds\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n ds = random.randint(0, len(self.refer_seg_ds_list) - 1)\n ds = self.refer_seg_ds_list[ds]\n refer_seg_ds = self.refer_seg_data[ds]\n images = refer_seg_ds[\"images\"]\n annotations = refer_seg_ds[\"annotations\"]\n img2refs = refer_seg_ds[\"img2refs\"]\n idx = random.randint(0, len(images) - 1)\n image_info = images[idx]\n image_path = 
image_info[\"file_name\"]\n image_id = image_info[\"id\"]\n refs = img2refs[image_id]\n if len(refs) == 0:\n return self.__getitem__(0)\n\n sents = []\n ann_ids = []\n for ref in refs:\n for sent in ref[\"sentences\"]:\n text = sent[\"sent\"]\n sents.append(text)\n ann_ids.append(ref[\"ann_id\"])\n if len(sents) >= self.num_classes_per_sample:\n sampled_inds = np.random.choice(\n list(range(len(sents))), size=self.num_classes_per_sample, replace=False\n )\n else:\n sampled_inds = list(range(len(sents)))\n sampled_sents = np.vectorize(sents.__getitem__)(sampled_inds).tolist()\n # sampled_ann_ids = np.vectorize(ann_ids.__getitem__)(sampled_inds).tolist()\n sampled_ann_ids = [ann_ids[ind] for ind in sampled_inds]\n sampled_classes = sampled_sents\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\")[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n\n questions = []\n answers = []\n for text in sampled_classes:\n text = text.strip()\n assert len(text.split(\"||\")) == 1\n question_template = random.choice(self.short_question_list)\n questions.append(question_template.format(class_name=text.lower()))\n answers.append(random.choice(self.answer_list))\n\n conversations = []\n conv = conversation_lib.default_conversation.copy()\n\n i = 0\n while i < len(questions):\n conv.messages = []\n conv.append_message(conv.roles[0], questions[i])\n conv.append_message(conv.roles[1], answers[i])\n conversations.append(conv.get_prompt())\n i += 1\n\n flag = False\n masks = []\n bboxes_labels = []\n for ann_id in sampled_ann_ids:\n if isinstance(ann_id, list):\n assert False\n flag = True\n if -1 in ann_id:\n assert len(ann_id) == 1\n m = np.zeros((image_info[\"height\"], image_info[\"width\"])).astype(\n np.uint8\n )\n else:\n m_final = np.zeros(\n (image_info[\"height\"], image_info[\"width\"])\n ).astype(np.uint8)\n for ann_id_i in ann_id:\n ann = annotations[ann_id_i]\n\n if len(ann[\"segmentation\"]) == 0:\n m = np.zeros(\n (image_info[\"height\"], image_info[\"width\"])\n ).astype(np.uint8)\n else:\n if type(ann[\"segmentation\"][0]) == list: # polygon\n rle = mask.frPyObjects(\n ann[\"segmentation\"],\n image_info[\"height\"],\n image_info[\"width\"],\n )\n else:\n rle = ann[\"segmentation\"]\n for i in range(len(rle)):\n if not isinstance(rle[i][\"counts\"], bytes):\n rle[i][\"counts\"] = rle[i][\"counts\"].encode()\n m = mask.decode(rle)\n m = np.sum(\n m, axis=2\n ) # sometimes there are multiple binary map (corresponding to multiple segs)\n m = m.astype(np.uint8) # convert to np.uint8\n m_final = m_final | m\n m = m_final\n masks.append(m)\n continue\n \n ann = annotations[ann_id]\n cur_bboxes = [ann['bbox']]\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if 
len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n \n if len(ann[\"segmentation\"]) == 0:\n m = np.zeros((image_info[\"height\"], image_info[\"width\"])).astype(\n np.uint8\n )\n masks.append(m)\n continue\n\n if type(ann[\"segmentation\"][0]) == list: # polygon\n rle = mask.frPyObjects(\n ann[\"segmentation\"], image_info[\"height\"], image_info[\"width\"]\n )\n else:\n rle = ann[\"segmentation\"]\n for i in range(len(rle)):\n if not isinstance(rle[i][\"counts\"], bytes):\n rle[i][\"counts\"] = rle[i][\"counts\"].encode()\n m = mask.decode(rle)\n m = np.sum(\n m, axis=2\n ) # sometimes there are multiple binary map (corresponding to multiple segs)\n m = m.astype(np.uint8) # convert to np.uint8\n masks.append(m)\n bboxes_valid = [1]*len(bboxes_labels)\n masks_valid = [1]*len(bboxes_labels)\n masks = np.stack(masks, axis=0)\n\n\n masks = torch.from_numpy(masks)\n label = torch.ones(masks.shape[1], masks.shape[2]) * self.ignore_label\n\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "SegDetDataset", "path": "VisualSearch/utils/general_segdet_dataset.py", "snippet": "class SegDetDataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n general_segdet_data=\"objects365||cocostuff||paco_lvis\",\n general_segdet_sample_rate=[2,1,1]\n ):\n self.exclude_val = exclude_val\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_dir = base_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n self.short_question_list = SHORT_QUESTION_LIST\n self.answer_list = ANSWER_LIST\n\n self.data2list = {}\n self.data2classes = {}\n\n self.general_segdet_datas = general_segdet_data.split(\"||\")\n num_images = []\n for ds in self.general_segdet_datas:\n if ds == \"cocostuff\":\n classes, images, labels, bboxes = eval(\"init_{}\".format(ds))(base_dir)\n self.data2list[ds] = (images, labels, bboxes)\n elif ds == \"objects365\":\n classes, images, bboxes = eval(\"init_{}\".format(ds))(base_dir)\n self.data2list[ds] = (images, bboxes)\n else:\n classes, images, labels = eval(\"init_{}\".format(ds))(base_dir)\n self.data2list[ds] = (images, labels)\n self.data2classes[ds] = classes\n num_images.append(len(images))\n sample_rate = np.array(general_segdet_sample_rate)\n self.sample_rate = sample_rate / sample_rate.sum()\n\n if \"cocostuff\" in self.general_segdet_datas:\n self.cocostuff_class2index = {\n c: i for i, c in enumerate(self.data2classes[\"cocostuff\"])\n }\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n 
ds = np.random.choice(list(range(len(self.general_segdet_datas))), p=self.sample_rate)\n ds = self.general_segdet_datas[ds]\n\n if ds in [\"paco_lvis\"]:\n class_map = self.data2classes[ds]\n img_ids, coco_api = self.data2list[ds]\n idx = random.randint(0, len(img_ids) - 1)\n img_id = img_ids[idx]\n image_info = coco_api.loadImgs([img_id])[0]\n file_name = image_info[\"file_name\"]\n if ds == \"pascal_part\":\n file_name = os.path.join(\n \"VOCdevkit\", \"VOC2010\", \"JPEGImages\", file_name\n )\n image_path = os.path.join(self.base_dir, \"vlpart\", ds, file_name)\n elif ds == \"paco_lvis\":\n image_path = os.path.join(self.base_dir, \"coco2017\", file_name)\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\"\n )[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n annIds = coco_api.getAnnIds(imgIds=image_info[\"id\"])\n anns = coco_api.loadAnns(annIds)\n anns_category2instances = dict()\n for ann in anns:\n category_id = ann['category_id']\n if category_id not in anns_category2instances:\n anns_category2instances[category_id] = []\n anns_category2instances[category_id].append(ann)\n if len(anns_category2instances) == 0:\n return self.__getitem__(0)\n if len(anns_category2instances) >= self.num_classes_per_sample:\n sampled_anns = np.random.choice(\n list(anns_category2instances.keys()), size=self.num_classes_per_sample, replace=False\n ).tolist()\n else:\n sampled_anns = list(anns_category2instances.keys())\n sampled_classes = []\n for category_id in sampled_anns:\n sampled_cls = class_map[category_id]\n if isinstance(sampled_cls, tuple):\n obj, part = sampled_cls\n if random.random() < 0.5:\n name = obj + \" \" + part\n else:\n name = \"the {} of the {}\".format(part, obj)\n else:\n name = sampled_cls\n name = name.replace('_', ' ')\n sampled_classes.append(name)\n\n elif ds in [\"cocostuff\"]:\n image, labels, bboxes_all = self.data2list[ds]\n idx = random.randint(0, len(image) - 1)\n image_path = image[idx]\n label_path = labels[idx]\n bboxes = bboxes_all[idx]\n label = Image.open(label_path)\n label = np.array(label)\n if ds == \"ade20k\":\n label[label == 0] = 255\n label -= 1\n label[label == 254] = 255\n elif ds == \"cocostuff\":\n for c, i in self.cocostuff_class2index.items():\n if \"-\" in c:\n label[label == i] = 255\n img = cv2.imread(image_path)\n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\"\n )[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n unique_label = np.unique(label).tolist()\n if 255 in unique_label:\n unique_label.remove(255)\n if len(unique_label) == 0:\n return self.__getitem__(0)\n\n classes = [self.data2classes[ds][class_id] for class_id in unique_label]\n if len(classes) >= self.num_classes_per_sample:\n sampled_classes = np.random.choice(\n classes, size=self.num_classes_per_sample, replace=False\n ).tolist()\n else:\n sampled_classes = classes\n\n elif ds in 
['objects365']:\n image, bboxes_all = self.data2list[ds]\n idx = random.randint(0, len(image) - 1)\n image_path = image[idx]\n bboxes = bboxes_all[idx]\n img = cv2.imread(image_path)\n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\"\n )[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n unique_label = set()\n for bbox_info in bboxes:\n unique_label.add(bbox_info['category_id'])\n unique_label = list(unique_label)\n if len(unique_label) == 0:\n return self.__getitem__(0)\n\n classes = [self.data2classes[ds][class_id] for class_id in unique_label]\n if len(classes) >= self.num_classes_per_sample:\n sampled_classes = np.random.choice(\n classes, size=self.num_classes_per_sample, replace=False\n ).tolist()\n else:\n sampled_classes = classes\n\n\n questions = []\n answers = []\n class_ids = []\n bboxes_labels = []\n for i, sampled_cls in enumerate(sampled_classes):\n text = sampled_cls\n if ds in ['objects365']:\n text = random.sample(text.split('/'), 1)[0]\n \n assert len(text.split(\"||\")) == 1\n question_template = random.choice(self.short_question_list)\n questions.append(question_template.format(class_name=text.lower()))\n\n answers.append(random.choice(self.answer_list))\n\n if ds in [\"paco_lvis\", \"pascal_part\"]:\n category_id = sampled_anns[i]\n cur_bboxes = [instance['bbox'] for instance in anns_category2instances[category_id]]\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n continue\n\n class_id = self.data2classes[ds].tolist().index(sampled_cls)\n class_ids.append(class_id)\n if ds in ['objects365']:\n cur_bboxes = [bbox['bbox'] for bbox in bboxes if bbox['category_id'] == class_id]\n else:\n cur_bboxes = [bbox['bbox'] for bbox in bboxes if bbox['category_id']-1 == class_id]\n cur_bboxes = cur_bboxes[:100]\n assert len(cur_bboxes) > 0\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n bboxes_valid = [1]*len(bboxes_labels)\n masks_valid = [1]*len(bboxes_labels)\n conversations = []\n conv = conversation_lib.default_conversation.copy()\n\n i = 0\n while i < len(questions):\n conv.messages = []\n 
conv.append_message(conv.roles[0], questions[i])\n conv.append_message(conv.roles[1], answers[i])\n conversations.append(conv.get_prompt())\n i += 1\n\n if ds in [\"paco_lvis\", \"pascal_part\"]:\n masks = []\n for category_id in sampled_anns:\n try:\n cur_anns = anns_category2instances[category_id]\n cur_mask = None\n for ann in cur_anns:\n if cur_mask is None:\n cur_mask = coco_api.annToMask(ann)\n else:\n cur_mask = cur_mask | coco_api.annToMask(ann)\n assert cur_mask is not None\n masks.append(cur_mask)\n except Exception as e:\n print(e)\n return self.__getitem__(0)\n\n masks = np.stack(masks, axis=0)\n masks = torch.from_numpy(masks)\n label = torch.ones(masks.shape[1], masks.shape[2]) * self.ignore_label\n elif ds in ['objects365']:\n masks = torch.rand(len(bboxes_labels), *original_size)\n label = torch.ones(original_size) * self.ignore_label\n masks_valid = [0]*len(bboxes_labels)\n else:\n label = torch.from_numpy(label).long()\n masks = []\n for class_id in class_ids:\n masks.append(label == class_id)\n masks = torch.stack(masks, dim=0)\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "MixedGroundingDataset", "path": "VisualSearch/utils/mixed_grounding_dataset.py", "snippet": "class MixedGroundingDataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n ):\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_dir = base_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n self.short_question_list = SHORT_QUESTION_LIST\n self.answer_list = ANSWER_LIST\n\n with open(os.path.join(base_dir, 'MixedGrounding', 'goldG_train.json')) as f:\n self.images = json.load(f)\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n\n idx = random.randint(0, len(self.images) - 1)\n image_info = self.images[idx]\n image_data_source = image_info['data_source']\n file_name = image_info[\"file_name\"]\n assert image_data_source in ['coco', 'vg', 'flickr']\n if image_data_source == 'coco':\n image_path = os.path.join(self.base_dir, 'coco2014/train2014', file_name)\n elif image_data_source == 'vg':\n image_path = os.path.join(self.base_dir, 'MixedGrounding/GQA/images', file_name)\n else:\n image_path = os.path.join(self.base_dir, 'MixedGrounding/flickr30k-images', file_name)\n caption = image_info['caption']\n instances = image_info['instances']\n if len(instances) == 0:\n return self.__getitem__(0)\n\n if len(instances) >= self.num_classes_per_sample:\n sampled_inds = np.random.choice(\n list(range(len(instances))), 
size=self.num_classes_per_sample, replace=False\n )\n else:\n sampled_inds = list(range(len(instances)))\n\n sampled_classes = sampled_inds\n \n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\")[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n\n questions = []\n answers = []\n bboxes_labels = []\n for sample_ind in sampled_inds:\n text = []\n tokens_positive = instances[sample_ind]['tokens_positive']\n for token in tokens_positive:\n text.append(caption[token[0]:token[1]])\n text = \" \".join(text)\n text = text.strip()\n question_template = random.choice(self.short_question_list)\n questions.append(question_template.format(class_name=text.lower()))\n answers.append(random.choice(self.answer_list))\n\n cur_bboxes = [instances[sample_ind]['bbox']]\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n\n conversations = []\n conv = conversation_lib.default_conversation.copy()\n\n i = 0\n while i < len(questions):\n conv.messages = []\n conv.append_message(conv.roles[0], questions[i])\n conv.append_message(conv.roles[1], answers[i])\n conversations.append(conv.get_prompt())\n i += 1\n \n bboxes_valid = [1]*len(bboxes_labels)\n masks_valid = [0]*len(bboxes_labels)\n masks = torch.rand(len(bboxes_labels), *original_size)\n label = torch.ones(original_size) * self.ignore_label\n\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "VQADataset", "path": "VisualSearch/utils/vqa_dataset.py", "snippet": "class VQADataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_image_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n vqa_data=\"possible_locations_conv_86k||llava_instruct_150k\",\n vqa_sample_rate=[2,1],\n ):\n self.exclude_val = exclude_val\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_image_dir = base_image_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n DATA_DIR = os.path.join(base_image_dir, \"vsm_vqa_data\")\n self.vqa_image_root = os.path.join(base_image_dir, \"coco2017/train2017\")\n 
vqa_datas = vqa_data.split(\"||\")\n self.vqa_datas = []\n for data in vqa_datas:\n with open(os.path.join(DATA_DIR, \"{}.json\".format(data))) as f:\n data = json.load(f)\n self.vqa_datas.append(data)\n sample_rate = np.array(vqa_sample_rate)\n self.sample_rate = sample_rate / sample_rate.sum()\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n ds = np.random.choice(list(range(len(self.vqa_datas))), p=self.sample_rate)\n ds = self.vqa_datas[ds]\n idx = random.randint(0, len(ds) - 1)\n item = ds[idx]\n image_path = os.path.join(self.vqa_image_root, item[\"image\"])\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n ori_size = image.shape[:2]\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\")[\"pixel_values\"][0]\n\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n\n conv = conversation_lib.default_conversation.copy()\n source = item[\"conversations\"]\n source = preprocess_multimodal(\n copy.deepcopy(source),\n mm_use_im_start_end=conv.sep_style == conversation_lib.SeparatorStyle.TWO,\n )\n roles = {\"human\": conv.roles[0], \"gpt\": conv.roles[1]}\n conversations = []\n if roles[source[0][\"from\"]] != conv.roles[0]:\n # Skip the first one if it is not from human\n source = source[1:]\n conv.messages = []\n for j, sentence in enumerate(source):\n role = roles[sentence[\"from\"]]\n assert role == conv.roles[j % 2], f\"{j}\"\n conv.append_message(role, sentence[\"value\"])\n conversations.append(conv.get_prompt())\n\n questions = conversations\n sampled_classes = conversations\n\n masks = torch.rand(1, *ori_size)\n label = torch.ones(ori_size) * self.ignore_label\n bboxes_labels = [torch.tensor([[0.5,0.5,1.0,1.0]])]\n bboxes_valid = [0]\n masks_valid = [0]\n\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "DEFAULT_IM_END_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\"" }, { "identifier": "DEFAULT_IM_START_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\"" }, { "identifier": "DEFAULT_IMAGE_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\"" }, { "identifier": "box_xyxy_to_cxcywh", "path": "VisualSearch/utils/utils.py", "snippet": "def box_xyxy_to_cxcywh(x):\n x0, y0, x1, y1 = x.unbind(-1)\n b = [(x0 + x1) / 2, (y0 + y1) / 2,\n (x1 - x0), (y1 - y0)]\n return torch.stack(b, dim=-1)" }, { "identifier": "expand2square", "path": "VisualSearch/utils/utils.py", "snippet": "def expand2square(pil_img, background_color):\n width, height = pil_img.size\n if width == height:\n return pil_img\n elif width > height:\n result = Image.new(pil_img.mode, (width, width), background_color)\n result.paste(pil_img, (0, 0))\n return result\n else:\n result = Image.new(pil_img.mode, (height, height), background_color)\n result.paste(pil_img, (0, 
0))\n return result" } ]
import glob import os import random import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image from pycocotools import mask from transformers import CLIPImageProcessor from transformers import OwlViTProcessor from VisualSearch.model.llava import conversation as conversation_lib from VisualSearch.model.llava.constants import (DEFAULT_IMAGE_TOKEN, IGNORE_INDEX, IMAGE_TOKEN_INDEX) from VisualSearch.model.llava.mm_utils import tokenizer_image_token from VisualSearch.utils.data_processing import get_mask_from_json from VisualSearch.utils.refer import REFER from VisualSearch.utils.refer_seg_dataset import ReferSegDataset from VisualSearch.utils.general_segdet_dataset import SegDetDataset from VisualSearch.utils.mixed_grounding_dataset import MixedGroundingDataset from VisualSearch.utils.vqa_dataset import VQADataset from VisualSearch.utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IMAGE_TOKEN) from VisualSearch.utils.utils import box_xyxy_to_cxcywh, expand2square
14,360
cv2.setNumThreads(1) def collate_fn( batch, tokenizer=None, conv_type="llava_v1", use_mm_start_end=True, local_rank=-1 ): image_path_list = [] images_list = [] images_clip_list = [] conversation_list = [] masks_list = [] label_list = [] bboxes_labels_list = [] bboxes_valid_list = [] masks_valid_list = [] resize_list = [] questions_list = [] sampled_classes_list = [] offset_list = [0] cnt = 0 inferences = [] for ( image_path, images, images_clip, conversations, masks, label, bboxes_labels, bboxes_valid, masks_valid, resize, questions, sampled_classes, inference, ) in batch: image_path_list.append(image_path) images_list.append(images) images_clip_list.append(images_clip) conversation_list.extend(conversations) label_list.append(label) masks_list.append(masks.float()) bboxes_labels_list.extend(bboxes_labels) bboxes_valid_list.extend(bboxes_valid) masks_valid_list.append(torch.tensor(masks_valid)) resize_list.append(resize) questions_list.append(questions) sampled_classes_list.append(sampled_classes) cnt += len(conversations) offset_list.append(cnt) inferences.append(inference) if use_mm_start_end: # replace <image> token for i in range(len(conversation_list)): replace_token = DEFAULT_IMAGE_TOKEN replace_token = ( DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN ) conversation_list[i] = conversation_list[i].replace( DEFAULT_IMAGE_TOKEN, replace_token ) input_ids = [
cv2.setNumThreads(1) def collate_fn( batch, tokenizer=None, conv_type="llava_v1", use_mm_start_end=True, local_rank=-1 ): image_path_list = [] images_list = [] images_clip_list = [] conversation_list = [] masks_list = [] label_list = [] bboxes_labels_list = [] bboxes_valid_list = [] masks_valid_list = [] resize_list = [] questions_list = [] sampled_classes_list = [] offset_list = [0] cnt = 0 inferences = [] for ( image_path, images, images_clip, conversations, masks, label, bboxes_labels, bboxes_valid, masks_valid, resize, questions, sampled_classes, inference, ) in batch: image_path_list.append(image_path) images_list.append(images) images_clip_list.append(images_clip) conversation_list.extend(conversations) label_list.append(label) masks_list.append(masks.float()) bboxes_labels_list.extend(bboxes_labels) bboxes_valid_list.extend(bboxes_valid) masks_valid_list.append(torch.tensor(masks_valid)) resize_list.append(resize) questions_list.append(questions) sampled_classes_list.append(sampled_classes) cnt += len(conversations) offset_list.append(cnt) inferences.append(inference) if use_mm_start_end: # replace <image> token for i in range(len(conversation_list)): replace_token = DEFAULT_IMAGE_TOKEN replace_token = ( DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN ) conversation_list[i] = conversation_list[i].replace( DEFAULT_IMAGE_TOKEN, replace_token ) input_ids = [
tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
4
2023-12-15 14:58:24+00:00
16k
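For reference, this row's gold next line, tokenizer_image_token(prompt, tokenizer, return_tensors="pt"), continues the input_ids = [ list opened at the end of the cropped code above. A minimal sketch of how that continuation plausibly reads in context, assuming the comprehension iterates over conversation_list (only the first line inside the brackets is the row's gold next line; the rest is an inferred completion, not shown in the row):

# Sketch only: assumed continuation of collate_fn from this row.
# `conversation_list` and `tokenizer` are the variables visible in the cropped code above;
# the closing of the comprehension is an assumption, not part of the dataset row.
input_ids = [
    tokenizer_image_token(prompt, tokenizer, return_tensors="pt")
    for prompt in conversation_list
]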
foocker/Bert-VITS2-Faster
train_ms.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n transcription_path: str,\n cleaned_path: str,\n train_path: str,\n val_path: str,\n config_path: str,\n val_per_spk: int = 5,\n max_val_total: int = 10000,\n clean: bool = True,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n env: Dict[str, any],\n base: Dict[str, any],\n model: str,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n device: str,\n model: str,\n config_path: str,\n language_identification_library: str,\n port: int = 7860,\n share: bool = False,\n debug: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self, models: List[Dict[str, any]], port: int = 5000, device: str = \"cuda\"\n ):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, app_key: str, secret_key: str):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, config_path: str):" }, { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 300)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n logger.info(\"Init dataset...\")\n for _id, spk, language, text, phones, tone, word2ph in tqdm(\n self.audiopaths_sid_text\n ):\n audiopath = f\"{_id}\"\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = 
phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append(\n [audiopath, spk, language, text, phones, tone, word2ph]\n )\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n logger.info(\n \"skipped: \"\n + str(skipped)\n + \", total: \"\n + str(len(self.audiopaths_sid_text))\n )\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, ja_bert, en_bert, phones, tone, language = self.get_text(\n text, word2ph, phones, tone, language, audiopath\n )\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n return (phones, spec, wav, sid, tone, language, bert, ja_bert, en_bert)\n\n def get_audio(self, filename):\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n filename, sampling_rate, self.sampling_rate\n )\n )\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n try:\n spec = torch.load(spec_filename)\n except:\n if self.use_mel_spec_posterior:\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n if self.add_blank:\n phone = commons.intersperse(phone, 0)\n tone = commons.intersperse(tone, 0)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert_ori = torch.load(bert_path)\n assert bert_ori.shape[-1] == len(phone)\n except Exception as e:\n logger.warning(\"Bert load Failed\")\n logger.warning(e)\n\n if language_str == \"ZH\":\n bert = bert_ori\n ja_bert = torch.zeros(1024, len(phone))\n en_bert = torch.zeros(1024, len(phone))\n elif language_str == \"JP\":\n bert = torch.zeros(1024, len(phone))\n ja_bert = bert_ori\n en_bert = torch.zeros(1024, len(phone))\n elif language_str == \"EN\":\n bert = torch.zeros(1024, len(phone))\n ja_bert = torch.zeros(1024, len(phone))\n en_bert = bert_ori\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, ja_bert, en_bert, phone, tone, language\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate:\n \"\"\"Zero-pads 
model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n ja_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n en_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n ja_bert_padded.zero_()\n en_bert_padded.zero_()\n\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, : tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, : language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, : bert.size(1)] = bert\n\n ja_bert = row[7]\n ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert\n\n en_bert = row[8]\n en_bert_padded[i, :, : en_bert.size(1)] = en_bert\n\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n sid,\n tone_padded,\n language_padded,\n bert_padded,\n ja_bert_padded,\n en_bert_padded,\n )" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. 
length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n try:\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n assert all(len(bucket) > 0 for bucket in buckets)\n # When one bucket is not traversed\n except Exception as e:\n print(\"Bucket warning \", e)\n for i in range(len(buckets) - 1, -1, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if len_bucket == 0:\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n 
resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer=4,\n n_layers_trans_flow=4,\n flow_share_parameter=False,\n use_transformer_flow=True,\n **kwargs\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", True\n )\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n gin_channels=self.enc_gin_channels,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers_trans_flow,\n 5,\n p_dropout,\n n_flow_layer,\n gin_channels=gin_channels,\n share_parameter=flow_share_parameter,\n )\n else:\n self.flow = ResidualCouplingBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n n_flow_layer,\n gin_channels=gin_channels,\n )\n self.sdp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers >= 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(\n self,\n x,\n x_lengths,\n y,\n y_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n emo=None,\n ):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, sid, g=g\n )\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * 
(z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_),\n )\n\n def infer(\n self,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None,\n ):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, g=g\n )\n # torch.save(self.enc_p.state_dict(), 'enc_p.pth')\n logw = self.sdp(x, x_mask, g=g, noise_scale=noise_scale_w) * (\n sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n # torch.save(self.sdp.state_dict(), 'sdp.pth')\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n # y_lenghts 变更了\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)\n \n def infer_export(\n self,\n path,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None):\n \n x_cp = torch.LongTensor(x.clone().cpu())\n x_lengths_cp = torch.LongTensor(x_lengths.clone().cpu())\n sid_cp = torch.LongTensor(sid.clone().cpu())\n tone_cp = torch.LongTensor(tone.clone().cpu())\n language_cp = torch.LongTensor(language.clone().cpu())\n bert_cp = bert.clone().cpu()\n ja_bert_cp = ja_bert.clone().cpu()\n en_bert_cp = en_bert.clone().cpu()\n \n 
exported_onnx_dir = \"onnx_exports\"\n if not os.path.exists(f'{exported_onnx_dir}/{path}'):\n os.makedirs(f'{exported_onnx_dir}/{path}', exist_ok=True)\n print(f'{exported_onnx_dir}/{path}')\n \n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n self.emb_g.cpu()\n torch.onnx.export(\n self.emb_g,\n (sid_cp),\n f\"{exported_onnx_dir}/{path}/emb.onnx\",\n input_names=[\"sid\"],\n output_names=[\"g\"],\n verbose=False,\n opset_version=17,\n \n )\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n self.emb_g.to('cuda')\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, g=g\n )\n self.enc_p.eval()\n self.enc_p.to('cpu')\n\n torch.onnx.export(\n self.enc_p,\n (x_cp, x_lengths_cp, tone_cp, language_cp, bert_cp, ja_bert_cp, en_bert_cp, g.cpu()),\n f\"{exported_onnx_dir}/{path}/enc.onnx\",\n input_names=[\n \"x\",\n \"x_lengths\",\n \"tone\",\n \"language\",\n \"bert\",\n \"ja_bert\",\n \"en_bert\",\n \"g\",\n ],\n output_names=[\"xout\", \"m_p\", \"logs_p\", \"x_mask\"],\n dynamic_axes={\n \"x\": [1],\n \"x_lengths\": [0],\n \"tone\": [1],\n \"language\": [1],\n \"bert\": [2],\n \"ja_bert\": [2],\n \"en_bert\": [2],\n \"xout\": [2],\n \"m_p\": [2],\n \"logs_p\": [2],\n \"x_mask\": [2],\n },\n verbose=False,\n opset_version=17,\n )\n\n self.enc_p.to('cuda')\n print('start sdp!')\n \n logw = self.sdp(x, x_mask, g=g, noise_scale=noise_scale_w) * (\n sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n\n self.sdp.eval()\n self.sdp.to('cpu')\n self.dp.to('cpu')\n\n noise_scale_w = 0.8*torch.ones((1,), dtype=torch.float32)\n \n # \n # sdp_state_dict = self.sdp.state_dict()\n # torch.save(sdp_state_dict, 'sdp_weights.pth')\n \n torch.onnx.export(\n self.sdp,\n (x.cpu(), x_mask.cpu(), g.cpu(), noise_scale_w.cpu()),\n f\"{exported_onnx_dir}/{path}/sdp.onnx\",\n input_names=[\"x\", \"x_mask\", \"g\", \"noise_scale_w\"],\n output_names=[\"logw\"],\n # dynamic_axes={\"x\": [0, 2], \"x_mask\": [0, 2], \"logw\": [0, 2]},\n dynamic_axes={\"x\": [2], \"x_mask\": [2], \"logw\": [2]},\n verbose=False,\n opset_version=17\n )\n torch.onnx.export(\n self.dp,\n (x.cpu(), x_mask.cpu(), g.cpu()),\n f\"{exported_onnx_dir}/{path}/dp.onnx\",\n input_names=[\"x\", \"x_mask\", \"g\"],\n output_names=[\"logw\"],\n # dynamic_axes={\"x\": [0, 2], \"x_mask\": [0, 2], \"logw\": [0, 2]},\n dynamic_axes={\"x\": [2], \"x_mask\": [2], \"logw\": [2]},\n verbose=False,\n opset_version=17,\n )\n \n self.sdp.to('cuda')\n self.dp.to('cuda')\n \n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n \n z = self.flow(z_p, y_mask, g=g, reverse=True)\n self.flow.to(\"cpu\")\n torch.onnx.export(\n self.flow,\n (z_p.cpu(), y_mask.cpu(), g.cpu()),\n f\"{exported_onnx_dir}/{path}/flow.onnx\",\n input_names=[\"z_p\", \"y_mask\", \"g\"],\n output_names=[\"z\"],\n # dynamic_axes={\"z_p\": [0, 2], \"y_mask\": [0, 2], \"z\": [0, 2]},\n dynamic_axes={\"z_p\": [2], 
\"y_mask\": [2], \"z\": [2]},\n verbose=False,\n opset_version=17,\n )\n self.flow.to(\"cuda\")\n \n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n self.dec.to('cpu')\n z_in = (z * y_mask)[:, :, :max_len]\n torch.onnx.export(\n self.dec,\n (z_in.cpu(), g.cpu()),\n f\"{exported_onnx_dir}/{path}/dec.onnx\",\n input_names=[\"z_in\", \"g\"],\n output_names=[\"o\"],\n # dynamic_axes={\"z_in\": [0, 2], \"o\": [0, 2]},\n dynamic_axes={\"z_in\": [2], \"o\": [2]},\n verbose=False,\n opset_version=17,\n )\n self.dec.to('cuda')\n return o, attn, y_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): # vits2\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, 
dur, g)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2" }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = 
torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
import platform
import os
import torch
import torch.distributed as dist
import logging
import argparse
import commons
import utils
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from config import config
from data_utils import (
    TextAudioSpeakerLoader,
    TextAudioSpeakerCollate,
    DistributedBucketSampler,
)
from models import (
    SynthesizerTrn,
    MultiPeriodDiscriminator,
    DurationDiscriminator,
)
from losses import generator_loss, discriminator_loss, feature_loss, kl_loss
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols
13,258
if rank == 0: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, local_rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) in tqdm(enumerate(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, )
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If encontered training problem,please try to disable TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cudnn.benchmark = True torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 torch.backends.cuda.enable_math_sdp(True) global_step = 0 def run(): # 环境变量解析 envs = config.train_ms_config.env for env_name, env_value in envs.items(): if env_name not in os.environ.keys(): print("加载config中的配置{}".format(str(env_value))) os.environ[env_name] = str(env_value) print( "加载环境变量 \nMASTER_ADDR: {},\nMASTER_PORT: {},\nWORLD_SIZE: {},\nRANK: {},\nLOCAL_RANK: {}".format( os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"], os.environ["WORLD_SIZE"], os.environ["RANK"], os.environ["LOCAL_RANK"], ) ) # 多卡训练设置 backend = "nccl" if platform.system() == "Windows": backend = "gloo" dist.init_process_group( backend=backend, init_method="env://", # If Windows,switch to gloo backend. ) # Use torchrun instead of mp.spawn rank = dist.get_rank() local_rank = int(os.environ["LOCAL_RANK"]) n_gpus = dist.get_world_size() # 命令行/config.yml配置解析 # hps = utils.get_hparams() parser = argparse.ArgumentParser() # 非必要不建议使用命令行配置,请使用config.yml文件 parser.add_argument( "-c", "--config", type=str, default=config.train_ms_config.config_path, help="JSON file for configuration", ) parser.add_argument( "-m", "--model", type=str, help="数据集文件夹路径,请注意,数据不再默认放在/logs文件夹下。如果需要用命令行配置,请声明相对于根目录的路径", default=config.dataset_path, ) args = parser.parse_args() model_dir = os.path.join(args.model, config.train_ms_config.model) if not os.path.exists(model_dir): os.makedirs(model_dir) hps = utils.get_hparams_from_file(args.config) hps.model_dir = model_dir # 比较路径是否相同 if os.path.realpath(args.config) != os.path.realpath( config.train_ms_config.config_path ): with open(args.config, "r", encoding="utf-8") as f: data = f.read() with open(config.train_ms_config.config_path, "w", encoding="utf-8") as f: f.write(data) torch.manual_seed(hps.train.seed) torch.cuda.set_device(local_rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=16, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. 
if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(local_rank) if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(local_rank) net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(local_rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) else: optim_dur_disc = None net_g = DDP(net_g, device_ids=[local_rank]) net_d = DDP(net_d, device_ids=[local_rank]) dur_resume_lr = None if net_dur_disc is not None: net_dur_disc = DDP( net_dur_disc, device_ids=[local_rank], find_unused_parameters=True ) # 下载底模 if config.train_ms_config.base["use_base_model"]: utils.download_checkpoint( hps.model_dir, config.train_ms_config.base, token=config.openi_token, mirror=config.mirror, ) try: if net_dur_disc is not None: _, _, dur_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_g, g_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_d, d_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_g.param_groups[0].get("initial_lr"): optim_g.param_groups[0]["initial_lr"] = g_resume_lr if not optim_d.param_groups[0].get("initial_lr"): optim_d.param_groups[0]["initial_lr"] = d_resume_lr if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr epoch_str = max(epoch_str, 1) global_step = (epoch_str - 1) * len(train_loader) print( f"******************检测到模型存在,epoch为 {epoch_str},gloabl step为 
{global_step}*********************" ) except Exception as e: print(e) epoch_str = 1 global_step = 0 scheduler_g = torch.optim.lr_scheduler.ExponentialLR( optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_d = torch.optim.lr_scheduler.ExponentialLR( optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) if net_dur_disc is not None: if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR( optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, local_rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) in tqdm(enumerate(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, )
mel = spec_to_mel_torch(
12
2023-12-18 09:53:41+00:00
16k
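For reference, this row's gold next line, mel = spec_to_mel_torch(, opens a call whose target and gold_snippet_index (12) point at the spec_to_mel_torch snippet in the row's context, with signature spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax). A minimal sketch of how the full call likely reads inside train_and_evaluate; the hps-based arguments are inferred from that signature and are an assumption, not part of the row:

# Sketch only: `spec` and `hps` come from the training loop shown in the code above;
# the argument list is an assumption matching spec_to_mel_torch's signature from the context.
mel = spec_to_mel_torch(
    spec,
    hps.data.filter_length,
    hps.data.n_mel_channels,
    hps.data.sampling_rate,
    hps.data.mel_fmin,
    hps.data.mel_fmax,
)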
sinoyou/nelf-pro
nerfstudio/models/nelfpro.py
[ { "identifier": "RayBundle", "path": "nerfstudio/cameras/rays.py", "snippet": "class RayBundle(TensorDataclass):\n \"\"\"A bundle of ray parameters.\"\"\"\n\n # TODO(ethan): make sure the sizes with ... are correct\n origins: TensorType[..., 3]\n \"\"\"Ray origins (XYZ)\"\"\"\n directions: TensorType[..., 3]\n \"\"\"Unit ray direction vector\"\"\"\n pixel_area: TensorType[..., 1]\n \"\"\"Projected area of pixel a distance 1 away from origin\"\"\"\n directions_norm: Optional[TensorType[..., 1]] = None\n \"\"\"Norm of ray direction vector before normalization\"\"\"\n camera_indices: Optional[TensorType[..., 1]] = None\n \"\"\"Camera indices\"\"\"\n nears: Optional[TensorType[..., 1]] = None\n \"\"\"Distance along ray to start sampling\"\"\"\n fars: Optional[TensorType[..., 1]] = None\n \"\"\"Rays Distance along ray to stop sampling\"\"\"\n metadata: Optional[Dict[str, TensorType[\"num_rays\", \"latent_dims\"]]] = None\n \"\"\"Additional metadata or data needed for interpolation, will mimic shape of rays\"\"\"\n times: Optional[TensorType[..., 1]] = None\n \"\"\"Times at which rays are sampled\"\"\"\n probes: Optional[Probes] = None\n \"\"\"Probe Cameras Object. This object doesn't follow the same shape pattern as the other fields. \n Lazy broadcasting is used for preventing CUDA memory overflow. \"\"\"\n\n def set_camera_indices(self, camera_index: int) -> None:\n \"\"\"Sets all of the the camera indices to a specific camera index.\n\n Args:\n camera_index: Camera index.\n \"\"\"\n self.camera_indices = torch.ones_like(self.origins[..., 0:1]).long() * camera_index\n\n def __len__(self):\n num_rays = torch.numel(self.origins) // self.origins.shape[-1]\n return num_rays\n\n def sample(self, num_rays: int) -> \"RayBundle\":\n \"\"\"Returns a RayBundle as a subset of rays.\n\n Args:\n num_rays: Number of rays in output RayBundle\n\n Returns:\n RayBundle with subset of rays.\n \"\"\"\n assert num_rays <= len(self)\n indices = random.sample(range(len(self)), k=num_rays)\n return self[indices]\n\n def get_row_major_sliced_ray_bundle(self, start_idx: int, end_idx: int) -> \"RayBundle\":\n \"\"\"Flattens RayBundle and extracts chunk given start and end indicies.\n\n Args:\n start_idx: Start index of RayBundle chunk.\n end_idx: End index of RayBundle chunk.\n\n Returns:\n Flattened RayBundle with end_idx-start_idx rays.\n\n \"\"\"\n return self.flatten()[start_idx:end_idx]\n\n def get_ray_samples(\n self,\n bin_starts: TensorType[\"bs\":..., \"num_samples\", 1],\n bin_ends: TensorType[\"bs\":..., \"num_samples\", 1],\n spacing_starts: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_ends: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_to_euclidean_fn: Optional[Callable] = None,\n ) -> RaySamples:\n \"\"\"Produces samples for each ray by projection points along the ray direction. Currently samples uniformly.\n\n Args:\n bin_starts: Distance from origin to start of bin. (in Euclidean space)\n bin_ends: Distance from origin to end of bin. (in Euclidean space)\n spacing_starts: start point in normalized space. [0, 1]\n spacing_ends: end point in normalized space. 
[0, 1]\n\n Returns:\n Samples projected along ray.\n \"\"\"\n deltas = bin_ends - bin_starts\n if self.camera_indices is not None:\n camera_indices = self.camera_indices[..., None]\n else:\n camera_indices = None\n\n shaped_raybundle_fields = self[..., None]\n\n frustums = Frustums(\n origins=shaped_raybundle_fields.origins, # [..., 1, 3]\n directions=shaped_raybundle_fields.directions, # [..., 1, 3]\n starts=bin_starts, # [..., num_samples, 1]\n ends=bin_ends, # [..., num_samples, 1]\n pixel_area=shaped_raybundle_fields.pixel_area, # [..., 1, 1]\n )\n\n ray_samples = RaySamples(\n frustums=frustums,\n camera_indices=camera_indices, # [..., 1, 1]\n deltas=deltas, # [..., num_samples, 1]\n spacing_starts=spacing_starts, # [..., num_samples, 1]\n spacing_ends=spacing_ends, # [..., num_samples, 1]\n spacing_to_euclidean_fn=spacing_to_euclidean_fn,\n metadata=shaped_raybundle_fields.metadata,\n times=None if self.times is None else self.times[..., None], # [..., 1, 1]\n probes=self.probes, # special class, not following the same shape pattern\n )\n\n return ray_samples" }, { "identifier": "TrainingCallback", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallback:\n \"\"\"Callback class used during training.\n The function 'func' with 'args' and 'kwargs' will be called every 'update_every_num_iters' training iterations,\n including at iteration 0. The function is called after the training iteration.\n\n Args:\n where_to_run: List of locations for when to run callbak (before/after iteration)\n func: The function that will be called.\n update_every_num_iters: How often to call the function `func`.\n iters: Tuple of iteration steps to perform callback\n args: args for the function 'func'.\n kwargs: kwargs for the function 'func'.\n \"\"\"\n\n def __init__(\n self,\n where_to_run: List[TrainingCallbackLocation],\n func: Callable,\n update_every_num_iters: Optional[int] = None,\n iters: Optional[Tuple[int, ...]] = None,\n args: Optional[List] = None,\n kwargs: Optional[Dict] = None,\n ):\n assert (\n \"step\" in signature(func).parameters.keys()\n ), f\"'step: int' must be an argument in the callback function 'func': {func.__name__}\"\n self.where_to_run = where_to_run\n self.update_every_num_iters = update_every_num_iters\n self.iters = iters\n self.func = func\n self.args = args if args is not None else []\n self.kwargs = kwargs if kwargs is not None else {}\n\n def run_callback(self, step: int):\n \"\"\"Callback to run after training step\n\n Args:\n step: current iteration step\n \"\"\"\n if self.update_every_num_iters is not None:\n if step % self.update_every_num_iters == 0:\n self.func(*self.args, **self.kwargs, step=step)\n elif self.iters is not None:\n if step in self.iters:\n self.func(*self.args, **self.kwargs, step=step)\n\n def run_callback_at_location(self, step: int, location: TrainingCallbackLocation):\n \"\"\"Runs the callback if it's supposed to be run at the given location.\n\n Args:\n step: current iteration step\n location: when to run callback (before/after iteration)\n \"\"\"\n if location in self.where_to_run:\n self.run_callback(step=step)" }, { "identifier": "TrainingCallbackAttributes", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallbackAttributes:\n \"\"\"Attributes that can be used to configure training callbacks.\n The callbacks can be specified in the Dataloader or Model implementations.\n Instead of providing access to the entire Trainer object, we only provide these attributes.\n This should be least prone to errors and 
fairly clean from a user perspective.\"\"\"\n\n # TODO(ethan): type this without circular imports\n optimizers: Optional[InitVar]\n \"\"\"optimizers for training\"\"\"\n grad_scaler: Optional[InitVar]\n \"\"\"gradient scalers\"\"\"\n pipeline: Optional[InitVar]\n \"\"\"reference to training pipeline\"\"\"\n config: TrainerConfig\n \"\"\"the trainer config\"\"\"" }, { "identifier": "TrainingCallbackLocation", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallbackLocation(Enum):\n \"\"\"Enum for specifying where the training callback should be run.\"\"\"\n\n BEFORE_TRAIN_ITERATION = auto()\n AFTER_TRAIN_ITERATION = auto()" }, { "identifier": "NeLFProField", "path": "nerfstudio/fields/nelfpro_field.py", "snippet": "class NeLFProField(Field):\n def __init__(\n self,\n num_images, \n\n num_basis: int, \n near_basis: int, \n dim_basis: int, \n resolution_basis: int, \n\n num_core: int,\n near_core: int,\n dim_core: int, \n resolution_core_angular: int, \n resolution_core_radial: int, \n\n freq_theta: int,\n freq_phi: int, \n\n use_appearance_embedding: bool,\n apperance_embedding_dim: int = 16,\n \n num_layers_geometry: int = 2, \n hidden_dim_geometry: int = 128, \n geo_feat_dim: int = 31, \n num_layers_color: int = 3, \n hidden_dim_color: int = 64, \n ):\n super().__init__()\n\n # config for basis factor\n self.num_basis = num_basis \n self.near_basis = near_basis\n self.dim_basis = dim_basis\n self.resolution_basis = resolution_basis\n\n # config for core factor\n self.num_core = num_core \n self.near_core = near_core \n self.dim_core = dim_core \n self.resolution_core_angular = resolution_core_angular\n self.resolution_core_radial = resolution_core_radial\n\n # config for geometry and color mlps\n self.num_layers_geometry = num_layers_geometry \n self.hidden_dim_geometry = hidden_dim_geometry \n self.geo_feat_dim = geo_feat_dim \n self.num_layers_color = num_layers_color\n self.hidden_dim_color = hidden_dim_color\n\n # config for frequency warping on basis factor fields\n self.freq_theta = freq_theta\n self.freq_phi = freq_phi\n\n # config for apperance warping\n self.use_appearance_embedding = use_appearance_embedding\n self.num_images = num_images\n\n self.field_basis = FactorField.factory(\n total_num = self.num_basis,\n feat_dim = self.dim_basis, \n size = (self.resolution_basis, self.resolution_basis * 2), \n )\n\n self.field_core_angular = FactorField.factory(\n total_num = self.num_core,\n feat_dim = self.dim_core, \n size = (self.resolution_core_angular, self.resolution_core_angular * 2), \n )\n \n self.field_core_radial = FactorField.factory(\n total_num = self.num_core,\n feat_dim = self.dim_core, \n size = (1, self.resolution_core_radial), \n )\n\n # linear mapping for basis factor\n self.mlp_projection_basis = nn.Sequential(\n nn.Linear(self.dim_basis, self.dim_core, bias=True),\n )\n\n # factor aggregation network\n self.attn_basis = nn.Sequential(nn.Linear(self.dim_core, 1), nn.Sigmoid())\n self.attn_core_angular = nn.Sequential(nn.Linear(self.dim_core, 1), nn.Sigmoid())\n self.attn_core_radial = nn.Sequential(nn.Linear(self.dim_core, 1), nn.Sigmoid())\n\n # density prediction network\n self.bn = nn.BatchNorm1d(self.dim_core)\n self.mlp_density_geometry = MLP(in_dim=self.dim_core, \n num_layers=self.num_layers_geometry, \n layer_width=self.hidden_dim_geometry,\n out_dim=self.geo_feat_dim + 1)\n\n # color prediction network\n self.direction_encoding = lambda x: rsh_cart_3(x * 2 - 1)\n self.mlp_rgb_head = MLP(in_dim=self.geo_feat_dim + 16 + 16 * 
self.use_appearance_embedding,\n num_layers=self.num_layers_color,\n layer_width=self.hidden_dim_color,\n out_dim=3,\n out_activation=nn.Sigmoid())\n \n # appearance embedding\n if self.use_appearance_embedding:\n self.appearance_embedding_dim = apperance_embedding_dim\n self.embedding_appearance = Embedding(self.num_images, self.appearance_embedding_dim)\n\n def get_basis_fields(self):\n return {self.field_basis.get_cuda_fields()}\n \n def get_core_fields(self, name):\n if name == 'radial':\n return {self.field_core_radial.get_cuda_fields()}\n elif name == 'angular':\n return {self.field_core_angular.get_cuda_fields()}\n else:\n raise NameError(f'core field {name} not found. ')\n \n def get_stream_field(self, name):\n if name == 'coefficient_fields':\n return self.field_basis\n elif name == 'coefficient_grids_angular':\n return self.field_core_angular\n elif name == 'coefficient_grids_radial':\n return self.field_core_radial\n else:\n raise NameError(f'stream field {name} not found. ')\n\n def get_density(self, ray_samples: RaySamples):\n \"\"\"Project ray samples points to basis and core factors, decode and get density + geometric feature.\"\"\"\n positions = ray_samples.frustums.get_positions()\n\n # check number of probes generated by data parser\n self.field_basis.check_field_number_consistency(ray_samples.probes.get_num_basis())\n self.field_core_angular.check_field_number_consistency(ray_samples.probes.get_num_core())\n self.field_core_radial.check_field_number_consistency(ray_samples.probes.get_num_core())\n \n # base field class\n self._sample_locations = positions\n if not self._sample_locations.requires_grad:\n self._sample_locations.requires_grad = True\n \n # sample factor features\n probes = ray_samples.probes\n # core factor component\n core_ret = sample_core_factor_features(\n num_near_core=self.near_core,\n field_angular=self.field_core_angular,\n field_radial=self.field_core_radial,\n probes=probes,\n positions=positions,\n camera_indices=ray_samples.camera_indices,\n return_combined=False, \n )\n # basis factor component\n basis_ret = sample_basis_factor_features(\n num_near_basis=self.near_basis, \n field=self.field_basis, \n probes=probes, \n positions=positions, \n camera_indices=ray_samples.camera_indices,\n freq_phi=self.freq_phi, \n freq_theta=self.freq_theta\n )\n \n feature_core_radial = core_ret['radial_features']\n feature_core_angular = core_ret['angular_features']\n feature_basis = basis_ret['features']\n\n # basis feature projection\n feature_basis = self.mlp_projection_basis(feature_basis) # bs, samples, near_probes, fusion_dim\n\n # factor aggregation\n weight_core_radial = self.attn_core_radial(feature_core_radial)\n fused_core_radial = torch.sum(feature_core_radial * weight_core_radial, dim=-2)\n weight_core_angular = self.attn_core_angular(feature_core_angular)\n fused_core_angular = torch.sum(feature_core_angular * weight_core_angular, dim=-2)\n weight_basis = self.attn_basis(feature_basis)\n fused_basis = torch.sum(feature_basis * weight_basis, dim=-2) # bs, samples, feat_dim\n tau = fused_core_angular * fused_core_radial * fused_basis # bs, samples, feat_dim\n tau = self.bn(tau.flatten(end_dim=-2)).view(*tau.shape[:-1], -1)\n \n # density and geometry feature decoding\n h = self.mlp_density_geometry(tau) # N_rays, N_samples, geo_feat_dim + 1 \n _density_before_activation, geometry_feat = torch.split(h, [1, self.geo_feat_dim], dim=-1)\n self._density_before_activation = _density_before_activation\n density = trunc_exp(_density_before_activation)\n\n 
return density, geometry_feat\n\n def get_outputs(self, ray_samples: RaySamples, density_embedding: Optional[Union[Dict, TensorType]]):\n assert density_embedding is not None\n outputs = dict()\n\n directions = ray_samples.frustums.directions\n direction_enc = self.direction_encoding((directions + 1.0) / 2.0)\n density_features = density_embedding\n h = torch.cat([density_features, direction_enc], dim=-1)\n\n if self.use_appearance_embedding:\n camera_indices = ray_samples.camera_indices.squeeze(dim=-1)\n if self.training:\n embedded_appearance = self.embedding_appearance(camera_indices)\n else:\n embedded_appearance = torch.ones(\n (*ray_samples.frustums.directions.shape[:-1], self.appearance_embedding_dim), device=ray_samples.frustums.directions.device, \n ) * self.embedding_appearance.mean(dim=0)\n h = torch.concat([h, embedded_appearance], dim=-1)\n rgb = self.mlp_rgb_head(h)\n\n outputs.update({'rgb': rgb})\n if self.training:\n outputs.update({'basis': self.field_basis.get_cuda_fields()})\n outputs.update({'core_angular': self.field_core_angular.get_cuda_fields()})\n\n return outputs\n\n def get_field_coefficients(self):\n return self.field_basis.get_cuda_fields().detach().cpu()\n \n def upsample_basis(self, target_resolution):\n self.field_basis.upsample((target_resolution, target_resolution * 2))\n\n def upsample_core(self, target_resolution):\n target_res_angular, target_res_radial = target_resolution\n self.field_core_angular.upsample((target_res_angular, target_res_angular * 2))\n self.field_core_radial.upsample((1, target_res_radial))" }, { "identifier": "NeLFDensityField", "path": "nerfstudio/fields/density_fields.py", "snippet": "class NeLFDensityField(Field):\n \"\"\"\n A lightweight density field module using spherical panorama coordinates. 
\n\n Args:\n num_core: number of core factors\n near_core: number of near core factors for each ray\n\n angular_resolution: resolution of the panorama\n radial_resolution: resolution of the depth direction\n feat_dim: dimension of features\n \n num_layers: number of hidden layers\n hidden_dim: dimension of hidden layers\n \"\"\"\n\n def __init__(\n self, \n num_core,\n near_core, \n angular_resolution,\n radial_resolution,\n feat_dim, \n num_layers: int = 2,\n hidden_dim: int = 64,\n ) -> None:\n super().__init__()\n\n self.field_num = num_core\n self.num_near_grids = near_core\n\n self.angular_field_stream = FactorField.factory(\n total_num=num_core,\n feat_dim=feat_dim,\n size = (angular_resolution, angular_resolution * 2), \n )\n\n self.radial_field = FactorField.factory(\n total_num=num_core,\n feat_dim=feat_dim,\n size = (1, radial_resolution),\n )\n\n self.mlp_density_geometry = []\n self.mlp_density_geometry.append(torch.nn.Linear(feat_dim, hidden_dim))\n self.mlp_density_geometry.append(torch.nn.ReLU())\n for _ in range(num_layers - 1):\n self.mlp_density_geometry.append(torch.nn.Linear(hidden_dim, hidden_dim))\n self.mlp_density_geometry.append(torch.nn.ReLU())\n self.mlp_density_geometry.append(torch.nn.Linear(hidden_dim, 1))\n self.mlp_density_geometry = torch.nn.Sequential(*self.mlp_density_geometry)\n\n def get_density(self, ray_samples: RaySamples):\n probes = ray_samples.probes\n\n ret = sample_core_factor_features(\n num_near_core=self.num_near_grids,\n field_angular=self.angular_field_stream,\n field_radial=self.radial_field,\n probes=probes,\n positions=ray_samples.frustums.get_positions(),\n camera_indices=ray_samples.camera_indices,\n )\n\n grid_field_features = ret['features']\n weighted_features = torch.mean(grid_field_features, dim=-2)\n\n density_before_activation = self.mlp_density_geometry(weighted_features.flatten(start_dim=0, end_dim=-2))\n density = trunc_exp(density_before_activation.view(*ray_samples.frustums.shape, -1))\n\n return density, None\n \n def get_outputs(self, ray_samples: RaySamples, density_embedding: Optional[TensorType] = None):\n return {}" }, { "identifier": "MSELoss", "path": "nerfstudio/model_components/losses.py", "snippet": "LOSSES = {\"L1\": L1Loss, \"MSE\": MSELoss}\nEPS = 1.0e-7\n C, D, H, W = field_parameter.shape\ndef outer(\n t0_starts: TensorType[..., \"num_samples_0\"],\n t0_ends: TensorType[..., \"num_samples_0\"],\n t1_starts: TensorType[..., \"num_samples_1\"],\n t1_ends: TensorType[..., \"num_samples_1\"],\n y1: TensorType[..., \"num_samples_1\"],\n) -> TensorType[..., \"num_samples_0\"]:\ndef lossfun_outer(\n t: TensorType[..., \"num_samples+1\"],\n w: TensorType[..., \"num_samples\"],\n t_env: TensorType[..., \"num_samples+1\"],\n w_env: TensorType[..., \"num_samples\"],\n):\ndef ray_samples_to_sdist(ray_samples):\ndef interlevel_loss(weights_list, ray_samples_list):\ndef field_tv_loss(field_parameter):\ndef lossfun_distortion(t, w):\ndef distortion_loss(weights_list, ray_samples_list):" }, { "identifier": "ProposalNetworkSampler", "path": "nerfstudio/model_components/ray_samplers.py", "snippet": "class ProposalNetworkSampler(Sampler):\n \"\"\"Sampler that uses a proposal network to generate samples.\"\"\"\n\n def __init__(\n self,\n init_sampler: str,\n num_proposal_samples_per_ray: Tuple[int] = (64,),\n num_nerf_samples_per_ray: int = 32,\n num_proposal_network_iterations: int = 2,\n single_jitter: bool = False,\n update_sched: Callable = lambda x: 1,\n ) -> None:\n super().__init__()\n\n self.num_proposal_samples_per_ray 
= num_proposal_samples_per_ray\n self.num_nerf_samples_per_ray = num_nerf_samples_per_ray\n self.num_proposal_network_iterations = num_proposal_network_iterations\n self.update_sched = update_sched\n if self.num_proposal_network_iterations < 1:\n raise ValueError(\"num_proposal_network_iterations must be >= 1\")\n\n if init_sampler == \"uniform\":\n self.initial_sampler = UniformSampler(single_jitter=single_jitter)\n elif init_sampler == 'log':\n self.initial_sampler = LogSampler(single_jitter=single_jitter)\n elif init_sampler == 'sqrt':\n self.initial_sampler = SqrtSampler(single_jitter=single_jitter)\n elif init_sampler == 'uniformdisp': \n self.initial_sampler = UniformLinDispPiecewiseSampler(single_jitter=single_jitter)\n else:\n raise NotImplementedError(f\"Unknown init_sampler {init_sampler}\")\n\n self.pdf_sampler = PDFSampler(include_original=False, single_jitter=single_jitter)\n\n self._anneal = 1.0\n self._steps_since_update = 0\n self._step = 0\n\n def set_anneal(self, anneal: float) -> None:\n \"\"\"Set the anneal value for the proposal network.\"\"\"\n self._anneal = anneal\n\n def step_cb(self, step):\n \"\"\"Callback to register a training step has passed. This is used to keep track of the sampling schedule\"\"\"\n self._step = step\n self._steps_since_update += 1\n\n def generate_ray_samples(\n self,\n ray_bundle: Optional[RayBundle] = None,\n density_fns: Optional[List[Callable]] = None,\n ) -> Tuple[RaySamples, List, List]:\n assert ray_bundle is not None\n assert density_fns is not None\n\n weights_list = []\n ray_samples_list = []\n\n n = self.num_proposal_network_iterations\n weights = None\n ray_samples = None\n # zinyou: 'updated' controls whether to update the field for the coarse density estimation.\n updated = self._steps_since_update > self.update_sched(self._step) or self._step < 10\n for i_level in range(n + 1):\n is_prop = i_level < n\n num_samples = self.num_proposal_samples_per_ray[i_level] if is_prop else self.num_nerf_samples_per_ray\n if i_level == 0:\n # Uniform sampling because we need to start with some samples\n ray_samples = self.initial_sampler(ray_bundle, num_samples=num_samples)\n else:\n # PDF sampling based on the last samples and their weights\n # Perform annealing to the weights. 
This will be a no-op if self._anneal is 1.0.\n assert weights is not None\n annealed_weights = torch.pow(weights, self._anneal)\n ray_samples = self.pdf_sampler(ray_bundle, ray_samples, annealed_weights, num_samples=num_samples)\n\n if is_prop:\n if updated:\n # always update on the first step or the inf check in grad scaling crashes\n density = density_fns[i_level](ray_samples)\n else:\n with torch.no_grad():\n density = density_fns[i_level](ray_samples)\n weights = ray_samples.get_weights(density)\n weights_list.append(weights) # (num_rays, num_samples)\n ray_samples_list.append(ray_samples)\n if updated:\n self._steps_since_update = 0\n\n assert ray_samples is not None\n return ray_samples, weights_list, ray_samples_list" }, { "identifier": "AccumulationRenderer", "path": "nerfstudio/model_components/renderers.py", "snippet": "class AccumulationRenderer(nn.Module):\n \"\"\"Accumulated value along a ray.\"\"\"\n\n @classmethod\n def forward(\n cls,\n weights: TensorType[\"bs\":..., \"num_samples\", 1],\n ray_indices: Optional[TensorType[\"num_samples\"]] = None,\n num_rays: Optional[int] = None,\n ) -> TensorType[\"bs\":..., 1]:\n \"\"\"Composite samples along ray and calculate accumulation.\n\n Args:\n weights: Weights for each sample\n ray_indices: Ray index for each sample, used when samples are packed.\n num_rays: Number of rays, used when samples are packed.\n\n Returns:\n Outputs of accumulated values.\n \"\"\"\n\n if ray_indices is not None and num_rays is not None:\n # Necessary for packed samples from volumetric ray sampler\n accumulation = nerfacc.accumulate_along_rays(weights, ray_indices, None, num_rays)\n else:\n accumulation = torch.sum(weights, dim=-2)\n accumulation = accumulation.clip(min=0.0, max=1.0)\n return accumulation" }, { "identifier": "DepthRenderer", "path": "nerfstudio/model_components/renderers.py", "snippet": "class DepthRenderer(nn.Module):\n \"\"\"Calculate depth along ray.\n\n Depth Method:\n - median: Depth is set to the distance where the accumulated weight reaches 0.5.\n - expected: Expected depth along ray. 
Same procedure as rendering rgb, but with depth.\n\n Args:\n method: Depth calculation method.\n \"\"\"\n\n def __init__(self, method: Literal[\"median\", \"expected\"] = \"median\") -> None:\n super().__init__()\n self.method = method\n\n def forward(\n self,\n weights: TensorType[..., \"num_samples\", 1],\n ray_samples: RaySamples,\n ray_indices: Optional[TensorType[\"num_samples\"]] = None,\n num_rays: Optional[int] = None,\n ) -> TensorType[..., 1]:\n \"\"\"Composite samples along ray and calculate depths.\n\n Args:\n weights: Weights for each sample.\n ray_samples: Set of ray samples.\n ray_indices: Ray index for each sample, used when samples are packed.\n num_rays: Number of rays, used when samples are packed.\n\n Returns:\n Outputs of depth values.\n \"\"\"\n\n if self.method == \"median\":\n steps = (ray_samples.frustums.starts + ray_samples.frustums.ends) / 2\n\n if ray_indices is not None and num_rays is not None:\n raise NotImplementedError(\"Median depth calculation is not implemented for packed samples.\")\n cumulative_weights = torch.cumsum(weights[..., 0], dim=-1) # [..., num_samples]\n split = torch.ones((*weights.shape[:-2], 1), device=weights.device) * 0.5 # [..., 1]\n median_index = torch.searchsorted(cumulative_weights, split, side=\"left\") # [..., 1]\n median_index = torch.clamp(median_index, 0, steps.shape[-2] - 1) # [..., 1]\n median_depth = torch.gather(steps[..., 0], dim=-1, index=median_index) # [..., 1]\n return median_depth\n if self.method == \"expected\":\n eps = 1e-10\n steps = (ray_samples.frustums.starts + ray_samples.frustums.ends) / 2\n\n if ray_indices is not None and num_rays is not None:\n # Necessary for packed samples from volumetric ray sampler\n depth = nerfacc.accumulate_along_rays(weights, ray_indices, steps, num_rays)\n accumulation = nerfacc.accumulate_along_rays(weights, ray_indices, None, num_rays)\n depth = depth / (accumulation + eps)\n else:\n depth = torch.sum(weights * steps, dim=-2) / (torch.sum(weights, -2) + eps)\n\n depth = torch.clip(depth, steps.min(), steps.max())\n\n return depth\n\n raise NotImplementedError(f\"Method {self.method} not implemented\")" }, { "identifier": "RGBRenderer", "path": "nerfstudio/model_components/renderers.py", "snippet": "class RGBRenderer(nn.Module):\n \"\"\"Standard volumetic rendering.\n\n Args:\n background_color: Background color as RGB. 
Uses random colors if None.\n \"\"\"\n\n def __init__(self, background_color: Union[Literal[\"random\", \"last_sample\"], TensorType[3]] = \"random\") -> None:\n super().__init__()\n self.background_color = background_color\n\n @classmethod\n def combine_rgb(\n cls,\n rgb: TensorType[\"bs\":..., \"num_samples\", 3],\n weights: TensorType[\"bs\":..., \"num_samples\", 1],\n background_color: Union[Literal[\"random\", \"last_sample\"], TensorType[3]] = \"random\",\n ray_indices: Optional[TensorType[\"num_samples\"]] = None,\n num_rays: Optional[int] = None,\n ) -> TensorType[\"bs\":..., 3]:\n \"\"\"Composite samples along ray and render color image\n\n Args:\n rgb: RGB for each sample\n weights: Weights for each sample\n background_color: Background color as RGB.\n ray_indices: Ray index for each sample, used when samples are packed.\n num_rays: Number of rays, used when samples are packed.\n\n Returns:\n Outputs rgb values.\n \"\"\"\n if ray_indices is not None and num_rays is not None:\n # Necessary for packed samples from volumetric ray sampler\n if background_color == \"last_sample\":\n raise NotImplementedError(\"Background color 'last_sample' not implemented for packed samples.\")\n comp_rgb = nerfacc.accumulate_along_rays(weights, ray_indices, rgb, num_rays)\n accumulated_weight = nerfacc.accumulate_along_rays(weights, ray_indices, None, num_rays)\n else:\n comp_rgb = torch.sum(weights * rgb, dim=-2)\n accumulated_weight = torch.sum(weights, dim=-2)\n\n if background_color == \"last_sample\":\n background_color = rgb[..., -1, :]\n if background_color == \"random\":\n background_color = torch.rand_like(comp_rgb).to(rgb.device)\n\n assert isinstance(background_color, torch.Tensor)\n comp_rgb = comp_rgb + background_color.to(weights.device) * (1.0 - accumulated_weight)\n\n return comp_rgb\n\n def forward(\n self,\n rgb: TensorType[\"bs\":..., \"num_samples\", 3],\n weights: TensorType[\"bs\":..., \"num_samples\", 1],\n ray_indices: Optional[TensorType[\"num_samples\"]] = None,\n num_rays: Optional[int] = None,\n ) -> TensorType[\"bs\":..., 3]:\n \"\"\"Composite samples along ray and render color image\n\n Args:\n rgb: RGB for each sample\n weights: Weights for each sample\n ray_indices: Ray index for each sample, used when samples are packed.\n num_rays: Number of rays, used when samples are packed.\n\n Returns:\n Outputs of rgb values.\n \"\"\"\n\n rgb = self.combine_rgb(\n rgb, weights, background_color=self.background_color, ray_indices=ray_indices, num_rays=num_rays\n )\n if not self.training:\n torch.clamp_(rgb, min=0.0, max=1.0)\n return rgb" }, { "identifier": "NearFarCollider", "path": "nerfstudio/model_components/scene_colliders.py", "snippet": "class NearFarCollider(SceneCollider):\n \"\"\"Sets the nears and fars with fixed values.\n\n Args:\n near_plane: distance to near plane\n far_plane: distance to far plane\n \"\"\"\n\n def __init__(self, near_plane: float, far_plane: float, **kwargs) -> None:\n self.near_plane = near_plane\n self.far_plane = far_plane\n super().__init__(**kwargs)\n\n def set_nears_and_fars(self, ray_bundle: RayBundle) -> RayBundle:\n ones = torch.ones_like(ray_bundle.origins[..., 0:1])\n near_plane = self.near_plane if self.training else self.near_plane # 0\n ray_bundle.nears = ones * near_plane\n ray_bundle.fars = ones * self.far_plane\n return ray_bundle" }, { "identifier": "EarthCollider", "path": "nerfstudio/model_components/scene_colliders.py", "snippet": "class EarthCollider(SceneCollider):\n def __init__(self, scene_scaling_factor, **kwargs):\n # 
self.scene_origin = scene_origin\n self.scene_scaling_factor = scene_scaling_factor\n super().__init__(**kwargs)\n\n def set_nears_and_fars(self, ray_bundle: RayBundle) -> RayBundle:\n rays_o = ray_bundle.origins\n rays_d = ray_bundle.directions\n\n normal = torch.tensor([0, 0, 1]).to(rays_o)\n p0_far = torch.tensor([0, 0, 0]).to(rays_o) * self.scene_scaling_factor\n p0_near = torch.tensor([0, 0, 300]).to(rays_o) * self.scene_scaling_factor\n\n near = (p0_near - rays_o * normal).sum(-1) / (rays_d * normal).sum(-1)\n far = (p0_far - rays_o * normal).sum(-1) / (rays_d * normal).sum(-1)\n near = near.clamp(min=1e-6)\n near, far = near.unsqueeze(-1), far.unsqueeze(-1)\n\n ray_bundle.nears = near\n ray_bundle.fars = far\n\n return ray_bundle" }, { "identifier": "Model", "path": "nerfstudio/models/base_model.py", "snippet": "class Model(nn.Module):\n \"\"\"Model class\n Where everything (Fields, Optimizers, Samplers, Visualization, etc) is linked together. This should be\n subclassed for custom NeRF model.\n\n Args:\n config: configuration for instantiating model\n scene_box: dataset scene box\n \"\"\"\n\n config: ModelConfig\n\n def __init__(\n self,\n config: ModelConfig,\n scene_box: SceneBox,\n num_train_data: int,\n world_size: int = 1,\n local_rank: int = 0,\n load_step: int = None, \n **kwargs,\n ) -> None:\n super().__init__()\n self.config = config\n self.scene_box = scene_box\n self.num_train_data = num_train_data\n self.kwargs = kwargs\n self.collider = None\n self.world_size = world_size\n self.local_rank = local_rank\n self.load_step = load_step\n\n self.populate_modules() # populate the modules\n self.callbacks = None\n # to keep track of which device the nn.Module is on\n self.device_indicator_param = nn.Parameter(torch.empty(0))\n\n @property\n def device(self):\n \"\"\"Returns the device that the model is on.\"\"\"\n return self.device_indicator_param.device\n\n def get_training_callbacks( # pylint:disable=no-self-use\n self, training_callback_attributes: TrainingCallbackAttributes # pylint: disable=unused-argument\n ) -> List[TrainingCallback]:\n \"\"\"Returns a list of callbacks that run functions at the specified training iterations.\"\"\"\n return []\n\n def populate_modules(self):\n \"\"\"Set the necessary modules to get the network working.\"\"\"\n # default instantiates optional modules that are common among many networks\n # NOTE: call `super().populate_modules()` in subclasses\n\n if self.config.enable_collider:\n self.collider = NearFarCollider(\n near_plane=self.config.collider_params[\"near_plane\"], far_plane=self.config.collider_params[\"far_plane\"]\n )\n\n @abstractmethod\n def get_param_groups(self) -> Dict[str, List[Parameter]]:\n \"\"\"Obtain the parameter groups for the optimizers\n\n Returns:\n Mapping of different parameter groups\n \"\"\"\n\n @abstractmethod\n def get_outputs(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Takes in a Ray Bundle and returns a dictionary of outputs.\n\n Args:\n ray_bundle: Input bundle of rays. This raybundle should have all the\n needed information to compute the outputs.\n\n Returns:\n Outputs of model. (ie. rendered colors)\n \"\"\"\n\n def forward(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Run forward starting with a ray bundle. 
This outputs different things depending on the configuration\n of the model and whether or not the batch is provided (whether or not we are training basically)\n\n Args:\n ray_bundle: containing all the information needed to render that ray latents included\n \"\"\"\n\n if self.collider is not None:\n ray_bundle = self.collider(ray_bundle)\n\n return self.get_outputs(ray_bundle)\n\n def get_metrics_dict(self, outputs, batch) -> Dict[str, torch.Tensor]:\n \"\"\"Compute and returns metrics.\n\n Args:\n outputs: the output to compute loss dict to\n batch: ground truth batch corresponding to outputs\n \"\"\"\n # pylint: disable=unused-argument\n # pylint: disable=no-self-use\n return {}\n \n\n @abstractmethod\n def get_loss_dict(self, outputs, batch, metrics_dict=None) -> Dict[str, torch.Tensor]:\n \"\"\"Computes and returns the losses dict.\n\n Args:\n outputs: the output to compute loss dict to\n batch: ground truth batch corresponding to outputs\n metrics_dict: dictionary of metrics, some of which we can use for loss\n \"\"\"\n \n def n_parameters(self):\n return -1.0\n\n @torch.no_grad()\n def get_outputs_for_camera_ray_bundle(self, camera_ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Takes in camera parameters and computes the output of the model.\n\n Args:\n camera_ray_bundle: ray bundle to calculate outputs over\n \"\"\"\n num_rays_per_chunk = self.config.eval_num_rays_per_chunk\n image_height, image_width = camera_ray_bundle.origins.shape[:2]\n num_rays = len(camera_ray_bundle)\n outputs_lists = defaultdict(list)\n for i in range(0, num_rays, num_rays_per_chunk):\n start_idx = i\n end_idx = i + num_rays_per_chunk\n ray_bundle = camera_ray_bundle.get_row_major_sliced_ray_bundle(start_idx, end_idx)\n outputs = self.forward(ray_bundle=ray_bundle)\n for output_name, output in outputs.items(): # type: ignore\n outputs_lists[output_name].append(output)\n outputs = {}\n for output_name, outputs_list in outputs_lists.items():\n if not torch.is_tensor(outputs_list[0]):\n # TODO: handle lists of tensors as well\n continue\n outputs[output_name] = torch.cat(outputs_list).view(image_height, image_width, -1) # type: ignore\n return outputs\n\n @abstractmethod\n def get_image_metrics_and_images(\n self, outputs: Dict[str, torch.Tensor], batch: Dict[str, torch.Tensor]\n ) -> Tuple[Dict[str, float], Dict[str, torch.Tensor]]:\n \"\"\"Writes the test image outputs.\n TODO: This shouldn't return a loss\n\n Args:\n image_idx: Index of the image.\n step: Current step.\n batch: Batch of data.\n outputs: Outputs of the model.\n\n Returns:\n A dictionary of metrics.\n \"\"\"\n\n def load_model(self, loaded_state: Dict[str, Any]) -> None:\n \"\"\"Load the checkpoint from the given path\n\n Args:\n loaded_state: dictionary of pre-trained model states\n \"\"\"\n state = {key.replace(\"module.\", \"\"): value for key, value in loaded_state[\"model\"].items()}\n self.load_state_dict(state) # type: ignore\n \n def customized_save(self, step: int, checkpoint_dir) -> None:\n \"\"\"Call the model's customized save function.\n\n Args:\n step: Current step.\n checkpoint_dir: directory of checkpoint\n \"\"\"\n pass\n\n def customized_load(self, load_step: int, checkpoint_dir) -> None:\n \"\"\"Call the model's customized load function.\n\n Args:\n checkpoint_dir: directory of checkpoint\n \"\"\"\n pass" }, { "identifier": "ModelConfig", "path": "nerfstudio/models/base_model.py", "snippet": "class ModelConfig(InstantiateConfig):\n \"\"\"Configuration for model instantiation\"\"\"\n\n _target: Type = 
field(default_factory=lambda: Model)\n \"\"\"target class to instantiate\"\"\"\n enable_collider: bool = True\n \"\"\"Whether to create a scene collider to filter rays.\"\"\"\n collider_params: Optional[Dict[str, float]] = to_immutable_dict({\"near_plane\": 2.0, \"far_plane\": 6.0})\n \"\"\"parameters to instantiate scene collider with\"\"\"\n loss_coefficients: Dict[str, float] = to_immutable_dict({\"rgb_loss_coarse\": 1.0, \"rgb_loss_fine\": 1.0})\n \"\"\"parameters to instantiate density field with\"\"\"\n eval_num_rays_per_chunk: int = 4096\n \"\"\"specifies number of rays per chunk during eval\"\"\"" }, { "identifier": "colormaps", "path": "nerfstudio/utils/colormaps.py", "snippet": "def apply_colormap(image: TensorType[\"bs\":..., 1], cmap=\"viridis\") -> TensorType[\"bs\":..., \"rgb\":3]:\ndef apply_depth_colormap(\n depth: TensorType[\"bs\":..., 1],\n accumulation: Optional[TensorType[\"bs\":..., 1]] = None,\n near_plane: Optional[float] = None,\n far_plane: Optional[float] = None,\n cmap=\"turbo\",\n) -> TensorType[\"bs\":..., \"rgb\":3]:\ndef apply_boolean_colormap(\n image: TensorType[\"bs\":..., 1, bool],\n true_color: TensorType[\"bs\":..., \"rgb\":3] = colors.WHITE,\n false_color: TensorType[\"bs\":..., \"rgb\":3] = colors.BLACK,\n) -> TensorType[\"bs\":..., \"rgb\":3]:" }, { "identifier": "get_color", "path": "nerfstudio/utils/colors.py", "snippet": "def get_color(color: Union[str, list]) -> TensorType[3]:\n \"\"\"\n Args:\n color (Union[str, list]): Color as a string or a rgb list\n\n Returns:\n TensorType[3]: Parsed color\n \"\"\"\n if isinstance(color, str):\n color = color.lower()\n if color not in COLORS_DICT:\n raise ValueError(f\"{color} is not a valid preset color\")\n return COLORS_DICT[color]\n if isinstance(color, list):\n if len(color) != 3:\n raise ValueError(f\"Color should be 3 values (RGB) instead got {color}\")\n return torch.tensor(color)\n\n raise ValueError(f\"Color should be an RGB list or string, instead got {type(color)}\")" } ]
from dataclasses import dataclass, field from typing import Dict, List, Tuple, Type from torch.nn import Parameter from torchmetrics import PeakSignalNoiseRatio from torchmetrics.functional import structural_similarity_index_measure from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity from typing_extensions import Literal from nerfstudio.cameras.rays import RayBundle from nerfstudio.engine.callbacks import ( TrainingCallback, TrainingCallbackAttributes, TrainingCallbackLocation, ) from nerfstudio.fields.nelfpro_field import NeLFProField from nerfstudio.fields.density_fields import NeLFDensityField from nerfstudio.model_components.losses import ( MSELoss, distortion_loss, interlevel_loss, field_tv_loss, ) from nerfstudio.model_components.ray_samplers import ProposalNetworkSampler from nerfstudio.model_components.renderers import ( AccumulationRenderer, DepthRenderer, RGBRenderer, ) from nerfstudio.model_components.scene_colliders import NearFarCollider, EarthCollider from nerfstudio.models.base_model import Model, ModelConfig from nerfstudio.utils import colormaps from nerfstudio.utils.colors import get_color import numpy as np import torch
13,802
callbacks = [] def reinitialize_optimizer(params_name, training_callback_attributes, step): '''reinitialize optimizer and scheduler after upsampling. ''' optimizers_config = training_callback_attributes.optimizers.config data = training_callback_attributes.pipeline.get_param_groups()[params_name] lr_init = optimizers_config[params_name]["optimizer"].lr opt_state_param_groups = training_callback_attributes.optimizers.optimizers[params_name].state_dict()['param_groups'] training_callback_attributes.optimizers.optimizers[params_name] = optimizers_config[params_name]["optimizer"].setup(params=data) # note: we load_state_dict() for loading param_groups's _lr_initial, which is used for scheduler's last_epoch. training_callback_attributes.optimizers.optimizers[params_name].load_state_dict({ 'state': training_callback_attributes.optimizers.optimizers[params_name].state_dict()['state'], 'param_groups': opt_state_param_groups }) if optimizers_config[params_name]["scheduler"]: # save current state dict training_callback_attributes.optimizers.schedulers[params_name] = optimizers_config[params_name]["scheduler"].setup( optimizer=training_callback_attributes.optimizers.optimizers[params_name], lr_init=lr_init, last_epoch=step, ) # upsampling core factor if self.config.resolution_core_angular < self.config.resolution_max_core_angular or self.config.resolution_core_radial < self.config.resolution_max_core_radial: def upsample_core(self, training_callback_attributes: TrainingCallbackAttributes, step: int): angular_res = self.core_angular_upsamling_step.pop(0) radial_res = self.core_radial_upsamling_step.pop(0) self.field.upsample_core((angular_res, radial_res)) reinitialize_optimizer('field_core_angular', training_callback_attributes, step) reinitialize_optimizer('field_core_radial', training_callback_attributes, step) callbacks.append( TrainingCallback( where_to_run=[TrainingCallbackLocation.AFTER_TRAIN_ITERATION], iters=self.config.iters_core_upsampling, func=upsample_core, args=[self, training_callback_attributes] ) ) # update distortion loss multiplier if self.config.distortion_loss_mult_factor_max != 1.0: def update_distortion_loss_mult_factor(self, step: int): self.current_distort_loss_mult_factor = self.distortion_loss_mult_factor_step.pop(0) * self.config.distortion_loss_mult callbacks.append( TrainingCallback( where_to_run=[TrainingCallbackLocation.AFTER_TRAIN_ITERATION], iters=self.config.distortion_loss_mult_factor_iters, func=update_distortion_loss_mult_factor, args=[self] ) ) # Proposal Network Annealing and Status Update if self.config.use_proposal_weight_anneal: N = self.config.proposal_weights_anneal_max_num_iters def set_anneal(step): train_frac = np.clip(step / N, 0, 1) bias = lambda x, b: (b * x) / ((b - 1) * x + 1) anneal = bias(train_frac, self.config.proposal_weights_anneal_slope) self.proposal_sampler.set_anneal(anneal) callbacks.append( TrainingCallback( where_to_run=[TrainingCallbackLocation.BEFORE_TRAIN_ITERATION], update_every_num_iters=1, func=set_anneal, ) ) callbacks.append( TrainingCallback( where_to_run=[TrainingCallbackLocation.AFTER_TRAIN_ITERATION], update_every_num_iters=1, func=self.proposal_sampler.step_cb, ) ) return callbacks def get_outputs(self, ray_bundle: RayBundle): ray_samples, weights_list, ray_samples_list = self.proposal_sampler(ray_bundle, density_fns=self.density_fns) field_outputs = self.field(ray_samples) weights = ray_samples.get_weights(field_outputs['density']) weights_list.append(weights) ray_samples_list.append(ray_samples) rgb = 
self.renderer_rgb(rgb=field_outputs['rgb'], weights=weights) depth = self.renderer_depth(weights=weights, ray_samples=ray_samples) accumulation = self.renderer_accumulation(weights=weights) outputs = { "rgb": rgb, "accumulation": accumulation, "depth": depth, } if self.training: outputs["tensors"] = { 'basis': field_outputs['basis'], 'core_angular': field_outputs['core_angular'], } # These use a lot of GPU memory, so we avoid storing them for eval. if self.training: outputs["weights_list"] = weights_list outputs["ray_samples_list"] = ray_samples_list for i in range(self.config.num_proposal_iterations): outputs[f"prop_depth_{i}"] = self.renderer_depth(weights=weights_list[i], ray_samples=ray_samples_list[i]) return outputs def get_metrics_dict(self, outputs, batch): metrics_dict = {} image = batch["image"].to(self.device) metrics_dict["psnr"] = self.psnr(outputs["rgb"], image) if self.training:
from __future__ import annotations @dataclass class NeLFProModelConfig(ModelConfig): _target: Type = field(default_factory=lambda: NeLFProModel) # basis factor configs num_basis: int = 64 """Number of basis factors.""" near_basis: int = 16 """Number of near basis factors for each ray. """ dim_basis: int = 2 """Feature dimension of basis factor. """ resolution_basis: int = 256 """Tensor resolution of basis factor. """ freq_theta: float = 1 """Frequency of multiplicative warping for theta. """ freq_phi: float = 1 """Frequency of multiplicative warping for phi.""" # core factor configs num_core: int = 3 """Number of core factors.""" near_core: int = 3 """Number of near core factors for each ray.""" dim_core: int = 32 """Feature dimension of core factor.""" resolution_core_angular: int = 64 """Initial tensor resolution of angular core factor. (a.k.a theta and phi)""" resolution_max_core_angular: int = 128 """Max tensor resolution of angular core factor. (a.k.a theta and phi)""" resolution_core_radial: int = 64 """Initial tensor resolution of radial core factor. (a.k.a depth direction)""" resolution_max_core_radial: int = 1024 """Max tensor resolution of radial core factor. (a.k.a depth direction)""" iters_core_upsampling: Tuple[int, ...] = (2000, 3000, 4000, 5500, 7000) """Iterations for upsampling core factor. """ # apperance embedding settings use_appearance_embedding: bool = False """Whether to use appearance embedding. """ # sampler config near_plane: float = 0.05 """Near plane for initial ray sampler. """ far_plane: float = 1000.0 """Far plane for initial ray sampler.""" use_earth_collider: bool = False """Whether to use earth model collider, must pass scene-specific scale. (e.g. for bungeenerf dataset)""" earth_collider_scale: float = 1.0 """Scale of the earth model collider. """ use_single_jitter: bool = False """Whether to use single jitter for initial ray sampler.""" init_sampler: Literal['uniform', 'sqrt', 'log', 'uniformdisp'] = 'uniformdisp' """Initial ray sampler function type. """ # proposal network config num_proposal_iterations: int = 2 """Number of proposal network iterations.""" num_proposal_samples_per_ray_1: int = 256 """Number of proposal samples per ray for the #1 proposal network iteration.""" num_proposal_samples_per_ray_2: int = 96 """Number of proposal samples per ray for the #2 proposal network iteration.""" num_nerf_samples_per_ray: int = 48 """Number of nerf samples per ray for the main field. 
""" proposal_update_every: int = 5 """Update proposal network every # iterations.""" proposal_warmup: int = 5000 """Warmup proposal network by linear learning rate for the first # iterations.""" proposal_net_args_list: List[Dict] = field( default_factory=lambda: [ {"angular_resolution": 64, "radial_resolution": 128, "feat_dim": 8, }, {"angular_resolution": 128, "radial_resolution": 256, "feat_dim": 8, }, ] ) """List of proposal network arguments for each proposal networks""" proposal_weights_anneal_slope: float = 10.0 """Slope of the annealing function for the proposal weights.""" proposal_weights_anneal_max_num_iters: int = 100 """Max num iterations for the annealing function.""" use_proposal_weight_anneal: bool = True """Whether to use proposal weight annealing.""" # rendering and training config background_color: Literal["random", "last_sample", "white", "black"] = "random" """Background color for rendering when accumulation doesn't reach 1.""" interlevel_loss_mult: float = 1.0 """Interlevel loss multiplier.""" distortion_loss_mult: float = 0.002 """Initial distortion loss multiplier.""" distortion_loss_mult_factor_max: int = 1 """Max multiplication factor for distortion loss multiplier.""" distortion_loss_mult_factor_iters: Tuple[int, ...] = (500, 1000, 2000, 4000) """Iterations for upsampling distortion loss multiplier.""""" basis_tv_loss_mult: float = 0.0 """Tv loss multiplier for basis factor.""" core_tv_loss_mult: float = 0.0 """Tv loss multiplier for core factor.""" def get_upsample_steps(res_base, res_max, num_iters): x = ( np.round( np.exp( np.linspace( np.log(res_base), np.log(res_max), num_iters + 1, ) ) ).astype("int").tolist()[1:] ) return x class NeLFProModel(Model): config: NeLFProModelConfig def populate_modules(self): """Set the fields and modules.""" super().populate_modules() # Resolution and Loss Multiplier Upsampling Config if self.config.resolution_core_angular < self.config.resolution_max_core_angular or self.config.resolution_core_radial < self.config.resolution_max_core_radial: self.core_angular_upsamling_step = get_upsample_steps(self.config.resolution_core_angular, self.config.resolution_max_core_angular, len(self.config.iters_core_upsampling)) self.core_radial_upsamling_step = get_upsample_steps(self.config.resolution_core_radial, self.config.resolution_max_core_radial, len(self.config.iters_core_upsampling)) if self.config.distortion_loss_mult_factor_max != 1.0: self.distortion_loss_mult_factor_step = get_upsample_steps(1.0, self.config.distortion_loss_mult_factor_max, len(self.config.distortion_loss_mult_factor_iters)) self.current_distort_loss_mult_factor = self.config.distortion_loss_mult * 1.0 else: self.current_distort_loss_mult_factor = self.config.distortion_loss_mult * 1.0 # Main Field self.field = NeLFProField( num_images=self.num_train_data, num_basis = self.config.num_basis, near_basis = self.config.near_basis, dim_basis = self.config.dim_basis, resolution_basis = self.config.resolution_basis, num_core = self.config.num_core, near_core = self.config.near_core, dim_core = self.config.dim_core, resolution_core_angular = self.config.resolution_core_angular, resolution_core_radial = self.config.resolution_core_radial, freq_theta = self.config.freq_theta, freq_phi = self.config.freq_phi, use_appearance_embedding=self.config.use_appearance_embedding, ) # Proposal Networks self.proposal_networks = torch.nn.ModuleList() assert len(self.config.proposal_net_args_list) == self.config.num_proposal_iterations, 'proposal_net_args_list should have the same length 
as num_proposal_iterations' for i in range(self.config.num_proposal_iterations): prop_net_args = self.config.proposal_net_args_list[i] network = NeLFDensityField(num_core=self.config.num_core, near_core=self.config.near_core, **prop_net_args) self.proposal_networks.append(network) self.density_fns = [network.density_fn for network in self.proposal_networks] # Proposal Sampler update_schedule = lambda step: np.clip( np.interp(step, [0, self.config.proposal_warmup], [0, self.config.proposal_update_every]), 1, self.config.proposal_update_every, ) self.proposal_sampler = ProposalNetworkSampler( init_sampler=self.config.init_sampler, num_nerf_samples_per_ray=self.config.num_nerf_samples_per_ray, num_proposal_samples_per_ray=[self.config.num_proposal_samples_per_ray_1, self.config.num_proposal_samples_per_ray_2], num_proposal_network_iterations=self.config.num_proposal_iterations, single_jitter=self.config.use_single_jitter, update_sched=update_schedule, ) # Collider if self.config.use_earth_collider: self.collider = EarthCollider(scene_scaling_factor=self.config.earth_collider_scale) else: self.collider = NearFarCollider(near_plane=self.config.near_plane, far_plane=self.config.far_plane) # Renders background_color = ( get_color(self.config.background_color) if self.config.background_color in set(["white", "black"]) else self.config.background_color ) self.renderer_rgb = RGBRenderer(background_color=background_color) self.renderer_accumulation = AccumulationRenderer() self.renderer_depth = DepthRenderer() # losses self.rgb_loss = MSELoss() # metrics self.psnr = PeakSignalNoiseRatio(data_range=1.0) self.ssim = structural_similarity_index_measure self.lpips = LearnedPerceptualImagePatchSimilarity(normalize=True, net_type='vgg') # adjust step-dependent components self.adjust_step_dependent_components() def n_parameters(self): """Return the number of parameters in the model.""" return sum(p.numel() for p in self.field.parameters()) + sum(p.numel() for p in self.proposal_networks.parameters()) def adjust_step_dependent_components(self) -> None: """Call the model's customized load function. Args: checkpoint_dir: directory of checkpoint """ load_step = self.load_step if not load_step: return assert load_step >= 0, f"load_step must be non-negative, got {load_step}" # distortion factor if self.config.distortion_loss_mult_factor_max != 1.0: i = 0 while len(self.distortion_loss_mult_factor_step) > 0 and load_step >= self.config.distortion_loss_mult_factor_iters[i]: i += 1 self.current_distort_loss_mult_factor = self.distortion_loss_mult_factor_step.pop(0) * self.config.distortion_loss_mult assert len(self.config.distortion_loss_mult_factor_iters) - i == len(self.distortion_loss_mult_factor_step), 'distortion_loss_mult_factor_step should have the same length as distortion_loss_mult_factor_iters' # core upsampling if self.config.resolution_core_angular < self.config.resolution_max_core_angular or self.config.resolution_core_radial < self.config.resolution_max_core_radial: i = 0 while len(self.core_radial_upsamling_step) > 0 and load_step >= self.config.iters_core_upsampling[i]: i += 1 angular_res = self.core_angular_upsamling_step.pop(0) radial_res = self.core_radial_upsamling_step.pop(0) self.field.upsample_core((angular_res, radial_res)) assert len(self.config.iters_core_upsampling) - i == len(self.core_radial_upsamling_step), 'core_radial_upsamling_step should have the same length as iters_core_upsampling' # proposal network annealing: ignore as its update frequency is very high. 
def get_param_groups(self) -> Dict[str, List[Parameter]]: param_groups = {} # seperate the parameters of the field into two groups: fields (e.g. MLP) and fields_coef (e.g. camera fields) field_basis = self.field.get_basis_fields() field_core_angular = self.field.get_core_fields(name='angular') field_core_radial = self.field.get_core_fields(name='radial') param_groups["field_mlp"] = list() param_groups["field_basis"] = list() param_groups["field_core_angular"] = list() param_groups["field_core_radial"] = list() param_groups["proposal_networks"] = list() for field_params in self.field.parameters(): if field_params in field_basis: param_groups["field_basis"].append(field_params) elif field_params in field_core_angular: param_groups["field_core_angular"].append(field_params) elif field_params in field_core_radial: param_groups["field_core_radial"].append(field_params) else: param_groups["field_mlp"].append(field_params) for proposal_parameter in self.proposal_networks.parameters(): param_groups["proposal_networks"].append(proposal_parameter) return param_groups def get_training_callbacks( self, training_callback_attributes: TrainingCallbackAttributes ) -> List[TrainingCallback]: callbacks = [] def reinitialize_optimizer(params_name, training_callback_attributes, step): '''reinitialize optimizer and scheduler after upsampling. ''' optimizers_config = training_callback_attributes.optimizers.config data = training_callback_attributes.pipeline.get_param_groups()[params_name] lr_init = optimizers_config[params_name]["optimizer"].lr opt_state_param_groups = training_callback_attributes.optimizers.optimizers[params_name].state_dict()['param_groups'] training_callback_attributes.optimizers.optimizers[params_name] = optimizers_config[params_name]["optimizer"].setup(params=data) # note: we load_state_dict() for loading param_groups's _lr_initial, which is used for scheduler's last_epoch. 
training_callback_attributes.optimizers.optimizers[params_name].load_state_dict({ 'state': training_callback_attributes.optimizers.optimizers[params_name].state_dict()['state'], 'param_groups': opt_state_param_groups }) if optimizers_config[params_name]["scheduler"]: # save current state dict training_callback_attributes.optimizers.schedulers[params_name] = optimizers_config[params_name]["scheduler"].setup( optimizer=training_callback_attributes.optimizers.optimizers[params_name], lr_init=lr_init, last_epoch=step, ) # upsampling core factor if self.config.resolution_core_angular < self.config.resolution_max_core_angular or self.config.resolution_core_radial < self.config.resolution_max_core_radial: def upsample_core(self, training_callback_attributes: TrainingCallbackAttributes, step: int): angular_res = self.core_angular_upsamling_step.pop(0) radial_res = self.core_radial_upsamling_step.pop(0) self.field.upsample_core((angular_res, radial_res)) reinitialize_optimizer('field_core_angular', training_callback_attributes, step) reinitialize_optimizer('field_core_radial', training_callback_attributes, step) callbacks.append( TrainingCallback( where_to_run=[TrainingCallbackLocation.AFTER_TRAIN_ITERATION], iters=self.config.iters_core_upsampling, func=upsample_core, args=[self, training_callback_attributes] ) ) # update distortion loss multiplier if self.config.distortion_loss_mult_factor_max != 1.0: def update_distortion_loss_mult_factor(self, step: int): self.current_distort_loss_mult_factor = self.distortion_loss_mult_factor_step.pop(0) * self.config.distortion_loss_mult callbacks.append( TrainingCallback( where_to_run=[TrainingCallbackLocation.AFTER_TRAIN_ITERATION], iters=self.config.distortion_loss_mult_factor_iters, func=update_distortion_loss_mult_factor, args=[self] ) ) # Proposal Network Annealing and Status Update if self.config.use_proposal_weight_anneal: N = self.config.proposal_weights_anneal_max_num_iters def set_anneal(step): train_frac = np.clip(step / N, 0, 1) bias = lambda x, b: (b * x) / ((b - 1) * x + 1) anneal = bias(train_frac, self.config.proposal_weights_anneal_slope) self.proposal_sampler.set_anneal(anneal) callbacks.append( TrainingCallback( where_to_run=[TrainingCallbackLocation.BEFORE_TRAIN_ITERATION], update_every_num_iters=1, func=set_anneal, ) ) callbacks.append( TrainingCallback( where_to_run=[TrainingCallbackLocation.AFTER_TRAIN_ITERATION], update_every_num_iters=1, func=self.proposal_sampler.step_cb, ) ) return callbacks def get_outputs(self, ray_bundle: RayBundle): ray_samples, weights_list, ray_samples_list = self.proposal_sampler(ray_bundle, density_fns=self.density_fns) field_outputs = self.field(ray_samples) weights = ray_samples.get_weights(field_outputs['density']) weights_list.append(weights) ray_samples_list.append(ray_samples) rgb = self.renderer_rgb(rgb=field_outputs['rgb'], weights=weights) depth = self.renderer_depth(weights=weights, ray_samples=ray_samples) accumulation = self.renderer_accumulation(weights=weights) outputs = { "rgb": rgb, "accumulation": accumulation, "depth": depth, } if self.training: outputs["tensors"] = { 'basis': field_outputs['basis'], 'core_angular': field_outputs['core_angular'], } # These use a lot of GPU memory, so we avoid storing them for eval. 
if self.training: outputs["weights_list"] = weights_list outputs["ray_samples_list"] = ray_samples_list for i in range(self.config.num_proposal_iterations): outputs[f"prop_depth_{i}"] = self.renderer_depth(weights=weights_list[i], ray_samples=ray_samples_list[i]) return outputs def get_metrics_dict(self, outputs, batch): metrics_dict = {} image = batch["image"].to(self.device) metrics_dict["psnr"] = self.psnr(outputs["rgb"], image) if self.training:
metrics_dict["distortion"] = distortion_loss(outputs["weights_list"], outputs["ray_samples_list"])
6
2023-12-15 20:07:22+00:00
16k
amazon-science/c2f-seg
src/video_model.py
[ { "identifier": "VQModel", "path": "taming_src/taming_models.py", "snippet": "class VQModel(nn.Module):\n def __init__(self, config):\n super(VQModel, self).__init__()\n self.config = config\n self.iteration = 0\n self.name = config.model_type\n self.m_path = os.path.join(config.path, self.name)\n self.eps = 1e-6\n\n self.ddconfig = config.model['params']['ddconfig']\n n_embed = config.model['params']['n_embed']\n embed_dim = config.model['params']['embed_dim']\n \n self.encoder = Encoder(self.ddconfig).to(config.device)\n self.decoder = Decoder(self.ddconfig).to(config.device)\n self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25).to(config.device).to(config.device)\n self.quant_conv = torch.nn.Conv2d(self.ddconfig[\"z_channels\"], embed_dim, 1).to(config.device)\n # self.quant_proj = torch.nn.Linear(self.ddconfig[\"z_channels\"], embed_dim).to(config.device)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, self.ddconfig[\"z_channels\"], 1).to(config.device)\n # self.pose_quant_proj = torch.nn.Linear(embed_dim, self.ddconfig[\"z_channels\"]).to(config.device)\n\n def encode(self, x, mask=None):\n h = self.encoder(x) # dim=256\n h = self.quant_conv(h) # dim=256\n if mask is not None:\n mask = F.max_pool2d(mask, kernel_size=int(mask.shape[2] / h.shape[2]),\n stride=int(mask.shape[2] / h.shape[2]))\n quant = quant * mask + h * (1 - mask)\n quant, emb_loss, info = self.quantize(h, mask)\n \n return quant, emb_loss, info\n\n def decode(self, quant):\n quant = self.post_quant_conv(quant) # dim: 256\n dec = self.decoder(quant)\n return dec\n\n def decode_code(self, code_b):\n quant_b = self.quantize.embed_code(code_b)\n dec = self.decode(quant_b)\n return dec\n\n def forward(self, x, mask=None):\n quant, diff, _ = self.encode(x, mask) # quant dim: 256\n\n dec = self.decode(quant)\n return dec, diff\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n def restore(self, ckpt_file, g_opt=None, d_opt=None):\n torch_init_model(self, ckpt_file, \"state_dict\")\n saving = torch.load(ckpt_file, map_location='cpu')\n if 'optimizer_states' in saving and g_opt is not None and d_opt is not None:\n opt_state = saving['optimizer_states']\n g_opt.load_state_dict(opt_state[0])\n d_opt.load_state_dict(opt_state[1])\n print(f\"Restored from {ckpt_file}\")\n return g_opt, d_opt\n\n def save(self, prefix=None, g_opt=None, d_opt=None):\n if prefix is not None:\n save_path = self.m_path + \"_{}.pth\".format(prefix)\n else:\n save_path = self.m_path + \".pth\"\n\n print('\\nsaving {} {}...\\n'.format(self.name, prefix))\n all_saving = {'state_dict': self.state_dict(),\n 'optimizer_states': [g_opt.state_dict(), d_opt.state_dict()]}\n torch.save(all_saving, save_path)" }, { "identifier": "MaskedTransformer", "path": "src/video_component.py", "snippet": "class MaskedTransformer(nn.Module):\n def __init__(self, config):\n super().__init__()\n embedding_dim = config.n_embd\n num_embed = config.vocab_size+1\n self.conv_in = torch.nn.Conv2d(2048, embedding_dim//2, 3, padding=1)\n # z_embedding\n self.c_emb = nn.Embedding(num_embed, embedding_dim//4)\n self.z_emb = nn.Embedding(num_embed, embedding_dim//4)\n # posotion embedding\n self.pos_emb = nn.Embedding(config.sequence_length, embedding_dim)\n self.drop = nn.Dropout(config.embd_pdrop)\n # transformer\n self.blocks = nn.ModuleList([Block(config) for _ in range(config.n_layer)])\n # decoder head\n self.dec = Transformer_Prediction(config)\n # z dec and m dec\n self.m_dec = nn.Linear(embedding_dim, num_embed, bias=False)\n self.m_bias = 
nn.Parameter(torch.zeros(num_embed))\n\n self.sequence_length = config.sequence_length\n self.apply(self._init_weights)\n self.config = config\n self.window_len = int(self.config.window_length)\n\n def forward(self, img_feat, c_idx, z_idx, window_size=(12, 4, 4), mask=None):\n # img_feat: [B, 2048, 16, 16]\n # attn_map: [B, 1, 16, 16]\n i_embeddings = self.conv_in(img_feat) # [B, 768//2-1, 16, 16]\n i_embeddings = i_embeddings.flatten(2).transpose(-2, -1)\n # c and z embedding\n c_embeddings = self.c_emb(c_idx) # [B, 256, D//4]\n z_embeddings = self.z_emb(z_idx) # [B, 256, D//4]\n token_embeddings = torch.cat([i_embeddings, c_embeddings, z_embeddings], dim=2) # [B, 256, D]\n # add positional embeddings\n n_tokens = token_embeddings.shape[1] # 16 * 16\n position_ids = torch.arange(n_tokens, dtype=torch.long, device=z_idx.device)\n position_ids = position_ids.unsqueeze(0).repeat(z_idx.shape[0], 1) # [B, 256, 1]\n position_embeddings = self.pos_emb(position_ids) # [B, 256, D]\n\n x = self.drop(token_embeddings + position_embeddings)\n\n batch_size = token_embeddings.shape[0]\n mask = torch.ones(batch_size, 1, n_tokens, n_tokens).cuda()\n window_size = (self.window_len, 4, 4)\n\n for block in self.blocks:\n x = block(x, window_size=window_size, mask=mask)\n x = torch.roll(x, self.window_len//2, 0)\n\n total_shift_size = (self.window_len//2) * len(self.blocks)\n x = torch.roll(x, batch_size - total_shift_size%batch_size, 0)\n\n x = self.dec(x)\n logits_m = self.m_dec(x) + self.m_bias\n \n return logits_m\n\n def _init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)" }, { "identifier": "Resnet_Encoder", "path": "src/video_component.py", "snippet": "class Resnet_Encoder(nn.Module):\n def __init__(self):\n super(Resnet_Encoder, self).__init__()\n self.encoder = base_resnet()\n\n def forward(self, img):\n features = self.encoder(img)\n return features" }, { "identifier": "Refine_Module", "path": "src/video_component.py", "snippet": "class Refine_Module(nn.Module):\n def __init__(self):\n super(Refine_Module, self).__init__()\n # self.encoder = base_resnet()\n dim = 256 + 2\n self.conv_adapter = torch.nn.Conv2d(2048, 2048, 1)\n self.conv_in = torch.nn.Conv2d(2048, 256, 3, padding=1)\n self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1)\n self.bn1 = torch.nn.BatchNorm2d(dim)\n\n self.lay2 = torch.nn.Conv2d(dim, 128, 3, padding=1)\n self.bn2 = torch.nn.BatchNorm2d(128)\n\n self.lay3 = torch.nn.Conv2d(128, 64, 3, padding=1)\n self.bn3 = torch.nn.BatchNorm2d(64)\n self.adapter1 = torch.nn.Conv2d(1024, 128, 1)\n\n # visible mask branch\n self.lay4_vm = torch.nn.Conv2d(64, 32, 3, padding=1)\n self.bn4_vm = torch.nn.BatchNorm2d(32)\n self.lay5_vm = torch.nn.Conv2d(32, 16, 3, padding=1)\n self.bn5_vm = torch.nn.BatchNorm2d(16)\n self.adapter2_vm = torch.nn.Conv2d(512, 64, 1)\n self.adapter3_vm = torch.nn.Conv2d(256, 32, 1)\n self.out_lay_vm = torch.nn.Conv2d(16, 1, 3, padding=1)\n \n # full mask branch\n self.lay4_am = torch.nn.Conv2d(64, 32, 3, padding=1)\n self.bn4_am = torch.nn.BatchNorm2d(32)\n self.lay5_am = torch.nn.Conv2d(32, 16, 3, padding=1)\n self.bn5_am = torch.nn.BatchNorm2d(16)\n self.adapter2_am = torch.nn.Conv2d(512, 64, 1)\n self.adapter3_am = torch.nn.Conv2d(256, 32, 1)\n self.out_lay_am = torch.nn.Conv2d(16, 1, 3, padding=1)\n 
\n def get_attn_map(self, feature, guidance):\n b,c,h,w = guidance.shape\n q = torch.flatten(guidance, start_dim=2)\n v = torch.flatten(feature, start_dim=2)\n\n k = v * q\n k = k.sum(dim=-1, keepdim=True) / (q.sum(dim=-1, keepdim=True) + 1e-6)\n attn = (k.transpose(-2, -1) @ v) / 1\n attn = F.softmax(attn, dim=-1)\n attn = attn.reshape(b, c, h, w)\n return attn\n \n def forward(self, features, coarse_mask):\n # features: [B, 2048, 16, 16]\n # attn_map: [B, 1, 16, 16]\n # coarse_mask: [B, 1, 256, 256]\n feat = self.conv_adapter(features[-1])\n coarse_mask = F.interpolate(coarse_mask, scale_factor=(1/16))\n attn_map = self.get_attn_map(feat, coarse_mask)\n x = self.conv_in(feat)\n x = torch.cat((x, attn_map, coarse_mask), dim=1)\n x = F.relu(self.bn1(self.lay1(x)))\n x = F.relu(self.bn2(self.lay2(x)))\n \n cur_feat = self.adapter1(features[-2])\n x = cur_feat + x\n x = F.interpolate(x, size=(32, 32), mode=\"nearest\")\n x = F.relu(self.bn3(self.lay3(x)))\n\n # TODO: visible mask branch\n cur_feat_vm = self.adapter2_vm(features[-3])\n x_vm = cur_feat_vm + x\n x_vm = F.interpolate(x_vm, size=(64, 64), mode=\"nearest\")\n x_vm = F.relu(self.bn4_vm(self.lay4_vm(x_vm)))\n\n cur_feat_vm = self.adapter3_vm(features[-4])\n x_vm = cur_feat_vm + x_vm\n x_vm = F.interpolate(x_vm, size=(128, 128), mode=\"nearest\")\n x_vm = F.relu(self.bn5_vm(self.lay5_vm(x_vm)))\n \n x_vm = self.out_lay_vm(x_vm)\n\n # TODO: full mask branch\n cur_feat_am = self.adapter2_am(features[-3])\n x_am = cur_feat_am + x\n x_am = F.interpolate(x_am, size=(64, 64), mode=\"nearest\")\n x_am = F.relu(self.bn4_am(self.lay4_am(x_am)))\n\n cur_feat_am = self.adapter3_am(features[-4])\n x_am = cur_feat_am + x_am\n x_am = F.interpolate(x_am, size=(128, 128), mode=\"nearest\")\n x_am = F.relu(self.bn5_am(self.lay5_am(x_am)))\n \n x_am = self.out_lay_am(x_am)\n\n return x_vm, x_am" }, { "identifier": "VGG19", "path": "src/loss.py", "snippet": "class VGG19(torch.nn.Module):\n def __init__(self, pretrained=True, vgg_norm=False):\n super(VGG19, self).__init__()\n self.vgg_norm = vgg_norm\n features = models.vgg19(pretrained=pretrained).features\n self.relu1_1 = torch.nn.Sequential()\n self.relu1_2 = torch.nn.Sequential()\n\n self.relu2_1 = torch.nn.Sequential()\n self.relu2_2 = torch.nn.Sequential()\n\n self.relu3_1 = torch.nn.Sequential()\n self.relu3_2 = torch.nn.Sequential()\n self.relu3_3 = torch.nn.Sequential()\n self.relu3_4 = torch.nn.Sequential()\n\n self.relu4_1 = torch.nn.Sequential()\n self.relu4_2 = torch.nn.Sequential()\n self.relu4_3 = torch.nn.Sequential()\n self.relu4_4 = torch.nn.Sequential()\n\n self.relu5_1 = torch.nn.Sequential()\n self.relu5_2 = torch.nn.Sequential()\n self.relu5_3 = torch.nn.Sequential()\n self.relu5_4 = torch.nn.Sequential()\n\n for x in range(2):\n self.relu1_1.add_module(str(x), features[x])\n\n for x in range(2, 4):\n self.relu1_2.add_module(str(x), features[x])\n\n for x in range(4, 7):\n self.relu2_1.add_module(str(x), features[x])\n\n for x in range(7, 9):\n self.relu2_2.add_module(str(x), features[x])\n\n for x in range(9, 12):\n self.relu3_1.add_module(str(x), features[x])\n\n for x in range(12, 14):\n self.relu3_2.add_module(str(x), features[x])\n\n for x in range(14, 16):\n self.relu3_3.add_module(str(x), features[x])\n\n for x in range(16, 18):\n self.relu3_4.add_module(str(x), features[x])\n\n for x in range(18, 21):\n self.relu4_1.add_module(str(x), features[x])\n\n for x in range(21, 23):\n self.relu4_2.add_module(str(x), features[x])\n\n for x in range(23, 25):\n 
self.relu4_3.add_module(str(x), features[x])\n\n for x in range(25, 27):\n self.relu4_4.add_module(str(x), features[x])\n\n for x in range(27, 30):\n self.relu5_1.add_module(str(x), features[x])\n\n for x in range(30, 32):\n self.relu5_2.add_module(str(x), features[x])\n\n for x in range(32, 34):\n self.relu5_3.add_module(str(x), features[x])\n\n for x in range(34, 36):\n self.relu5_4.add_module(str(x), features[x])\n\n # don't need the gradients, just want the features\n for param in self.parameters():\n param.requires_grad = False\n\n self.mean = [0.485, 0.456, 0.406]\n self.std = [0.229, 0.224, 0.225]\n\n def forward(self, x):\n if self.vgg_norm:\n x = (x + 1) / 2 # -1~1 --> 0~1\n # 由0~1重新归一化\n mean = torch.as_tensor(self.mean, dtype=x.dtype, device=x.device)\n std = torch.as_tensor(self.std, dtype=x.dtype, device=x.device)\n x.sub_(mean[None,:, None, None]).div_(std[None,:, None, None])\n\n relu1_1 = self.relu1_1(x)\n relu1_2 = self.relu1_2(relu1_1)\n\n relu2_1 = self.relu2_1(relu1_2)\n relu2_2 = self.relu2_2(relu2_1)\n\n relu3_1 = self.relu3_1(relu2_2)\n relu3_2 = self.relu3_2(relu3_1)\n relu3_3 = self.relu3_3(relu3_2)\n relu3_4 = self.relu3_4(relu3_3)\n\n relu4_1 = self.relu4_1(relu3_4)\n relu4_2 = self.relu4_2(relu4_1)\n relu4_3 = self.relu4_3(relu4_2)\n relu4_4 = self.relu4_4(relu4_3)\n\n relu5_1 = self.relu5_1(relu4_4)\n relu5_2 = self.relu5_2(relu5_1)\n relu5_3 = self.relu5_3(relu5_2)\n relu5_4 = self.relu5_4(relu5_3)\n\n out = {\n 'relu1_1': relu1_1,\n 'relu1_2': relu1_2,\n\n 'relu2_1': relu2_1,\n 'relu2_2': relu2_2,\n\n 'relu3_1': relu3_1,\n 'relu3_2': relu3_2,\n 'relu3_3': relu3_3,\n 'relu3_4': relu3_4,\n\n 'relu4_1': relu4_1,\n 'relu4_2': relu4_2,\n 'relu4_3': relu4_3,\n 'relu4_4': relu4_4,\n\n 'relu5_1': relu5_1,\n 'relu5_2': relu5_2,\n 'relu5_3': relu5_3,\n 'relu5_4': relu5_4,\n }\n return out" }, { "identifier": "PerceptualLoss", "path": "src/loss.py", "snippet": "class PerceptualLoss(nn.Module):\n r\"\"\"\n Perceptual loss, VGG-based\n https://arxiv.org/abs/1603.08155\n https://github.com/dxyang/StyleTransfer/blob/master/utils.py\n \"\"\"\n\n def __init__(self, vgg, weights=[1.0, 1.0, 1.0, 1.0, 1.0], reduction='mean'):\n super(PerceptualLoss, self).__init__()\n # self.add_module('vgg', VGG19())\n self.vgg = vgg\n self.reduction = reduction\n self.criterion = torch.nn.L1Loss(reduction=reduction)\n self.weights = weights\n\n def __call__(self, x, y):\n # Compute features\n x_vgg, y_vgg = self.vgg(x), self.vgg(y)\n\n if self.reduction == 'mean':\n content_loss = 0.0\n content_loss += self.weights[0] * self.criterion(x_vgg['relu1_1'], y_vgg['relu1_1'])\n content_loss += self.weights[1] * self.criterion(x_vgg['relu2_1'], y_vgg['relu2_1'])\n content_loss += self.weights[2] * self.criterion(x_vgg['relu3_1'], y_vgg['relu3_1'])\n content_loss += self.weights[3] * self.criterion(x_vgg['relu4_1'], y_vgg['relu4_1'])\n content_loss += self.weights[4] * self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1'])\n elif self.reduction == 'none':\n content_loss = []\n content_loss.append(self.weights[0] * self.criterion(x_vgg['relu1_1'], y_vgg['relu1_1']))\n content_loss.append(self.weights[1] * self.criterion(x_vgg['relu2_1'], y_vgg['relu2_1']))\n content_loss.append(self.weights[2] * self.criterion(x_vgg['relu3_1'], y_vgg['relu3_1']))\n content_loss.append(self.weights[3] * self.criterion(x_vgg['relu4_1'], y_vgg['relu4_1']))\n content_loss.append(self.weights[4] * self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1']))\n else:\n raise NotImplementedError\n\n return content_loss" }, { "identifier": 
"AdamW", "path": "utils/pytorch_optimization.py", "snippet": "class AdamW(Optimizer):\n \"\"\" Implements Adam algorithm with weight decay fix.\n Parameters:\n lr (float): learning rate. Default 1e-3.\n betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)\n eps (float): Adams epsilon. Default: 1e-6\n weight_decay (float): Weight decay. Default: 0.0\n correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):\n if lr < 0.0:\n raise ValueError(\"Invalid learning rate: {} - should be >= 0.0\".format(lr))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter: {} - should be in [0.0, 1.0[\".format(betas[1]))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {} - should be >= 0.0\".format(eps))\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)\n super().__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\"Adam does not support sparse gradients, please consider SparseAdam instead\")\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state[\"step\"] = 0\n # Exponential moving average of gradient values\n state[\"exp_avg\"] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state[\"exp_avg_sq\"] = torch.zeros_like(p.data)\n\n exp_avg, exp_avg_sq = state[\"exp_avg\"], state[\"exp_avg_sq\"]\n beta1, beta2 = group[\"betas\"]\n\n state[\"step\"] += 1\n\n # Decay the first and second moment running average coefficient\n # In-place operations to update the averages at the same time\n # exp_avg.mul_(beta1).add_(1.0 - beta1, grad)\n # exp_avg_sq.mul_(beta2).addcmul_(1.0 - beta2, grad, grad)\n exp_avg.mul_(beta1).add_(grad, alpha = 1.0 - beta1)\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value = 1.0 - beta2)\n denom = exp_avg_sq.sqrt().add_(group[\"eps\"])\n\n step_size = group[\"lr\"]\n if group[\"correct_bias\"]: # No bias correction for Bert\n bias_correction1 = 1.0 - beta1 ** state[\"step\"]\n bias_correction2 = 1.0 - beta2 ** state[\"step\"]\n step_size = step_size * math.sqrt(bias_correction2) / bias_correction1\n\n # p.data.addcdiv_(-step_size, exp_avg, denom)\n p.data.addcdiv_(exp_avg, denom, value = -step_size)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. 
This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n # Add weight decay at the end (fixed version)\n if group[\"weight_decay\"] > 0.0:\n p.data.add_(p.data, alpha = -group[\"lr\"] * group[\"weight_decay\"])\n\n return loss" }, { "identifier": "get_linear_schedule_with_warmup", "path": "utils/pytorch_optimization.py", "snippet": "def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):\n \"\"\" Create a schedule with a learning rate that decreases linearly after\n linearly increasing during a warmup period.\n \"\"\"\n\n def lr_lambda(current_step):\n if current_step < num_warmup_steps:\n return float(current_step) / float(max(1, num_warmup_steps))\n return max(\n 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))\n )\n\n return LambdaLR(optimizer, lr_lambda, last_epoch)" }, { "identifier": "torch_show_all_params", "path": "utils/utils.py", "snippet": "def torch_show_all_params(model):\n params = list(model.parameters())\n k = 0\n for i in params:\n l = 1\n for j in i.size():\n l *= j\n k = k + l\n return k" }, { "identifier": "torch_init_model", "path": "utils/utils.py", "snippet": "def torch_init_model(model, init_checkpoint, key):\n state_dict = torch.load(init_checkpoint, map_location='cpu')[key]\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, '_metadata', None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=''):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + '.')\n\n load(model, prefix='')\n \n print(\"missing keys:{}\".format(missing_keys))\n print('unexpected keys:{}'.format(unexpected_keys))\n print('error msgs:{}'.format(error_msgs))" }, { "identifier": "Config", "path": "utils/utils.py", "snippet": "class Config(object):\n def __init__(self, config_path):\n with open(config_path, 'r') as f:\n self._yaml = f.read()\n self._dict = yaml.load(self._yaml, Loader=yaml.SafeLoader)\n self._dict['path'] = os.path.dirname(config_path)\n\n def __getattr__(self, name):\n if self._dict.get(name) is not None:\n return self._dict[name]\n\n return None\n\n def print(self):\n print('Model configurations:')\n print('---------------------------------')\n print(self._yaml)\n print('')\n print('---------------------------------')\n print('')" }, { "identifier": "video_iou", "path": "utils/evaluation.py", "snippet": "def video_iou(pred, labels):\n e = 1e-6\n pred = (pred>0.5).float()\n labels = (labels>0.5).float()\n intersection = pred * labels\n union = (pred + labels) - intersection\n iou = intersection.sum(-1).sum(-1) / (union.sum(-1).sum(-1)+ e)\n return iou" }, { "identifier": "CrossEntropyLoss", "path": "utils/loss.py", "snippet": "class CrossEntropyLoss(nn.Module):\n \"\"\"Cross entropy loss with label smoothing regularizer.\n\n Reference:\n Szegedy et al. Rethinking the Inception Architecture for Computer Vision. 
CVPR 2016.\n\n Equation: y = (1 - epsilon) * y + epsilon / K.\n\n Args:\n - num_classes (int): number of classes\n - epsilon (float): weight\n - use_gpu (bool): whether to use gpu devices\n - label_smooth (bool): whether to apply label smoothing, if False, epsilon = 0\n \"\"\"\n def __init__(self, num_classes, epsilon=0.1, device=None, label_smooth=True):\n super(CrossEntropyLoss, self).__init__()\n self.num_classes = num_classes\n self.epsilon = epsilon if label_smooth else 0\n self.device = device\n if device is None:\n self.logsoftmax = nn.LogSoftmax(dim=1)\n else:\n self.logsoftmax = nn.LogSoftmax(dim=1).to(device)\n\n def forward(self, inputs, targets):\n \"\"\"\n Args:\n - inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)\n - targets: ground truth labels with shape (num_classes)\n \"\"\"\n log_probs = self.logsoftmax(inputs)\n targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1)\n if self.device is not None:\n targets = targets.to(self.device)\n targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes\n loss = (- targets * log_probs).mean(0).sum()\n return loss" } ]
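The context list for this record closes here. One of its utilities, `get_linear_schedule_with_warmup`, defines a purely arithmetic learning-rate multiplier (linear ramp over the warmup steps, then linear decay to zero). The sketch below restates that same formula on its own so the schedule shape can be checked without building an optimizer; the step counts in the demo are illustrative assumptions, not values taken from the record.

```python
# Standalone sketch of the lr multiplier computed by the record's
# get_linear_schedule_with_warmup: linear ramp, then linear decay to zero.

def lr_lambda(current_step: int, num_warmup_steps: int, num_training_steps: int) -> float:
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))
    return max(
        0.0,
        float(num_training_steps - current_step)
        / float(max(1, num_training_steps - num_warmup_steps)),
    )

if __name__ == "__main__":
    # Example step counts are made up for illustration only.
    warmup, total = 1000, 10000
    for step in (0, 500, 1000, 5500, 10000):
        print(step, round(lr_lambda(step, warmup, total), 3))
    # 0 -> 0.0, 500 -> 0.5, 1000 -> 1.0, 5500 -> 0.5, 10000 -> 0.0
```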
import os import math import random import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.distributed as dist from torchvision import transforms from taming_src.taming_models import VQModel from src.video_component import MaskedTransformer, Resnet_Encoder, Refine_Module from src.loss import VGG19, PerceptualLoss from utils.pytorch_optimization import AdamW, get_linear_schedule_with_warmup from utils.utils import torch_show_all_params, torch_init_model from utils.utils import Config from utils.evaluation import video_iou from utils.loss import CrossEntropyLoss from tqdm import tqdm
11,267
v, ix = torch.topk(logits, k) out = logits.clone() out[out < v[..., [-1]]] = -float('Inf') return out @torch.no_grad() def batch_predict_maskgit(self, meta, iter, mode, temperature=1.0, T=3, start_iter=0): ''' :param x:[B,3,H,W] image :param c:[b,X,H,W] condition :param mask: [1,1,H,W] mask ''' self.sample_iter += 1 img_feat = self.img_encoder(meta['img_crop'].squeeze().permute((0,3,1,2)).to(torch.float32)) _, src_indices = self.encode_to_z(meta['vm_crop']) # _, tgt_indices = self.encode_to_z(meta['fm_crop']) bhwc = (_.shape[0], _.shape[2], _.shape[3], _.shape[1]) masked_indices = self.mask_token_idx * torch.ones_like(src_indices, device=src_indices.device) # [B, L] unknown_number_in_the_beginning = torch.sum(masked_indices == self.mask_token_idx, dim=-1) # [B] gamma = self.gamma_func("cosine") cur_ids = masked_indices # [B, L] seq_out = [] mask_out = [] for t in range(start_iter, T): logits = self.transformer(img_feat[-1], src_indices, cur_ids, mask=None) # [B, L, N] logits = logits[..., :-1] logits = self.top_k_logits(logits, k=3) probs = F.softmax(logits, dim=-1) # convert logits into probs [B, 256, vocab_size+1] sampled_ids = torch.distributions.categorical.Categorical(probs=probs).sample() # [B, L] unknown_map = (cur_ids == self.mask_token_idx) # which tokens need to be sampled -> bool [B, 256] sampled_ids = torch.where(unknown_map, sampled_ids, cur_ids) # replace all -1 with their samples and leave the others untouched [B, 256] seq_out.append(sampled_ids) mask_out.append(1. * unknown_map) ratio = 1. * (t + 1) / T # just a percentage e.g. 1 / 12 mask_ratio = gamma(ratio) selected_probs = probs.gather(dim=-1, index=sampled_ids.unsqueeze(-1)).squeeze(-1) selected_probs = torch.where(unknown_map, selected_probs, torch.Tensor([np.inf]).to(logits.device)) # ignore tokens which are already sampled [B, 256] mask_len = torch.unsqueeze(torch.floor(unknown_number_in_the_beginning * mask_ratio), 1) # floor(256 * 0.99) = 254 --> [254, 254, 254, 254, ....] (B x 1) mask_len = torch.maximum(torch.ones_like(mask_len), torch.minimum(torch.sum(unknown_map, dim=-1, keepdim=True) - 1, mask_len)) # Adds noise for randomness masking = self.mask_by_random_topk(mask_len, selected_probs, temperature=self.choice_temperature * (1. - ratio)) # Masks tokens with lower confidence. 
cur_ids = torch.where(masking, self.mask_token_idx, sampled_ids) # [B, L] seq_ids = torch.stack(seq_out, dim=1) # [B, T, L] quant_z = self.g_model.quantize.get_codebook_entry(seq_ids[:,-1,:].reshape(-1), shape=bhwc) pred_fm_crop = self.g_model.decode(quant_z) pred_fm_crop = pred_fm_crop.mean(dim=1, keepdim=True) pred_fm_crop_old = torch.clamp(pred_fm_crop, min=0, max=1) pred_vm_crop, pred_fm_crop = self.refine_module(img_feat, pred_fm_crop_old) pred_vm_crop = F.interpolate(pred_vm_crop, size=(256, 256), mode="nearest") pred_vm_crop = torch.sigmoid(pred_vm_crop) loss_vm = self.refine_criterion(pred_vm_crop, meta['vm_crop'].transpose(1,0)) # pred_vm_crop = (pred_vm_crop>=0.5).to(torch.float32) pred_fm_crop = F.interpolate(pred_fm_crop, size=(256, 256), mode="nearest") pred_fm_crop = torch.sigmoid(pred_fm_crop) loss_fm = self.refine_criterion(pred_fm_crop, meta['fm_crop'].transpose(1,0)) # pred_fm_crop = (pred_fm_crop>=0.5).to(torch.float32) pred_fm_crop_old = self.align_raw_size(pred_fm_crop_old, meta['obj_position'], meta["vm_pad"], meta) # pred_vm = self.align_raw_size(pred_vm_crop, meta['obj_position'], meta["vm_pad"], meta) pred_fm = self.align_raw_size(pred_fm_crop, meta['obj_position'], meta["vm_pad"], meta) pred_fm = pred_fm + pred_fm_crop_old loss_eval = self.loss_and_evaluation(pred_fm, meta) loss_eval["loss_fm"] = loss_fm loss_eval["loss_vm"] = loss_vm return loss_eval def create_inputs_tokens_normal(self, num, device): self.num_latent_size = self.config['resolution'] // self.config['patch_size'] blank_tokens = torch.ones((num, self.num_latent_size ** 2), device=device) masked_tokens = self.mask_token_idx * blank_tokens return masked_tokens.to(torch.int64) def gamma_func(self, mode="cosine"): if mode == "linear": return lambda r: 1 - r elif mode == "cosine": return lambda r: np.cos(r * np.pi / 2) elif mode == "square": return lambda r: 1 - r ** 2 elif mode == "cubic": return lambda r: 1 - r ** 3 elif mode == "log": return lambda r, total_unknown: - np.log2(r) / np.log2(total_unknown) else: raise NotImplementedError def mask_by_random_topk(self, mask_len, probs, temperature=1.0): confidence = torch.log(probs) + temperature * torch.distributions.gumbel.Gumbel(0, 1).sample(probs.shape).to(probs.device) sorted_confidence, _ = torch.sort(confidence, dim=-1) # from small to large # Obtains cut off threshold given the mask lengths. # cut_off = torch.take_along_dim(sorted_confidence, mask_len.to(torch.long), dim=-1) cut_off = sorted_confidence.gather(dim=-1, index=mask_len.to(torch.long)) # Masks tokens with lower confidence. masking = (confidence < cut_off) return masking def load(self, is_test=False, prefix=None): if prefix is not None: transformer_path = self.transformer_path + prefix + '.pth' else: transformer_path = self.transformer_path + '_last.pth' if self.config.restore or is_test: # transformer_path = '/home/ubuntu/AmodalVQGAN_pre/check_points/fish_amodal_transformer_gjx/GETransformer_210000.pth' if os.path.exists(transformer_path): print('Rank {} is loading {} Transformer...'.format(self.rank, transformer_path)) data = torch.load(transformer_path, map_location="cpu")
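The cropped_code above centers on MaskGIT-style iterative decoding: a cosine schedule decides how many tokens stay masked at each refinement step, and `top_k_logits` restricts sampling to the k most likely codebook entries. Below is a minimal standalone sketch of just those two pieces; the demo tensor, the k value, and the iteration count are illustrative assumptions rather than the record's actual configuration.

```python
# Minimal sketch of two helpers used by batch_predict_maskgit in the record:
# the cosine mask-ratio schedule (gamma_func("cosine")) and top-k logit filtering.
import numpy as np
import torch


def cosine_gamma(r: float) -> float:
    # Fraction of tokens that should remain masked once a fraction r of
    # the decoding iterations has elapsed.
    return float(np.cos(r * np.pi / 2))


def top_k_logits(logits: torch.Tensor, k: int) -> torch.Tensor:
    # Keep the k largest logits per row and set the rest to -inf, so the
    # subsequent softmax sampling can only pick top-k candidates.
    v, _ = torch.topk(logits, k)
    out = logits.clone()
    out[out < v[..., [-1]]] = -float("Inf")
    return out


if __name__ == "__main__":
    T = 3  # refinement iterations, matching the default T=3 in the record
    for t in range(T):
        ratio = (t + 1) / T
        print(f"iter {t}: mask_ratio = {cosine_gamma(ratio):.3f}")

    logits = torch.tensor([[2.0, 0.5, 1.5, -1.0]])
    probs = torch.softmax(top_k_logits(logits, k=2), dim=-1)
    print(probs)  # only the two largest logits keep non-zero probability
```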
class C2F_Seg(nn.Module): def __init__(self, config, g_path, mode, logger=None, save_eval_dict={}): super(C2F_Seg, self).__init__() self.config = config self.iteration = 0 self.sample_iter = 0 self.name = config.model_type # load g model for mask self.g_config = Config(os.path.join(g_path, 'vqgan_{}.yml'.format(config.dataset))) self.g_path = os.path.join(g_path, self.g_config.model_type) self.root_path = config.path self.transformer_path = os.path.join(config.path, self.name) # self.refine_path = os.path.join(config.path, "Refine") self.trans_size = config.trans_size self.mode = mode self.save_eval_dict = save_eval_dict self.eps = 1e-6 self.train_sample_iters = config.train_sample_iters self.g_model = VQModel(self.g_config).to(config.device) self.img_encoder = Resnet_Encoder().to(config.device) self.refine_module = Refine_Module().to(config.device) self.transformer = MaskedTransformer(config).to(config.device) self.g_model.eval() self.refine_criterion = nn.BCELoss() self.criterion = CrossEntropyLoss(num_classes=config.vocab_size+1, device=config.device) if config.train_with_dec: if not config.gumbel_softmax: self.temperature = nn.Parameter(torch.tensor([config.tp], dtype=torch.float32), requires_grad=True).to(config.device) if config.use_vgg: vgg = VGG19(pretrained=True, vgg_norm=config.vgg_norm).to(config.device) vgg.eval() reduction = 'mean' if config.balanced_loss is False else 'none' self.perceptual_loss = PerceptualLoss(vgg, weights=config.vgg_weights, reduction=reduction).to(config.device) else: self.perceptual_loss = None if config.init_gpt_with_vqvae: self.transformer.z_emb.weight = self.g_model.quantize.embedding.weight if logger is not None: logger.info('Gen Parameters:{}'.format(torch_show_all_params(self.g_model))) logger.info('Transformer Parameters:{}'.format(torch_show_all_params(self.transformer))) else: print('Gen Parameters:{}'.format(torch_show_all_params(self.g_model))) print('Transformer Parameters:{}'.format(torch_show_all_params(self.transformer))) # loss no_decay = ['bias', 'ln1.bias', 'ln1.weight', 'ln2.bias', 'ln2.weight'] ignored_param = ['z_emb.weight', 'c_emb.weight'] param_optimizer = self.transformer.named_parameters() param_optimizer_encoder = self.img_encoder.named_parameters() param_optimizer_refine= self.refine_module.named_parameters() optimizer_parameters = [ {'params': [p for n, p in param_optimizer if not any([nd in n for nd in no_decay])], 'weight_decay': config.weight_decay}, {'params': [p for n, p in param_optimizer if any([nd in n for nd in no_decay])], 'weight_decay': 0.0}, {'params': [p for n, p in param_optimizer_encoder], 'weight_decay': config.weight_decay}, {'params': [p for n, p in param_optimizer_refine], 'weight_decay': config.weight_decay}, ] self.opt = AdamW(params=optimizer_parameters, lr=float(config.lr), betas=(config.beta1, config.beta2)) self.sche = get_linear_schedule_with_warmup(self.opt, num_warmup_steps=config.warmup_iters, num_training_steps=config.max_iters) self.rank = dist.get_rank() self.gamma = self.gamma_func(mode=config.gamma_mode) self.mask_token_idx = config.vocab_size self.choice_temperature = 4.5 self.Image_W = config.Image_W self.Image_H = config.Image_H self.patch_W = config.patch_W self.patch_H = config.patch_H @torch.no_grad() def encode_to_z(self, x, mask=None): if len(x.size())==5: x = x[0] x = x.permute((1,0,2,3)) quant_z, _, info = self.g_model.encode(x.float(), mask) # [B,D,H,W] indices = info[2].view(quant_z.shape[0], -1) # [B, L] return quant_z, indices def data_augmentation(self, mask): w = 
random.randint(5, 11) h = random.randint(5, 11) rdv = random.random() n_repeat = random.randint(1, 3) max_pool = nn.MaxPool2d(kernel_size=(w, h), stride=1, padding=(w//2, h//2)) if rdv < 0.3: for i in range(n_repeat): mask = max_pool(mask) elif rdv >=0.3 and rdv < 0.6: for i in range(n_repeat): mask = -max_pool(-mask) else: mask = mask return mask def get_attn_map(self, feature, guidance): guidance = F.interpolate(guidance, scale_factor=(1/16)) b,c,h,w = guidance.shape q = torch.flatten(guidance, start_dim=2) v = torch.flatten(feature, start_dim=2) k = v * q k = k.sum(dim=-1, keepdim=True) / (q.sum(dim=-1, keepdim=True) + 1e-6) attn = (k.transpose(-2, -1) @ v) / 1 attn = F.softmax(attn, dim=-1) attn = attn.reshape(b, c, h, w) return attn def get_losses(self, meta): self.iteration += 1 z_loss = 0 img_feat = self.img_encoder(meta['img_crop'].squeeze().permute((0,3,1,2)).to(torch.float32)) _, src_indices = self.encode_to_z(meta['vm_crop']) _, tgt_indices = self.encode_to_z(meta['fm_crop']) bhwc = (_.shape[0], _.shape[2], _.shape[3], _.shape[1]) r = np.maximum(self.gamma(np.random.uniform()), self.config.min_mask_rate) r = math.floor(r * tgt_indices.shape[1]) sample = torch.rand(tgt_indices.shape, device=tgt_indices.device).topk(r, dim=1).indices random_mask = torch.zeros(tgt_indices.shape, dtype=torch.bool, device=tgt_indices.device) random_mask.scatter_(dim=1, index=sample, value=True) # [B, L] # concat mask mask = random_mask masked_indices = self.mask_token_idx * torch.ones_like(tgt_indices, device=tgt_indices.device) # [B, L] z_indices = (~mask) * tgt_indices + mask * masked_indices # [B, L] logits_z = self.transformer(img_feat[-1], src_indices, z_indices, mask=None) target = tgt_indices z_loss = self.criterion(logits_z.view(-1, logits_z.size(-1)), target.view(-1)) with torch.no_grad(): logits_z = logits_z[..., :-1] logits_z = self.top_k_logits(logits_z, k=5) probs = F.softmax(logits_z, dim=-1) seq_ids = torch.distributions.categorical.Categorical(probs=probs).sample() # [B, L] quant_z = self.g_model.quantize.get_codebook_entry(seq_ids.reshape(-1), shape=bhwc) pred_fm_crop = self.g_model.decode(quant_z) pred_fm_crop = pred_fm_crop.mean(dim=1, keepdim=True) pred_fm_crop = torch.clamp(pred_fm_crop, min=0, max=1) pred_vm_crop, pred_fm_crop = self.refine_module(img_feat, pred_fm_crop.detach()) pred_vm_crop = F.interpolate(pred_vm_crop, size=(256, 256), mode="nearest") pred_vm_crop = torch.sigmoid(pred_vm_crop) loss_vm = self.refine_criterion(pred_vm_crop, meta['vm_crop_gt'].transpose(1,0)) pred_fm_crop = F.interpolate(pred_fm_crop, size=(256, 256), mode="nearest") pred_fm_crop = torch.sigmoid(pred_fm_crop) loss_fm = self.refine_criterion(pred_fm_crop, meta['fm_crop'].transpose(1,0)) logs = [ ("z_loss", z_loss.item()), ("loss_vm", loss_vm.item()), ("loss_fm", loss_fm.item()), ] return z_loss, loss_vm+loss_fm, logs def align_raw_size(self, full_mask, obj_position, vm_pad, meta): vm_np_crop = meta["vm_no_crop"].squeeze() H, W = vm_np_crop.shape[-2], vm_np_crop.shape[-1] bz, seq_len = full_mask.shape[:2] new_full_mask = torch.zeros((bz, seq_len, H, W)).to(torch.float32).cuda() if len(vm_pad.shape)==3: vm_pad = vm_pad[0] obj_position = obj_position[0] for b in range(bz): paddings = vm_pad[b] position = obj_position[b] new_fm = full_mask[ b, :, :-int(paddings[0]) if int(paddings[0]) !=0 else None, :-int(paddings[1]) if int(paddings[1]) !=0 else None ] vx_min = int(position[0]) vx_max = min(H, int(position[1])+1) vy_min = int(position[2]) vy_max = min(W, int(position[3])+1) resize = 
transforms.Resize([vx_max-vx_min, vy_max-vy_min]) try: new_fm = resize(new_fm) new_full_mask[b, :, vx_min:vx_max, vy_min:vy_max] = new_fm[0] except: new_fm = new_fm return new_full_mask def loss_and_evaluation(self, pred_fm, meta): loss_eval = {} pred_fm = pred_fm.squeeze() loss_mask = meta["loss_mask"].squeeze().to(pred_fm.device) counts = meta["counts"].reshape(-1).to(pred_fm.device) fm_no_crop = meta["fm_no_crop"].squeeze() vm_no_crop = meta["vm_no_crop"].squeeze() pred_fm = (pred_fm > 0.5).to(torch.int64) full_iou = video_iou(pred_fm, fm_no_crop) occ_iou = video_iou(pred_fm-vm_no_crop, fm_no_crop-vm_no_crop) m_full_iou = (counts * full_iou).sum() / counts.sum() m_occ_iou = (counts * occ_iou).sum() / counts.sum() loss_eval["iou"] = m_full_iou loss_eval["invisible_iou_"] = m_occ_iou loss_eval["iou_count"] = torch.Tensor([1]).cuda() loss_eval["occ_count"] = torch.Tensor([1]).cuda() # post-process pred_fm = pred_fm * (1 - loss_mask) + vm_no_crop * loss_mask pred_fm = (pred_fm > 0.5).to(torch.int64) full_iou = video_iou(pred_fm, fm_no_crop) occ_iou = video_iou(pred_fm-vm_no_crop, fm_no_crop-vm_no_crop) m_full_iou = (counts * full_iou).sum() / counts.sum() m_occ_iou = (counts * occ_iou).sum() / counts.sum() loss_eval["iou_post"] = m_full_iou loss_eval["invisible_iou_post"] = m_occ_iou return loss_eval def backward(self, loss=None): self.opt.zero_grad() loss.backward() self.opt.step() self.sche.step() def top_k_logits(self, logits, k): v, ix = torch.topk(logits, k) out = logits.clone() out[out < v[..., [-1]]] = -float('Inf') return out @torch.no_grad() def batch_predict_maskgit(self, meta, iter, mode, temperature=1.0, T=3, start_iter=0): ''' :param x:[B,3,H,W] image :param c:[b,X,H,W] condition :param mask: [1,1,H,W] mask ''' self.sample_iter += 1 img_feat = self.img_encoder(meta['img_crop'].squeeze().permute((0,3,1,2)).to(torch.float32)) _, src_indices = self.encode_to_z(meta['vm_crop']) # _, tgt_indices = self.encode_to_z(meta['fm_crop']) bhwc = (_.shape[0], _.shape[2], _.shape[3], _.shape[1]) masked_indices = self.mask_token_idx * torch.ones_like(src_indices, device=src_indices.device) # [B, L] unknown_number_in_the_beginning = torch.sum(masked_indices == self.mask_token_idx, dim=-1) # [B] gamma = self.gamma_func("cosine") cur_ids = masked_indices # [B, L] seq_out = [] mask_out = [] for t in range(start_iter, T): logits = self.transformer(img_feat[-1], src_indices, cur_ids, mask=None) # [B, L, N] logits = logits[..., :-1] logits = self.top_k_logits(logits, k=3) probs = F.softmax(logits, dim=-1) # convert logits into probs [B, 256, vocab_size+1] sampled_ids = torch.distributions.categorical.Categorical(probs=probs).sample() # [B, L] unknown_map = (cur_ids == self.mask_token_idx) # which tokens need to be sampled -> bool [B, 256] sampled_ids = torch.where(unknown_map, sampled_ids, cur_ids) # replace all -1 with their samples and leave the others untouched [B, 256] seq_out.append(sampled_ids) mask_out.append(1. * unknown_map) ratio = 1. * (t + 1) / T # just a percentage e.g. 1 / 12 mask_ratio = gamma(ratio) selected_probs = probs.gather(dim=-1, index=sampled_ids.unsqueeze(-1)).squeeze(-1) selected_probs = torch.where(unknown_map, selected_probs, torch.Tensor([np.inf]).to(logits.device)) # ignore tokens which are already sampled [B, 256] mask_len = torch.unsqueeze(torch.floor(unknown_number_in_the_beginning * mask_ratio), 1) # floor(256 * 0.99) = 254 --> [254, 254, 254, 254, ....] 
(B x 1) mask_len = torch.maximum(torch.ones_like(mask_len), torch.minimum(torch.sum(unknown_map, dim=-1, keepdim=True) - 1, mask_len)) # Adds noise for randomness masking = self.mask_by_random_topk(mask_len, selected_probs, temperature=self.choice_temperature * (1. - ratio)) # Masks tokens with lower confidence. cur_ids = torch.where(masking, self.mask_token_idx, sampled_ids) # [B, L] seq_ids = torch.stack(seq_out, dim=1) # [B, T, L] quant_z = self.g_model.quantize.get_codebook_entry(seq_ids[:,-1,:].reshape(-1), shape=bhwc) pred_fm_crop = self.g_model.decode(quant_z) pred_fm_crop = pred_fm_crop.mean(dim=1, keepdim=True) pred_fm_crop_old = torch.clamp(pred_fm_crop, min=0, max=1) pred_vm_crop, pred_fm_crop = self.refine_module(img_feat, pred_fm_crop_old) pred_vm_crop = F.interpolate(pred_vm_crop, size=(256, 256), mode="nearest") pred_vm_crop = torch.sigmoid(pred_vm_crop) loss_vm = self.refine_criterion(pred_vm_crop, meta['vm_crop'].transpose(1,0)) # pred_vm_crop = (pred_vm_crop>=0.5).to(torch.float32) pred_fm_crop = F.interpolate(pred_fm_crop, size=(256, 256), mode="nearest") pred_fm_crop = torch.sigmoid(pred_fm_crop) loss_fm = self.refine_criterion(pred_fm_crop, meta['fm_crop'].transpose(1,0)) # pred_fm_crop = (pred_fm_crop>=0.5).to(torch.float32) pred_fm_crop_old = self.align_raw_size(pred_fm_crop_old, meta['obj_position'], meta["vm_pad"], meta) # pred_vm = self.align_raw_size(pred_vm_crop, meta['obj_position'], meta["vm_pad"], meta) pred_fm = self.align_raw_size(pred_fm_crop, meta['obj_position'], meta["vm_pad"], meta) pred_fm = pred_fm + pred_fm_crop_old loss_eval = self.loss_and_evaluation(pred_fm, meta) loss_eval["loss_fm"] = loss_fm loss_eval["loss_vm"] = loss_vm return loss_eval def create_inputs_tokens_normal(self, num, device): self.num_latent_size = self.config['resolution'] // self.config['patch_size'] blank_tokens = torch.ones((num, self.num_latent_size ** 2), device=device) masked_tokens = self.mask_token_idx * blank_tokens return masked_tokens.to(torch.int64) def gamma_func(self, mode="cosine"): if mode == "linear": return lambda r: 1 - r elif mode == "cosine": return lambda r: np.cos(r * np.pi / 2) elif mode == "square": return lambda r: 1 - r ** 2 elif mode == "cubic": return lambda r: 1 - r ** 3 elif mode == "log": return lambda r, total_unknown: - np.log2(r) / np.log2(total_unknown) else: raise NotImplementedError def mask_by_random_topk(self, mask_len, probs, temperature=1.0): confidence = torch.log(probs) + temperature * torch.distributions.gumbel.Gumbel(0, 1).sample(probs.shape).to(probs.device) sorted_confidence, _ = torch.sort(confidence, dim=-1) # from small to large # Obtains cut off threshold given the mask lengths. # cut_off = torch.take_along_dim(sorted_confidence, mask_len.to(torch.long), dim=-1) cut_off = sorted_confidence.gather(dim=-1, index=mask_len.to(torch.long)) # Masks tokens with lower confidence. masking = (confidence < cut_off) return masking def load(self, is_test=False, prefix=None): if prefix is not None: transformer_path = self.transformer_path + prefix + '.pth' else: transformer_path = self.transformer_path + '_last.pth' if self.config.restore or is_test: # transformer_path = '/home/ubuntu/AmodalVQGAN_pre/check_points/fish_amodal_transformer_gjx/GETransformer_210000.pth' if os.path.exists(transformer_path): print('Rank {} is loading {} Transformer...'.format(self.rank, transformer_path)) data = torch.load(transformer_path, map_location="cpu")
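`loss_and_evaluation` in the model code above reports full-mask and occluded-region IoU through the `video_iou` helper listed in this record's context. Restated as a standalone sketch with a small hand-checkable example (the masks below are made up for illustration):

```python
# Standalone restatement of the record's video_iou: threshold at 0.5, then
# IoU computed over the last two (H, W) dimensions.
import torch


def video_iou(pred: torch.Tensor, labels: torch.Tensor, e: float = 1e-6) -> torch.Tensor:
    pred = (pred > 0.5).float()
    labels = (labels > 0.5).float()
    intersection = pred * labels
    union = (pred + labels) - intersection
    return intersection.sum(-1).sum(-1) / (union.sum(-1).sum(-1) + e)


if __name__ == "__main__":
    # Two 4x4 masks overlapping on half of their pixels (illustrative values).
    a = torch.zeros(1, 4, 4); a[:, :, :2] = 1.0
    b = torch.zeros(1, 4, 4); b[:, :, 1:3] = 1.0
    print(video_iou(a, b))  # intersection 4 px, union 12 px -> ~0.333
```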
torch_init_model(self.transformer, transformer_path, 'model')
9
2023-12-21 04:25:47+00:00
16k
lipku/metahuman-stream
main.py
[ { "identifier": "NeRFDataset", "path": "nerf_triplane/provider.py", "snippet": "class NeRFDataset:\n def __init__(self, opt, device, type='train', downscale=1):\n super().__init__()\n \n self.opt = opt\n self.device = device\n self.type = type # train, val, test\n self.downscale = downscale\n self.root_path = opt.path\n self.preload = opt.preload # 0 = disk, 1 = cpu, 2 = gpu\n self.scale = opt.scale # camera radius scale to make sure camera are inside the bounding box.\n self.offset = opt.offset # camera offset\n self.bound = opt.bound # bounding box half length, also used as the radius to random sample poses.\n self.fp16 = opt.fp16\n\n self.start_index = opt.data_range[0]\n self.end_index = opt.data_range[1]\n\n self.training = self.type in ['train', 'all', 'trainval']\n self.num_rays = self.opt.num_rays if self.training else -1\n\n # load nerf-compatible format data.\n \n with open(opt.pose, 'r') as f:\n transform = json.load(f)\n\n # load image size\n if 'h' in transform and 'w' in transform:\n self.H = int(transform['h']) // downscale\n self.W = int(transform['w']) // downscale\n else:\n self.H = int(transform['cy']) * 2 // downscale\n self.W = int(transform['cx']) * 2 // downscale\n \n # read images\n frames = transform[\"frames\"]\n\n # use a slice of the dataset\n if self.end_index == -1: # abuse...\n self.end_index = len(frames)\n\n frames = frames[self.start_index:self.end_index]\n print(f'[INFO] load {len(frames)} {type} frames.')\n\n # only load pre-calculated aud features when not live-streaming\n if not self.opt.asr:\n\n # empty means the default self-driven extracted features.\n if self.opt.aud == '':\n if 'esperanto' in self.opt.asr_model:\n aud_features = np.load(os.path.join(self.root_path, 'aud_eo.npy'))\n elif 'deepspeech' in self.opt.asr_model:\n aud_features = np.load(os.path.join(self.root_path, 'aud_ds.npy'))\n else:\n aud_features = np.load(os.path.join(self.root_path, 'aud.npy'))\n # cross-driven extracted features. 
\n else:\n aud_features = np.load(self.opt.aud)\n\n aud_features = torch.from_numpy(aud_features)\n\n # support both [N, 16] labels and [N, 16, K] logits\n if len(aud_features.shape) == 3:\n aud_features = aud_features.float().permute(0, 2, 1) # [N, 16, 29] --> [N, 29, 16] \n\n if self.opt.emb:\n print(f'[INFO] argmax to aud features {aud_features.shape} for --emb mode')\n aud_features = aud_features.argmax(1) # [N, 16]\n \n else:\n assert self.opt.emb, \"aud only provide labels, must use --emb\"\n aud_features = aud_features.long()\n\n print(f'[INFO] load {self.opt.aud} aud_features: {aud_features.shape}')\n\n # load action units\n import pandas as pd\n au_blink_info=pd.read_csv(os.path.join(self.root_path, 'au.csv'))\n au_blink = au_blink_info[' AU45_r'].values\n\n self.torso_img = []\n self.images = []\n\n self.poses = []\n self.exps = []\n\n self.auds = []\n self.face_rect = []\n self.lhalf_rect = []\n self.lips_rect = []\n self.eye_area = []\n self.eye_rect = []\n\n for f in tqdm.tqdm(frames, desc=f'Loading {type} data'):\n\n f_path = os.path.join(self.root_path, 'gt_imgs', str(f['img_id']) + '.jpg')\n\n if not os.path.exists(f_path):\n print('[WARN]', f_path, 'NOT FOUND!')\n continue\n \n pose = np.array(f['transform_matrix'], dtype=np.float32) # [4, 4]\n pose = nerf_matrix_to_ngp(pose, scale=self.scale, offset=self.offset)\n self.poses.append(pose)\n\n if self.preload > 0:\n image = cv2.imread(f_path, cv2.IMREAD_UNCHANGED) # [H, W, 3] o [H, W, 4]\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = image.astype(np.float32) / 255 # [H, W, 3/4]\n\n self.images.append(image)\n else:\n self.images.append(f_path)\n\n # load frame-wise bg\n \n torso_img_path = os.path.join(self.root_path, 'torso_imgs', str(f['img_id']) + '.png')\n\n if self.preload > 0:\n torso_img = cv2.imread(torso_img_path, cv2.IMREAD_UNCHANGED) # [H, W, 4]\n torso_img = cv2.cvtColor(torso_img, cv2.COLOR_BGRA2RGBA)\n torso_img = torso_img.astype(np.float32) / 255 # [H, W, 3/4]\n\n self.torso_img.append(torso_img)\n else:\n self.torso_img.append(torso_img_path)\n\n # find the corresponding audio to the image frame\n if not self.opt.asr and self.opt.aud == '':\n aud = aud_features[min(f['aud_id'], aud_features.shape[0] - 1)] # careful for the last frame...\n self.auds.append(aud)\n\n # load lms and extract face\n lms = np.loadtxt(os.path.join(self.root_path, 'ori_imgs', str(f['img_id']) + '.lms')) # [68, 2]\n\n lh_xmin, lh_xmax = int(lms[31:36, 1].min()), int(lms[:, 1].max()) # actually lower half area\n xmin, xmax = int(lms[:, 1].min()), int(lms[:, 1].max())\n ymin, ymax = int(lms[:, 0].min()), int(lms[:, 0].max())\n self.face_rect.append([xmin, xmax, ymin, ymax])\n self.lhalf_rect.append([lh_xmin, lh_xmax, ymin, ymax])\n\n if self.opt.exp_eye:\n # eyes_left = slice(36, 42)\n # eyes_right = slice(42, 48)\n\n # area_left = polygon_area(lms[eyes_left, 0], lms[eyes_left, 1])\n # area_right = polygon_area(lms[eyes_right, 0], lms[eyes_right, 1])\n\n # # area percentage of two eyes of the whole image...\n # area = (area_left + area_right) / (self.H * self.W) * 100\n\n # action units blink AU45\n area = au_blink[f['img_id']]\n area = np.clip(area, 0, 2) / 2\n # area = area + np.random.rand() / 10\n self.eye_area.append(area)\n\n xmin, xmax = int(lms[36:48, 1].min()), int(lms[36:48, 1].max())\n ymin, ymax = int(lms[36:48, 0].min()), int(lms[36:48, 0].max())\n self.eye_rect.append([xmin, xmax, ymin, ymax])\n\n if self.opt.finetune_lips:\n lips = slice(48, 60)\n xmin, xmax = int(lms[lips, 1].min()), int(lms[lips, 
1].max())\n ymin, ymax = int(lms[lips, 0].min()), int(lms[lips, 0].max())\n\n # padding to H == W\n cx = (xmin + xmax) // 2\n cy = (ymin + ymax) // 2\n\n l = max(xmax - xmin, ymax - ymin) // 2\n xmin = max(0, cx - l)\n xmax = min(self.H, cx + l)\n ymin = max(0, cy - l)\n ymax = min(self.W, cy + l)\n\n self.lips_rect.append([xmin, xmax, ymin, ymax])\n \n # load pre-extracted background image (should be the same size as training image...)\n\n if self.opt.bg_img == 'white': # special\n bg_img = np.ones((self.H, self.W, 3), dtype=np.float32)\n elif self.opt.bg_img == 'black': # special\n bg_img = np.zeros((self.H, self.W, 3), dtype=np.float32)\n else: # load from file\n # default bg\n if self.opt.bg_img == '':\n self.opt.bg_img = os.path.join(self.root_path, 'bc.jpg')\n bg_img = cv2.imread(self.opt.bg_img, cv2.IMREAD_UNCHANGED) # [H, W, 3]\n if bg_img.shape[0] != self.H or bg_img.shape[1] != self.W:\n bg_img = cv2.resize(bg_img, (self.W, self.H), interpolation=cv2.INTER_AREA)\n bg_img = cv2.cvtColor(bg_img, cv2.COLOR_BGR2RGB)\n bg_img = bg_img.astype(np.float32) / 255 # [H, W, 3/4]\n\n self.bg_img = bg_img\n\n self.poses = np.stack(self.poses, axis=0)\n\n # smooth camera path...\n if self.opt.smooth_path:\n self.poses = smooth_camera_path(self.poses, self.opt.smooth_path_window)\n \n self.poses = torch.from_numpy(self.poses) # [N, 4, 4]\n\n if self.preload > 0:\n self.images = torch.from_numpy(np.stack(self.images, axis=0)) # [N, H, W, C]\n self.torso_img = torch.from_numpy(np.stack(self.torso_img, axis=0)) # [N, H, W, C]\n else:\n self.images = np.array(self.images)\n self.torso_img = np.array(self.torso_img)\n\n if self.opt.asr:\n # live streaming, no pre-calculated auds\n self.auds = None\n else:\n # auds corresponding to images\n if self.opt.aud == '':\n self.auds = torch.stack(self.auds, dim=0) # [N, 32, 16]\n # auds is novel, may have a different length with images\n else:\n self.auds = aud_features\n \n self.bg_img = torch.from_numpy(self.bg_img)\n\n if self.opt.exp_eye:\n self.eye_area = np.array(self.eye_area, dtype=np.float32) # [N]\n print(f'[INFO] eye_area: {self.eye_area.min()} - {self.eye_area.max()}')\n\n if self.opt.smooth_eye:\n\n # naive 5 window average\n ori_eye = self.eye_area.copy()\n for i in range(ori_eye.shape[0]):\n start = max(0, i - 1)\n end = min(ori_eye.shape[0], i + 2)\n self.eye_area[i] = ori_eye[start:end].mean()\n\n self.eye_area = torch.from_numpy(self.eye_area).view(-1, 1) # [N, 1]\n\n \n # calculate mean radius of all camera poses\n self.radius = self.poses[:, :3, 3].norm(dim=-1).mean(0).item()\n #print(f'[INFO] dataset camera poses: radius = {self.radius:.4f}, bound = {self.bound}')\n\n \n # [debug] uncomment to view all training poses.\n # visualize_poses(self.poses.numpy())\n\n # [debug] uncomment to view examples of randomly generated poses.\n # visualize_poses(rand_poses(100, self.device, radius=self.radius).cpu().numpy())\n\n if self.preload > 1:\n self.poses = self.poses.to(self.device)\n\n if self.auds is not None:\n self.auds = self.auds.to(self.device)\n\n self.bg_img = self.bg_img.to(torch.half).to(self.device)\n\n self.torso_img = self.torso_img.to(torch.half).to(self.device)\n self.images = self.images.to(torch.half).to(self.device)\n \n if self.opt.exp_eye:\n self.eye_area = self.eye_area.to(self.device)\n\n # load intrinsics\n if 'focal_len' in transform:\n fl_x = fl_y = transform['focal_len']\n elif 'fl_x' in transform or 'fl_y' in transform:\n fl_x = (transform['fl_x'] if 'fl_x' in transform else transform['fl_y']) / downscale\n fl_y = 
(transform['fl_y'] if 'fl_y' in transform else transform['fl_x']) / downscale\n elif 'camera_angle_x' in transform or 'camera_angle_y' in transform:\n # blender, assert in radians. already downscaled since we use H/W\n fl_x = self.W / (2 * np.tan(transform['camera_angle_x'] / 2)) if 'camera_angle_x' in transform else None\n fl_y = self.H / (2 * np.tan(transform['camera_angle_y'] / 2)) if 'camera_angle_y' in transform else None\n if fl_x is None: fl_x = fl_y\n if fl_y is None: fl_y = fl_x\n else:\n raise RuntimeError('Failed to load focal length, please check the transforms.json!')\n\n cx = (transform['cx'] / downscale) if 'cx' in transform else (self.W / 2)\n cy = (transform['cy'] / downscale) if 'cy' in transform else (self.H / 2)\n \n self.intrinsics = np.array([fl_x, fl_y, cx, cy])\n\n # directly build the coordinate meshgrid in [-1, 1]^2\n self.bg_coords = get_bg_coords(self.H, self.W, self.device) # [1, H*W, 2] in [-1, 1]\n\n\n def mirror_index(self, index):\n size = self.poses.shape[0]\n turn = index // size\n res = index % size\n if turn % 2 == 0:\n return res\n else:\n return size - res - 1\n\n\n def collate(self, index):\n\n B = len(index) # a list of length 1\n # assert B == 1\n\n results = {}\n\n # audio use the original index\n if self.auds is not None:\n auds = get_audio_features(self.auds, self.opt.att, index[0]).to(self.device)\n results['auds'] = auds\n\n # head pose and bg image may mirror (replay --> <-- --> <--).\n index[0] = self.mirror_index(index[0])\n\n poses = self.poses[index].to(self.device) # [B, 4, 4]\n \n if self.training and self.opt.finetune_lips:\n rect = self.lips_rect[index[0]]\n results['rect'] = rect\n rays = get_rays(poses, self.intrinsics, self.H, self.W, -1, rect=rect)\n else:\n rays = get_rays(poses, self.intrinsics, self.H, self.W, self.num_rays, self.opt.patch_size)\n\n results['index'] = index # for ind. 
code\n results['H'] = self.H\n results['W'] = self.W\n results['rays_o'] = rays['rays_o']\n results['rays_d'] = rays['rays_d']\n\n # get a mask for rays inside rect_face\n if self.training:\n xmin, xmax, ymin, ymax = self.face_rect[index[0]]\n face_mask = (rays['j'] >= xmin) & (rays['j'] < xmax) & (rays['i'] >= ymin) & (rays['i'] < ymax) # [B, N]\n results['face_mask'] = face_mask\n \n xmin, xmax, ymin, ymax = self.lhalf_rect[index[0]]\n lhalf_mask = (rays['j'] >= xmin) & (rays['j'] < xmax) & (rays['i'] >= ymin) & (rays['i'] < ymax) # [B, N]\n results['lhalf_mask'] = lhalf_mask\n\n if self.opt.exp_eye:\n results['eye'] = self.eye_area[index].to(self.device) # [1]\n if self.training:\n results['eye'] += (np.random.rand()-0.5) / 10\n xmin, xmax, ymin, ymax = self.eye_rect[index[0]]\n eye_mask = (rays['j'] >= xmin) & (rays['j'] < xmax) & (rays['i'] >= ymin) & (rays['i'] < ymax) # [B, N]\n results['eye_mask'] = eye_mask\n\n else:\n results['eye'] = None\n\n # load bg\n bg_torso_img = self.torso_img[index]\n if self.preload == 0: # on the fly loading\n bg_torso_img = cv2.imread(bg_torso_img[0], cv2.IMREAD_UNCHANGED) # [H, W, 4]\n bg_torso_img = cv2.cvtColor(bg_torso_img, cv2.COLOR_BGRA2RGBA)\n bg_torso_img = bg_torso_img.astype(np.float32) / 255 # [H, W, 3/4]\n bg_torso_img = torch.from_numpy(bg_torso_img).unsqueeze(0)\n bg_torso_img = bg_torso_img[..., :3] * bg_torso_img[..., 3:] + self.bg_img * (1 - bg_torso_img[..., 3:])\n bg_torso_img = bg_torso_img.view(B, -1, 3).to(self.device)\n\n if not self.opt.torso:\n bg_img = bg_torso_img\n else:\n bg_img = self.bg_img.view(1, -1, 3).repeat(B, 1, 1).to(self.device)\n\n if self.training:\n bg_img = torch.gather(bg_img, 1, torch.stack(3 * [rays['inds']], -1)) # [B, N, 3]\n\n results['bg_color'] = bg_img\n\n if self.opt.torso and self.training:\n bg_torso_img = torch.gather(bg_torso_img, 1, torch.stack(3 * [rays['inds']], -1)) # [B, N, 3]\n results['bg_torso_color'] = bg_torso_img\n\n images = self.images[index] # [B, H, W, 3/4]\n if self.preload == 0:\n images = cv2.imread(images[0], cv2.IMREAD_UNCHANGED) # [H, W, 3]\n images = cv2.cvtColor(images, cv2.COLOR_BGR2RGB)\n images = images.astype(np.float32) / 255 # [H, W, 3]\n images = torch.from_numpy(images).unsqueeze(0)\n images = images.to(self.device)\n\n if self.training:\n C = images.shape[-1]\n images = torch.gather(images.view(B, -1, C), 1, torch.stack(C * [rays['inds']], -1)) # [B, N, 3/4]\n \n results['images'] = images\n\n if self.training:\n bg_coords = torch.gather(self.bg_coords, 1, torch.stack(2 * [rays['inds']], -1)) # [1, N, 2]\n else:\n bg_coords = self.bg_coords # [1, N, 2]\n\n results['bg_coords'] = bg_coords\n\n # results['poses'] = convert_poses(poses) # [B, 6]\n # results['poses_matrix'] = poses # [B, 4, 4]\n results['poses'] = poses # [B, 4, 4]\n \n return results\n\n def dataloader(self):\n\n if self.training:\n # training len(poses) == len(auds)\n size = self.poses.shape[0]\n else:\n # test with novel auds, then use its length\n if self.auds is not None:\n size = self.auds.shape[0]\n # live stream test, use 2 * len(poses), so it naturally mirrors.\n else:\n size = 2 * self.poses.shape[0]\n\n loader = DataLoader(list(range(size)), batch_size=1, collate_fn=self.collate, shuffle=self.training, num_workers=0)\n loader._data = self # an ugly fix... 
we need poses in trainer.\n\n # do evaluate if has gt images and use self-driven setting\n loader.has_gt = (self.opt.aud == '')\n\n return loader " }, { "identifier": "NeRFNetwork", "path": "nerf_triplane/network.py", "snippet": "class NeRFNetwork(NeRFRenderer):\n def __init__(self,\n opt,\n # torso net (hard coded for now)\n ):\n super().__init__(opt)\n\n # audio embedding\n self.emb = self.opt.emb\n\n if 'esperanto' in self.opt.asr_model:\n self.audio_in_dim = 44\n elif 'deepspeech' in self.opt.asr_model:\n self.audio_in_dim = 29\n else:\n self.audio_in_dim = 32\n \n if self.emb:\n self.embedding = nn.Embedding(self.audio_in_dim, self.audio_in_dim)\n\n # audio network\n audio_dim = 32\n self.audio_dim = audio_dim\n self.audio_net = AudioNet(self.audio_in_dim, self.audio_dim)\n\n self.att = self.opt.att\n if self.att > 0:\n self.audio_att_net = AudioAttNet(self.audio_dim)\n\n # DYNAMIC PART\n self.num_levels = 12\n self.level_dim = 1\n self.encoder_xy, self.in_dim_xy = get_encoder('hashgrid', input_dim=2, num_levels=self.num_levels, level_dim=self.level_dim, base_resolution=64, log2_hashmap_size=14, desired_resolution=512 * self.bound)\n self.encoder_yz, self.in_dim_yz = get_encoder('hashgrid', input_dim=2, num_levels=self.num_levels, level_dim=self.level_dim, base_resolution=64, log2_hashmap_size=14, desired_resolution=512 * self.bound)\n self.encoder_xz, self.in_dim_xz = get_encoder('hashgrid', input_dim=2, num_levels=self.num_levels, level_dim=self.level_dim, base_resolution=64, log2_hashmap_size=14, desired_resolution=512 * self.bound)\n\n self.in_dim = self.in_dim_xy + self.in_dim_yz + self.in_dim_xz\n\n ## sigma network\n self.num_layers = 3\n self.hidden_dim = 64\n self.geo_feat_dim = 64\n self.eye_att_net = MLP(self.in_dim, 1, 16, 2)\n self.eye_dim = 1 if self.exp_eye else 0\n self.sigma_net = MLP(self.in_dim + self.audio_dim + self.eye_dim, 1 + self.geo_feat_dim, self.hidden_dim, self.num_layers)\n ## color network\n self.num_layers_color = 2\n self.hidden_dim_color = 64\n self.encoder_dir, self.in_dim_dir = get_encoder('spherical_harmonics')\n self.color_net = MLP(self.in_dim_dir + self.geo_feat_dim + self.individual_dim, 3, self.hidden_dim_color, self.num_layers_color)\n # 处理音频的\n self.unc_net = MLP(self.in_dim, 1, 32, 2)\n\n self.aud_ch_att_net = MLP(self.in_dim, self.audio_dim, 64, 2)\n\n self.testing = False\n\n if self.torso:\n # torso deform network\n self.register_parameter('anchor_points', \n nn.Parameter(torch.tensor([[0.01, 0.01, 0.1, 1], [-0.1, -0.1, 0.1, 1], [0.1, -0.1, 0.1, 1]])))\n self.torso_deform_encoder, self.torso_deform_in_dim = get_encoder('frequency', input_dim=2, multires=8)\n # self.torso_deform_encoder, self.torso_deform_in_dim = get_encoder('tiledgrid', input_dim=2, num_levels=16, level_dim=1, base_resolution=16, log2_hashmap_size=16, desired_resolution=512)\n self.anchor_encoder, self.anchor_in_dim = get_encoder('frequency', input_dim=6, multires=3)\n self.torso_deform_net = MLP(self.torso_deform_in_dim + self.anchor_in_dim + self.individual_dim_torso, 2, 32, 3)\n\n # torso color network\n self.torso_encoder, self.torso_in_dim = get_encoder('tiledgrid', input_dim=2, num_levels=16, level_dim=2, base_resolution=16, log2_hashmap_size=16, desired_resolution=2048)\n self.torso_net = MLP(self.torso_in_dim + self.torso_deform_in_dim + self.anchor_in_dim + self.individual_dim_torso, 4, 32, 3)\n\n\n def forward_torso(self, x, poses, c=None):\n # x: [N, 2] in [-1, 1]\n # head poses: [1, 4, 4]\n # c: [1, ind_dim], individual code\n\n # test: shrink x\n x = x * 
self.opt.torso_shrink\n # 对pose进行了调整\n # deformation-based\n wrapped_anchor = self.anchor_points[None, ...] @ poses.permute(0, 2, 1).inverse()\n wrapped_anchor = (wrapped_anchor[:, :, :2] / wrapped_anchor[:, :, 3, None] / wrapped_anchor[:, :, 2, None]).view(1, -1)\n # print(wrapped_anchor)\n # enc_pose = self.pose_encoder(poses)\n enc_anchor = self.anchor_encoder(wrapped_anchor)\n enc_x = self.torso_deform_encoder(x)\n\n if c is not None:\n h = torch.cat([enc_x, enc_anchor.repeat(x.shape[0], 1), c.repeat(x.shape[0], 1)], dim=-1)\n else:\n h = torch.cat([enc_x, enc_anchor.repeat(x.shape[0], 1)], dim=-1)\n\n dx = self.torso_deform_net(h)\n \n x = (x + dx).clamp(-1, 1)\n\n x = self.torso_encoder(x, bound=1)\n\n # h = torch.cat([x, h, enc_a.repeat(x.shape[0], 1)], dim=-1)\n h = torch.cat([x, h], dim=-1)\n\n h = self.torso_net(h)\n\n alpha = torch.sigmoid(h[..., :1])*(1 + 2*0.001) - 0.001\n color = torch.sigmoid(h[..., 1:])*(1 + 2*0.001) - 0.001\n\n return alpha, color, dx\n\n\n @staticmethod\n @torch.jit.script\n def split_xyz(x):\n xy, yz, xz = x[:, :-1], x[:, 1:], torch.cat([x[:,:1], x[:,-1:]], dim=-1)\n return xy, yz, xz\n\n\n def encode_x(self, xyz, bound):\n # x: [N, 3], in [-bound, bound]\n N, M = xyz.shape\n xy, yz, xz = self.split_xyz(xyz)\n feat_xy = self.encoder_xy(xy, bound=bound)\n feat_yz = self.encoder_yz(yz, bound=bound)\n feat_xz = self.encoder_xz(xz, bound=bound)\n \n return torch.cat([feat_xy, feat_yz, feat_xz], dim=-1)\n \n\n def encode_audio(self, a):\n # a: [1, 29, 16] or [8, 29, 16], audio features from deepspeech\n # if emb, a should be: [1, 16] or [8, 16]\n\n # fix audio traininig\n if a is None: return None\n\n if self.emb:\n a = self.embedding(a).transpose(-1, -2).contiguous() # [1/8, 29, 16]\n\n enc_a = self.audio_net(a) # [1/8, 64]\n\n if self.att > 0:\n enc_a = self.audio_att_net(enc_a.unsqueeze(0)) # [1, 64]\n \n return enc_a\n\n \n def predict_uncertainty(self, unc_inp):\n if self.testing or not self.opt.unc_loss:\n unc = torch.zeros_like(unc_inp)\n else:\n unc = self.unc_net(unc_inp.detach())\n\n return unc\n\n\n def forward(self, x, d, enc_a, c, e=None):\n # x: [N, 3], in [-bound, bound]\n # d: [N, 3], nomalized in [-1, 1]\n # enc_a: [1, aud_dim]\n # c: [1, ind_dim], individual code\n # e: [1, 1], eye feature\n enc_x = self.encode_x(x, bound=self.bound)\n\n sigma_result = self.density(x, enc_a, e, enc_x)\n sigma = sigma_result['sigma']\n geo_feat = sigma_result['geo_feat']\n aud_ch_att = sigma_result['ambient_aud']\n eye_att = sigma_result['ambient_eye']\n\n # color\n enc_d = self.encoder_dir(d)\n\n if c is not None:\n h = torch.cat([enc_d, geo_feat, c.repeat(x.shape[0], 1)], dim=-1)\n else:\n h = torch.cat([enc_d, geo_feat], dim=-1)\n \n h_color = self.color_net(h)\n color = torch.sigmoid(h_color)*(1 + 2*0.001) - 0.001\n \n uncertainty = self.predict_uncertainty(enc_x)\n uncertainty = torch.log(1 + torch.exp(uncertainty))\n\n return sigma, color, aud_ch_att, eye_att, uncertainty[..., None]\n\n\n def density(self, x, enc_a, e=None, enc_x=None):\n # x: [N, 3], in [-bound, bound]\n if enc_x is None:\n enc_x = self.encode_x(x, bound=self.bound)\n\n enc_a = enc_a.repeat(enc_x.shape[0], 1)\n aud_ch_att = self.aud_ch_att_net(enc_x)\n enc_w = enc_a * aud_ch_att\n\n if e is not None:\n # e = self.encoder_eye(e)\n eye_att = torch.sigmoid(self.eye_att_net(enc_x))\n e = e * eye_att\n # e = e.repeat(enc_x.shape[0], 1)\n h = torch.cat([enc_x, enc_w, e], dim=-1)\n else:\n h = torch.cat([enc_x, enc_w], dim=-1)\n\n h = self.sigma_net(h)\n\n sigma = torch.exp(h[..., 0])\n 
geo_feat = h[..., 1:]\n\n return {\n 'sigma': sigma,\n 'geo_feat': geo_feat,\n 'ambient_aud' : aud_ch_att.norm(dim=-1, keepdim=True),\n 'ambient_eye' : eye_att,\n }\n\n\n # optimizer utils\n def get_params(self, lr, lr_net, wd=0):\n\n # ONLY train torso\n if self.torso:\n params = [\n {'params': self.torso_encoder.parameters(), 'lr': lr},\n {'params': self.torso_deform_encoder.parameters(), 'lr': lr, 'weight_decay': wd},\n {'params': self.torso_net.parameters(), 'lr': lr_net, 'weight_decay': wd},\n {'params': self.torso_deform_net.parameters(), 'lr': lr_net, 'weight_decay': wd},\n {'params': self.anchor_points, 'lr': lr_net, 'weight_decay': wd}\n ]\n\n if self.individual_dim_torso > 0:\n params.append({'params': self.individual_codes_torso, 'lr': lr_net, 'weight_decay': wd})\n\n return params\n\n params = [\n {'params': self.audio_net.parameters(), 'lr': lr_net, 'weight_decay': wd}, \n\n {'params': self.encoder_xy.parameters(), 'lr': lr},\n {'params': self.encoder_yz.parameters(), 'lr': lr},\n {'params': self.encoder_xz.parameters(), 'lr': lr},\n # {'params': self.encoder_xyz.parameters(), 'lr': lr},\n\n {'params': self.sigma_net.parameters(), 'lr': lr_net, 'weight_decay': wd},\n {'params': self.color_net.parameters(), 'lr': lr_net, 'weight_decay': wd}, \n ]\n if self.att > 0:\n params.append({'params': self.audio_att_net.parameters(), 'lr': lr_net * 5, 'weight_decay': 0.0001})\n if self.emb:\n params.append({'params': self.embedding.parameters(), 'lr': lr})\n if self.individual_dim > 0:\n params.append({'params': self.individual_codes, 'lr': lr_net, 'weight_decay': wd})\n if self.train_camera:\n params.append({'params': self.camera_dT, 'lr': 1e-5, 'weight_decay': 0})\n params.append({'params': self.camera_dR, 'lr': 1e-5, 'weight_decay': 0})\n\n params.append({'params': self.aud_ch_att_net.parameters(), 'lr': lr_net, 'weight_decay': wd})\n params.append({'params': self.unc_net.parameters(), 'lr': lr_net, 'weight_decay': wd})\n params.append({'params': self.eye_att_net.parameters(), 'lr': lr_net, 'weight_decay': wd})\n\n return params" } ]
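In the NeRFDataset snippet above, `mirror_index` together with the `2 * len(poses)` test-loader size implements ping-pong playback, so a live audio stream can run longer than the captured pose sequence without a visible jump at the end. The indexing rule on its own, with a short illustrative run (the pose count below is an assumption, not taken from the record):

```python
def mirror_index(index: int, size: int) -> int:
    # Same rule as NeRFDataset.mirror_index: walk forward through the poses,
    # then backward, then forward again, and so on.
    turn = index // size
    res = index % size
    return res if turn % 2 == 0 else size - res - 1


if __name__ == "__main__":
    size = 4  # illustrative pose count
    print([mirror_index(i, size) for i in range(2 * size)])
    # [0, 1, 2, 3, 3, 2, 1, 0]
```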
import torch import argparse from nerf_triplane.provider import NeRFDataset from nerf_triplane.utils import * from nerf_triplane.network import NeRFNetwork from nerf_triplane.gui import NeRFGUI
11,006
parser.add_argument('--dt_gamma', type=float, default=1/256, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)") parser.add_argument('--min_near', type=float, default=0.05, help="minimum near distance for camera") parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied (sigma)") parser.add_argument('--density_thresh_torso', type=float, default=0.01, help="threshold for density grid to be occupied (alpha)") parser.add_argument('--patch_size', type=int, default=1, help="[experimental] render patches in training, so as to apply LPIPS loss. 1 means disabled, use [64, 32, 16] to enable") parser.add_argument('--init_lips', action='store_true', help="init lips region") parser.add_argument('--finetune_lips', action='store_true', help="use LPIPS and landmarks to fine tune lips region") parser.add_argument('--smooth_lips', action='store_true', help="smooth the enc_a in a exponential decay way...") parser.add_argument('--torso', action='store_true', help="fix head and train torso") parser.add_argument('--head_ckpt', type=str, default='', help="head model") ### GUI options parser.add_argument('--gui', action='store_true', help="start a GUI") parser.add_argument('--W', type=int, default=450, help="GUI width") parser.add_argument('--H', type=int, default=450, help="GUI height") parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center") parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy") parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel") ### else parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)") parser.add_argument('--aud', type=str, default='', help="audio source (empty will load the default, else should be a path to a npy file)") parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits") parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off") parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size") parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off") parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension") parser.add_argument('--part', action='store_true', help="use partial training data (1/10)") parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)") parser.add_argument('--train_camera', action='store_true', help="optimize camera pose") parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size") parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size") # asr parser.add_argument('--asr', action='store_true', help="load asr for real-time app") parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input") parser.add_argument('--asr_play', action='store_true', help="play out the audio") parser.add_argument('--asr_model', type=str, default='deepspeech') # parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto') # parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self') 
parser.add_argument('--asr_save_feats', action='store_true') # audio FPS parser.add_argument('--fps', type=int, default=50) # sliding window left-middle-right length (unit: 20ms) parser.add_argument('-l', type=int, default=10) parser.add_argument('-m', type=int, default=50) parser.add_argument('-r', type=int, default=10) opt = parser.parse_args() if opt.O: opt.fp16 = True opt.exp_eye = True if opt.test and False: opt.smooth_path = True opt.smooth_eye = True opt.smooth_lips = True opt.cuda_ray = True # assert opt.cuda_ray, "Only support CUDA ray mode." if opt.patch_size > 1: # assert opt.patch_size > 16, "patch_size should > 16 to run LPIPS loss." assert opt.num_rays % (opt.patch_size ** 2) == 0, "patch_size ** 2 should be dividable by num_rays." # if opt.finetune_lips: # # do not update density grid in finetune stage # opt.update_extra_interval = 1e9 print(opt) seed_everything(opt.seed) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = NeRFNetwork(opt) # manually load state dict for head if opt.torso and opt.head_ckpt != '': model_dict = torch.load(opt.head_ckpt, map_location='cpu')['model'] missing_keys, unexpected_keys = model.load_state_dict(model_dict, strict=False) if len(missing_keys) > 0: print(f"[WARN] missing keys: {missing_keys}") if len(unexpected_keys) > 0: print(f"[WARN] unexpected keys: {unexpected_keys}") # freeze these keys for k, v in model.named_parameters(): if k in model_dict: # print(f'[INFO] freeze {k}, {v.shape}') v.requires_grad = False # print(model) criterion = torch.nn.MSELoss(reduction='none') if opt.test: if opt.gui: metrics = [] # use no metric in GUI for faster initialization... else: # metrics = [PSNRMeter(), LPIPSMeter(device=device)] metrics = [PSNRMeter(), LPIPSMeter(device=device), LMDMeter(backend='fan')] trainer = Trainer('ngp', opt, model, device=device, workspace=opt.workspace, criterion=criterion, fp16=opt.fp16, metrics=metrics, use_checkpoint=opt.ckpt) if opt.test_train:
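The cropped_code for this record ends inside the option handling of main.py, where the single `-O` switch is expanded into several related flags after parsing (the flag is declared in the full script below as "equals --fp16 --cuda_ray --exp_eye"). A minimal sketch of that convenience-flag pattern follows; the exact nesting of `opt.cuda_ray = True` is ambiguous in the flattened dump, so the sketch simply follows the help string, and the demo argv is made up.

```python
# Minimal sketch of the -O convenience-flag pattern from this record's main.py:
# one switch that is expanded into several related options after argparse runs.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray --exp_eye")
parser.add_argument('--fp16', action='store_true')
parser.add_argument('--cuda_ray', action='store_true')
parser.add_argument('--exp_eye', action='store_true')

opt = parser.parse_args(['-O'])
if opt.O:
    # Expand the shorthand into the individual options.
    opt.fp16 = True
    opt.cuda_ray = True
    opt.exp_eye = True

print(opt.fp16, opt.cuda_ray, opt.exp_eye)  # True True True
```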
# torch.autograd.set_detect_anomaly(True) # Close tf32 features. Fix low numerical accuracy on rtx30xx gpu. try: torch.backends.cuda.matmul.allow_tf32 = False torch.backends.cudnn.allow_tf32 = False except AttributeError as e: print('Info. This pytorch version is not support with tf32.') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('path', type=str) parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray --exp_eye") parser.add_argument('--test', action='store_true', help="test mode (load model and test dataset)") parser.add_argument('--test_train', action='store_true', help="test mode (load model and train dataset)") parser.add_argument('--data_range', type=int, nargs='*', default=[0, -1], help="data range to use") parser.add_argument('--workspace', type=str, default='workspace') parser.add_argument('--seed', type=int, default=0) ### training options parser.add_argument('--iters', type=int, default=200000, help="training iters") parser.add_argument('--lr', type=float, default=1e-2, help="initial learning rate") parser.add_argument('--lr_net', type=float, default=1e-3, help="initial learning rate") parser.add_argument('--ckpt', type=str, default='latest') parser.add_argument('--num_rays', type=int, default=4096 * 16, help="num rays sampled per image for each training step") parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch") parser.add_argument('--max_steps', type=int, default=16, help="max num steps sampled per ray (only valid when using --cuda_ray)") parser.add_argument('--num_steps', type=int, default=16, help="num steps sampled per ray (only valid when NOT using --cuda_ray)") parser.add_argument('--upsample_steps', type=int, default=0, help="num steps up-sampled per ray (only valid when NOT using --cuda_ray)") parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)") parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when NOT using --cuda_ray)") ### loss set parser.add_argument('--warmup_step', type=int, default=10000, help="warm up steps") parser.add_argument('--amb_aud_loss', type=int, default=1, help="use ambient aud loss") parser.add_argument('--amb_eye_loss', type=int, default=1, help="use ambient eye loss") parser.add_argument('--unc_loss', type=int, default=1, help="use uncertainty loss") parser.add_argument('--lambda_amb', type=float, default=1e-4, help="lambda for ambient loss") ### network backbone options parser.add_argument('--fp16', action='store_true', help="use amp mixed precision training") parser.add_argument('--bg_img', type=str, default='', help="background image") parser.add_argument('--fbg', action='store_true', help="frame-wise bg") parser.add_argument('--exp_eye', action='store_true', help="explicitly control the eyes") parser.add_argument('--fix_eye', type=float, default=-1, help="fixed eye area, negative to disable, set to 0-0.3 for a reasonable eye") parser.add_argument('--smooth_eye', action='store_true', help="smooth the eye area sequence") parser.add_argument('--torso_shrink', type=float, default=0.8, help="shrink bg coords to allow more flexibility in deform") ### dataset options parser.add_argument('--color_space', type=str, default='srgb', help="Color space, supports (linear, srgb)") parser.add_argument('--preload', type=int, default=0, help="0 means load data from disk on-the-fly, 1 
means preload to CPU, 2 means GPU.") # (the default value is for the fox dataset) parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box[-bound, bound]^3, if > 1, will invoke adaptive ray marching.") parser.add_argument('--scale', type=float, default=4, help="scale camera location into box[-bound, bound]^3") parser.add_argument('--offset', type=float, nargs='*', default=[0, 0, 0], help="offset of camera location") parser.add_argument('--dt_gamma', type=float, default=1/256, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)") parser.add_argument('--min_near', type=float, default=0.05, help="minimum near distance for camera") parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied (sigma)") parser.add_argument('--density_thresh_torso', type=float, default=0.01, help="threshold for density grid to be occupied (alpha)") parser.add_argument('--patch_size', type=int, default=1, help="[experimental] render patches in training, so as to apply LPIPS loss. 1 means disabled, use [64, 32, 16] to enable") parser.add_argument('--init_lips', action='store_true', help="init lips region") parser.add_argument('--finetune_lips', action='store_true', help="use LPIPS and landmarks to fine tune lips region") parser.add_argument('--smooth_lips', action='store_true', help="smooth the enc_a in a exponential decay way...") parser.add_argument('--torso', action='store_true', help="fix head and train torso") parser.add_argument('--head_ckpt', type=str, default='', help="head model") ### GUI options parser.add_argument('--gui', action='store_true', help="start a GUI") parser.add_argument('--W', type=int, default=450, help="GUI width") parser.add_argument('--H', type=int, default=450, help="GUI height") parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center") parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy") parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel") ### else parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)") parser.add_argument('--aud', type=str, default='', help="audio source (empty will load the default, else should be a path to a npy file)") parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits") parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off") parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size") parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off") parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension") parser.add_argument('--part', action='store_true', help="use partial training data (1/10)") parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)") parser.add_argument('--train_camera', action='store_true', help="optimize camera pose") parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size") parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size") # asr parser.add_argument('--asr', action='store_true', help="load asr for real-time 
app") parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input") parser.add_argument('--asr_play', action='store_true', help="play out the audio") parser.add_argument('--asr_model', type=str, default='deepspeech') # parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto') # parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self') parser.add_argument('--asr_save_feats', action='store_true') # audio FPS parser.add_argument('--fps', type=int, default=50) # sliding window left-middle-right length (unit: 20ms) parser.add_argument('-l', type=int, default=10) parser.add_argument('-m', type=int, default=50) parser.add_argument('-r', type=int, default=10) opt = parser.parse_args() if opt.O: opt.fp16 = True opt.exp_eye = True if opt.test and False: opt.smooth_path = True opt.smooth_eye = True opt.smooth_lips = True opt.cuda_ray = True # assert opt.cuda_ray, "Only support CUDA ray mode." if opt.patch_size > 1: # assert opt.patch_size > 16, "patch_size should > 16 to run LPIPS loss." assert opt.num_rays % (opt.patch_size ** 2) == 0, "patch_size ** 2 should be dividable by num_rays." # if opt.finetune_lips: # # do not update density grid in finetune stage # opt.update_extra_interval = 1e9 print(opt) seed_everything(opt.seed) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = NeRFNetwork(opt) # manually load state dict for head if opt.torso and opt.head_ckpt != '': model_dict = torch.load(opt.head_ckpt, map_location='cpu')['model'] missing_keys, unexpected_keys = model.load_state_dict(model_dict, strict=False) if len(missing_keys) > 0: print(f"[WARN] missing keys: {missing_keys}") if len(unexpected_keys) > 0: print(f"[WARN] unexpected keys: {unexpected_keys}") # freeze these keys for k, v in model.named_parameters(): if k in model_dict: # print(f'[INFO] freeze {k}, {v.shape}') v.requires_grad = False # print(model) criterion = torch.nn.MSELoss(reduction='none') if opt.test: if opt.gui: metrics = [] # use no metric in GUI for faster initialization... else: # metrics = [PSNRMeter(), LPIPSMeter(device=device)] metrics = [PSNRMeter(), LPIPSMeter(device=device), LMDMeter(backend='fan')] trainer = Trainer('ngp', opt, model, device=device, workspace=opt.workspace, criterion=criterion, fp16=opt.fp16, metrics=metrics, use_checkpoint=opt.ckpt) if opt.test_train:
test_set = NeRFDataset(opt, device=device, type='train')
0
2023-12-19 01:32:46+00:00
16k
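As a quick sanity check on the audio options in the preceding record: its parser defines the ASR sliding window in 20 ms units (--fps 50, -l 10, -m 50, -r 10). A minimal sketch of what those defaults amount to in seconds, assuming exactly the default values shown above (the helper name is illustrative, not from the record):

# Illustrative only: converts the parser defaults from the record above
# (--fps 50, -l 10, -m 50, -r 10) into window lengths in seconds.
def window_seconds(fps: int = 50, left: int = 10, mid: int = 50, right: int = 10):
    hop = 1.0 / fps  # 50 audio frames per second -> 20 ms per frame
    return left * hop, mid * hop, right * hop

print(window_seconds())  # roughly (0.2, 1.0, 0.2) seconds of left / middle / right context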
MingtaoGuo/AnimateAnyone_unofficial
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n 
self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler
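The import_statement field above, reflowed for readability (same imports, one per line; this restores the layout the flattened field collapses):

import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler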
11,828
self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): 
new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """
mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
14
2023-12-16 03:31:33+00:00
16k
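For reference, the register_schedule buffers in the preceding record implement the standard DDPM closed forms. Assuming the usual notation $\alpha_t = 1 - \beta_t$ and $\bar\alpha_t = \prod_{s \le t} \alpha_s$, and taking v_posterior = 0,

$$q(x_t \mid x_0) = \mathcal{N}\big(x_t;\ \sqrt{\bar\alpha_t}\, x_0,\ (1 - \bar\alpha_t)\, \mathbf{I}\big),$$

$$q(x_{t-1} \mid x_t, x_0) = \mathcal{N}\big(x_{t-1};\ \tilde\mu_t(x_t, x_0),\ \tilde\beta_t\, \mathbf{I}\big), \qquad \tilde\mu_t = \frac{\beta_t \sqrt{\bar\alpha_{t-1}}}{1 - \bar\alpha_t}\, x_0 + \frac{(1 - \bar\alpha_{t-1}) \sqrt{\alpha_t}}{1 - \bar\alpha_t}\, x_t, \qquad \tilde\beta_t = \frac{1 - \bar\alpha_{t-1}}{1 - \bar\alpha_t}\, \beta_t.$$

Here posterior_mean_coef1 and posterior_mean_coef2 are the two coefficients of $\tilde\mu_t$, posterior_variance is $\tilde\beta_t$, and the record's next_line target, mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start, is exactly the mean of $q(x_t \mid x_0)$.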
yasserben/CLOUDS
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "clouds/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = (\n \"MultiScaleMaskedTransformerDecoder\"\n )\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\n \"res3\",\n \"res4\",\n \"res5\",\n ]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. 
Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75\n\n # Resizing disabled for Synthia\n cfg.INPUT.RESIZE = CN()\n cfg.INPUT.RESIZE.ENABLED = True\n cfg.INPUT.RESIZE.SIZE_TRAIN = (1280, 720)\n\n # Saving Pseudo Labels during test time\n cfg.MODEL.SAVE_PSEUDO_LABELS = False\n\n # for the Dataset repeat factor\n # cfg.DATASETS.TRAIN_REPEAT_FACTOR = [(\"sd_v99\",5.0), (\"cityscapes_train\",1.0)]" }, { "identifier": "add_clouds_config", "path": "clouds/config.py", "snippet": "def add_clouds_config(cfg):\n # CLOUDS model config\n cfg.MODEL.CLOUDS = CN()\n cfg.MODEL.CLOUDS.CLIP_MODEL_NAME = \"convnext_large_d_320\"\n cfg.MODEL.CLOUDS.CLIP_PRETRAINED_WEIGHTS = \"laion2b_s29b_b131k_ft_soup\"\n cfg.MODEL.CLOUDS.EMBED_DIM = 768\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_ALPHA = 0.4\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_BETA = 0.8\n cfg.MODEL.CLOUDS.ENSEMBLE_ON_VALID_MASK = False\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE = False\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_EMA = False\n cfg.MODEL.CLOUDS.SAM = CN()\n cfg.MODEL.CLOUDS.SAM.ENABLED = False\n cfg.MODEL.CLOUDS.SAM.MOBILE = True\n cfg.MODEL.CLOUDS.SAM.MINIBATCH = False\n cfg.MODEL.CLOUDS.SAM.SIZE_THRESHOLD = 5000\n cfg.MODEL.CLOUDS.SAM.EROSION = False\n cfg.MODEL.CLOUDS.SAM.EROSION_SIZE = 3\n cfg.MODEL.CLOUDS.SAM.NUM_POINTS = 5\n cfg.MODEL.CLOUDS.SAM.SELECTION_MODE = \"random\"\n cfg.MODEL.CLOUDS.SAM.RM_INTERSECTION = True\n cfg.MODEL.CLOUDS.SAM.REFINEMENT = False\n cfg.MODEL.CLOUDS.SAM.ALPHA_EMA = 0.999\n cfg.MODEL.CLOUDS.OVERWRITING = True\n cfg.MODEL.CLOUDS.ITERATION_UPDATE = 100" }, { "identifier": "add_wandb_config", "path": "clouds/config.py", "snippet": "def add_wandb_config(cfg):\n # Wandb\n cfg.WANDB = CN()\n cfg.WANDB.PROJECT = \"clouds\"\n cfg.WANDB.NAME = None\n # use flash attention\n cfg.MODEL.FLASH = False" }, { "identifier": "add_prerocessing_training_set_config", "path": "clouds/config.py", "snippet": "def add_prerocessing_training_set_config(cfg):\n cfg.INPUT.FLIP = True\n cfg.INPUT.INITIAL_HEIGHT = 1052\n cfg.INPUT.INITIAL_WIDTH = 1914\n cfg.INPUT.RESIZE_HEIGHT = 720\n cfg.INPUT.RESIZE_WIDTH = 1280\n cfg.INPUT.PL_THRESHOLD = 0.0\n\n cfg.DATASETS.SOURCE_FACTOR = 1.0\n cfg.DATASETS.TARGET_FACTOR = 1.0" }, { "identifier": "add_repeat_factors", "path": "clouds/config.py", "snippet": "def add_repeat_factors(cfg):\n # for the Dataset repeat factor\n if (\n len(cfg.DATASETS.TRAIN) == 2\n and cfg.DATALOADER.SAMPLER_TRAIN == \"WeightedTrainingSampler\"\n ):\n if \"sd\" in cfg.DATASETS.TRAIN[0]:\n target_dataset = cfg.DATASETS.TRAIN[0]\n source_dataset = cfg.DATASETS.TRAIN[1]\n else:\n target_dataset = cfg.DATASETS.TRAIN[1]\n source_dataset = cfg.DATASETS.TRAIN[0]\n\n TRAIN_REPEAT_FACTOR = [\n (target_dataset, cfg.DATASETS.TARGET_FACTOR),\n (source_dataset, cfg.DATASETS.SOURCE_FACTOR),\n ]\n cfg.DATASETS.TRAIN_REPEAT_FACTOR = TRAIN_REPEAT_FACTOR\n return cfg\n else:\n return cfg" }, { "identifier": "MapperTrain", "path": "clouds/data/dataset_mappers/mapper_train.py", "snippet": "class MapperTrain:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations_src,\n augmentations_sd,\n augmentations_photo,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens_src = augmentations_src\n self.tfm_gens_sd = augmentations_sd\n self.tfm_gens_photometric = augmentations_photo\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(\n f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations_src}\"\n )\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n augs_src = []\n augs_sd = []\n augs_photometric = []\n # Build augmentation\n if cfg.INPUT.RESIZE.ENABLED:\n augs_src.append(\n T.ResizeScale(\n min_scale=0.5,\n max_scale=2.0,\n target_height=cfg.INPUT.INITIAL_HEIGHT,\n target_width=cfg.INPUT.INITIAL_WIDTH,\n interp=Image.BILINEAR,\n )\n )\n if cfg.INPUT.CROP.ENABLED:\n augs_src.append(\n T.FixedSizeCrop(\n (768, 768),\n pad=True,\n seg_pad_value=255,\n pad_value=0,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs_src.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs_photometric.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n if cfg.INPUT.FLIP:\n augs_src.append(T.RandomFlip())\n augs_sd.append(T.RandomFlip())\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations_src\": augs_src,\n \"augmentations_sd\": augs_sd,\n \"augmentations_photo\": augs_photometric,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert (\n self.is_train\n ), \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\n \"double\"\n )\n else:\n sem_seg_gt = np.full(\n (dataset_dict[\"height\"], dataset_dict[\"width\"]), self.ignore_label\n ).astype(\"double\")\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n if not (\"generated\" in str(dataset_dict[\"image_id\"])):\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens_src, aug_input)\n 
image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n else:\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens_sd, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n aug_input_photo, transforms = T.apply_transform_gens(\n self.tfm_gens_photometric, aug_input\n )\n image_aug = aug_input_photo.image\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n image_aug = torch.as_tensor(\n np.ascontiguousarray(image_aug.transpose(2, 0, 1))\n )\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n image_aug = F.pad(image_aug, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(\n sem_seg_gt, padding_size, value=self.ignore_label\n ).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n dataset_dict[\"image_aug\"] = image_aug\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\n \"Semantic segmentation dataset should not have 'annotations'.\"\n )\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros(\n (0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1])\n )\n else:\n masks = BitMasks(\n torch.stack(\n [\n torch.from_numpy(np.ascontiguousarray(x.copy()))\n for x in masks\n ]\n )\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MapperTest", "path": "clouds/data/dataset_mappers/mapper_test.py", "snippet": "class MapperTest:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by the model.\n\n This is the default callable to be used to map your dataset dict into training data.\n You may need to follow it to implement your own one for customized logic,\n such as a different way to read or transform images.\n See :doc:`/tutorials/data_loading` for details.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies cropping/geometric transforms to the image and annotations\n 3. 
Prepare data and annotations to Tensor and :class:`Instances`\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train: bool,\n *,\n augmentations: List[Union[T.Augmentation, T.Transform]],\n image_format: str,\n\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n is_train: whether it's used in training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n # if recompute_boxes:\n # assert use_instance_mask, \"recompute_boxes requires instance masks\"\n # fmt: off\n self.is_train = is_train\n self.augmentations = augmentations\n self.image_format = image_format\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[DatasetMapper] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train: bool = True):\n augs = [T.ResizeShortestEdge(short_edge_length=[1024], sample_style=\"choice\")]\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n\n\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n # USER: Write your own image loading if it's not from a file\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.image_format)\n utils.check_image_size(dataset_dict, image)\n\n # USER: Remove if you don't do semantic/panoptic segmentation.\n if \"sem_seg_file_name\" in dataset_dict:\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\"), \"L\").squeeze(2)\n else:\n sem_seg_gt = None\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transformation = T.apply_transform_gens(self.augmentations, aug_input)\n image, sem_seg_gt = aug_input.image, aug_input.sem_seg\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n dataset_dict['height'] = dataset_dict[\"image\"].shape[1]\n dataset_dict['width'] = dataset_dict[\"image\"].shape[2]\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"sem_seg_file_name\", None)\n return dataset_dict\n\n return dataset_dict" }, { "identifier": "CityscapesSemSegEvaluator", "path": "clouds/evaluation/cityscapes_evaluation.py", "snippet": "class CityscapesSemSegEvaluator(CityscapesEvaluator):\n \"\"\"\n Evaluate semantic segmentation results on cityscapes dataset using cityscapes API.\n\n Note:\n * It does not work in multi-machine distributed training.\n * It contains a synchronization, therefore has to be used on all ranks.\n * Only the main process runs evaluation.\n \"\"\"\n\n def process(self, inputs, outputs):\n from cityscapesscripts.helpers.labels import trainId2label\n for input, output in zip(inputs, outputs):\n file_name = input[\"file_name\"]\n basename = os.path.splitext(os.path.basename(file_name))[0]\n pred_filename = os.path.join(self._temp_dir, basename + 
\"_pred.png\")\n\n output = output[\"sem_seg\"].argmax(dim=0).to(self._cpu_device).numpy()\n pred = 255 * np.ones(output.shape, dtype=np.uint8)\n for train_id, label in trainId2label.items():\n if label.ignoreInEval:\n continue\n pred[output == train_id] = label.id\n Image.fromarray(pred).save(pred_filename)\n\n\n def evaluate(self):\n comm.synchronize()\n if comm.get_rank() > 0:\n return\n # Load the Cityscapes eval script *after* setting the required env var,\n # since the script reads CITYSCAPES_DATASET into global variables at load time.\n import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval\n\n self._logger.info(\"Evaluating results under {} ...\".format(self._temp_dir))\n\n # set some global states in cityscapes evaluation API, before evaluating\n cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)\n cityscapes_eval.args.predictionWalk = None\n cityscapes_eval.args.JSONOutput = False\n cityscapes_eval.args.colorized = False\n\n # These lines are adopted from\n # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa\n gt_dir = PathManager.get_local_path(self._metadata.gt_dir)\n groundTruthImgList = glob.glob(\n os.path.join(gt_dir, \"*\", \"*_gtFine_labelIds.png\")\n )\n assert len(\n groundTruthImgList\n ), \"Cannot find any ground truth images to use for evaluation. Searched for: {}\".format(\n cityscapes_eval.args.groundTruthSearch\n )\n predictionImgList = []\n for gt in groundTruthImgList:\n predictionImgList.append(\n cityscapes_eval.getPrediction(cityscapes_eval.args, gt)\n )\n results = cityscapes_eval.evaluateImgLists(\n predictionImgList, groundTruthImgList, cityscapes_eval.args\n )\n ret = OrderedDict()\n ret[\"sem_seg\"] = {\n \"mIoU\": 100.0 * results[\"averageScoreClasses\"],\n \"IoU.road\": 100.0 * results[\"classScores\"][\"road\"],\n \"IoU.sidewalk\": 100.0 * results[\"classScores\"][\"sidewalk\"],\n \"IoU.building\": 100.0 * results[\"classScores\"][\"building\"],\n \"IoU.wall\": 100.0 * results[\"classScores\"][\"wall\"],\n \"IoU.fence\": 100.0 * results[\"classScores\"][\"fence\"],\n \"IoU.pole\": 100.0 * results[\"classScores\"][\"pole\"],\n \"IoU.traffic light\": 100.0 * results[\"classScores\"][\"traffic light\"],\n \"IoU.traffic sign\": 100.0 * results[\"classScores\"][\"traffic sign\"],\n \"IoU.vegetation\": 100.0 * results[\"classScores\"][\"vegetation\"],\n \"IoU.terrain\": 100.0 * results[\"classScores\"][\"terrain\"],\n \"IoU.sky\": 100.0 * results[\"classScores\"][\"sky\"],\n \"IoU.person\": 100.0 * results[\"classScores\"][\"person\"],\n \"IoU.rider\": 100.0 * results[\"classScores\"][\"rider\"],\n \"IoU.car\": 100.0 * results[\"classScores\"][\"car\"],\n \"IoU.truck\": 100.0 * results[\"classScores\"][\"truck\"],\n \"IoU.bus\": 100.0 * results[\"classScores\"][\"bus\"],\n \"IoU.train\": 100.0 * results[\"classScores\"][\"train\"],\n \"IoU.motorcycle\": 100.0 * results[\"classScores\"][\"motorcycle\"],\n \"IoU.bicycle\": 100.0 * results[\"classScores\"][\"bicycle\"],\n }\n if not self._save_pl:\n self._working_dir.cleanup()\n return ret" }, { "identifier": "ClassicalSemSegEvaluator", "path": "clouds/evaluation/semantic_evaluation.py", "snippet": "class ClassicalSemSegEvaluator(DatasetEvaluator):\n \"\"\"\n Evaluate semantic segmentation metrics.\n \"\"\"\n\n def __init__(\n self,\n dataset_name,\n distributed=True,\n output_dir=None,\n *,\n sem_seg_loading_fn=load_image_into_numpy_array,\n num_classes=None,\n ignore_label=None,\n 
save_pl=False,\n ):\n \"\"\"\n Args:\n dataset_name (str): name of the dataset to be evaluated.\n distributed (bool): if True, will collect results from all ranks for evaluation.\n Otherwise, will evaluate the results in the current process.\n output_dir (str): an output directory to dump results.\n sem_seg_loading_fn: function to read sem seg file and load into numpy array.\n Default provided, but projects can customize.\n num_classes, ignore_label: deprecated argument\n \"\"\"\n self._logger = logging.getLogger(__name__)\n if num_classes is not None:\n self._logger.warn(\n \"SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata.\"\n )\n if ignore_label is not None:\n self._logger.warn(\n \"SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata.\"\n )\n self._dataset_name = dataset_name\n self._distributed = distributed\n self._output_dir = output_dir\n\n self._cpu_device = torch.device(\"cpu\")\n\n self.input_file_to_gt_file = {\n dataset_record[\"file_name\"]: dataset_record[\"sem_seg_file_name\"]\n for dataset_record in DatasetCatalog.get(dataset_name)\n }\n\n meta = MetadataCatalog.get(dataset_name)\n # Dict that maps contiguous training ids to COCO category ids\n try:\n c2d = meta.stuff_dataset_id_to_contiguous_id\n self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()}\n except AttributeError:\n self._contiguous_id_to_dataset_id = None\n self._class_names = meta.stuff_classes\n self.sem_seg_loading_fn = sem_seg_loading_fn\n self._num_classes = len(meta.stuff_classes)\n if num_classes is not None:\n assert (\n self._num_classes == num_classes\n ), f\"{self._num_classes} != {num_classes}\"\n self._ignore_label = (\n ignore_label if ignore_label is not None else meta.ignore_label\n )\n\n # This is because cv2.erode did not work for int datatype. Only works for uint8.\n self._compute_boundary_iou = True\n if not _CV2_IMPORTED:\n self._compute_boundary_iou = False\n self._logger.warn(\n \"\"\"Boundary IoU calculation requires OpenCV. B-IoU metrics are\n not going to be computed because OpenCV is not available to import.\"\"\"\n )\n if self._num_classes >= np.iinfo(np.uint8).max:\n self._compute_boundary_iou = False\n self._logger.warn(\n f\"\"\"SemSegEvaluator(num_classes) is more than supported value for Boundary IoU calculation!\n B-IoU metrics are not going to be computed. Max allowed value (exclusive)\n for num_classes for calculating Boundary IoU is {np.iinfo(np.uint8).max}.\n The number of classes of dataset {self._dataset_name} is {self._num_classes}\"\"\"\n )\n self._save_pl = save_pl\n\n def reset(self):\n self._conf_matrix = np.zeros(\n (self._num_classes + 1, self._num_classes + 1), dtype=np.int64\n )\n self._b_conf_matrix = np.zeros(\n (self._num_classes + 1, self._num_classes + 1), dtype=np.int64\n )\n self._predictions = []\n\n def process(self, inputs, outputs):\n \"\"\"\n Args:\n inputs: the inputs to a model.\n It is a list of dicts. Each dict corresponds to an image and\n contains keys like \"height\", \"width\", \"file_name\".\n outputs: the outputs of a model. 
It is either list of semantic segmentation predictions\n (Tensor [H, W]) or list of dicts with key \"sem_seg\" that contains semantic\n segmentation prediction in the same format.\n \"\"\"\n for input, output in zip(inputs, outputs):\n output = output[\"sem_seg\"].argmax(dim=0).to(self._cpu_device)\n pred = np.array(output, dtype=int)\n gt = input[\"sem_seg\"].numpy()\n\n gt[gt == self._ignore_label] = self._num_classes\n\n self._conf_matrix += np.bincount(\n (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),\n minlength=self._conf_matrix.size,\n ).reshape(self._conf_matrix.shape)\n\n if self._compute_boundary_iou:\n b_gt = self._mask_to_boundary(gt.astype(np.uint8))\n b_pred = self._mask_to_boundary(pred.astype(np.uint8))\n\n self._b_conf_matrix += np.bincount(\n (self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1),\n minlength=self._conf_matrix.size,\n ).reshape(self._conf_matrix.shape)\n\n if self._save_pl:\n self._predictions.extend(\n [dict(file_name=input[\"file_name\"], pred=pred)]\n )\n else:\n self._predictions.extend(\n self.encode_json_sem_seg(pred, input[\"file_name\"])\n )\n\n def evaluate(self):\n \"\"\"\n Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):\n\n * Mean intersection-over-union averaged across classes (mIoU)\n * Frequency Weighted IoU (fwIoU)\n * Mean pixel accuracy averaged across classes (mACC)\n * Pixel Accuracy (pACC)\n \"\"\"\n if self._distributed:\n synchronize()\n conf_matrix_list = all_gather(self._conf_matrix)\n b_conf_matrix_list = all_gather(self._b_conf_matrix)\n self._predictions = all_gather(self._predictions)\n self._predictions = list(itertools.chain(*self._predictions))\n if not is_main_process():\n return\n\n self._conf_matrix = np.zeros_like(self._conf_matrix)\n for conf_matrix in conf_matrix_list:\n self._conf_matrix += conf_matrix\n\n self._b_conf_matrix = np.zeros_like(self._b_conf_matrix)\n for b_conf_matrix in b_conf_matrix_list:\n self._b_conf_matrix += b_conf_matrix\n\n if self._output_dir:\n first_elem = self._predictions[0]\n if \"bdd\" in first_elem[\"file_name\"]:\n self._output_dir = os.path.join(self._output_dir, \"bdd_eval_pl\")\n elif \"mapillary\" in first_elem[\"file_name\"]:\n self._output_dir = os.path.join(self._output_dir, \"mapillary_eval_pl\")\n PathManager.mkdirs(self._output_dir)\n if self._save_pl:\n # A function that will iterate over the list of dictionnaries and write the corresponding image\n # in the output directory\n def write_image_from_dict(dict):\n filename = os.path.join(\n self._output_dir,\n dict[\"file_name\"].split(\"/\")[-1].split(\".\")[0] + \"_pred.png\",\n )\n pred = dict[\"pred\"]\n pred = get_rgb_from_semantic_map_maxed(pred)\n # pred = Image.fromarray(pred)\n pred.save(filename)\n\n # We apply the function to the list of dictionnaries\n list(map(write_image_from_dict, self._predictions))\n\n else:\n file_path = os.path.join(self._output_dir, \"sem_seg_predictions.json\")\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(self._predictions))\n\n acc = np.full(self._num_classes, np.nan, dtype=float)\n iou = np.full(self._num_classes, np.nan, dtype=float)\n tp = self._conf_matrix.diagonal()[:-1].astype(float)\n pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(float)\n class_weights = pos_gt / np.sum(pos_gt)\n pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(float)\n acc_valid = pos_gt > 0\n acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]\n union = pos_gt + pos_pred - tp\n iou_valid = 
np.logical_and(acc_valid, union > 0)\n iou[iou_valid] = tp[iou_valid] / union[iou_valid]\n macc = np.sum(acc[acc_valid]) / np.sum(acc_valid)\n miou = np.sum(iou[iou_valid]) / np.sum(iou_valid)\n fiou = np.sum(iou[iou_valid] * class_weights[iou_valid])\n pacc = np.sum(tp) / np.sum(pos_gt)\n\n if self._compute_boundary_iou:\n b_iou = np.full(self._num_classes, np.nan, dtype=float)\n b_tp = self._b_conf_matrix.diagonal()[:-1].astype(float)\n b_pos_gt = np.sum(self._b_conf_matrix[:-1, :-1], axis=0).astype(float)\n b_pos_pred = np.sum(self._b_conf_matrix[:-1, :-1], axis=1).astype(float)\n b_union = b_pos_gt + b_pos_pred - b_tp\n b_iou_valid = b_union > 0\n b_iou[b_iou_valid] = b_tp[b_iou_valid] / b_union[b_iou_valid]\n\n res = {}\n res[\"mIoU\"] = 100 * miou\n res[\"fwIoU\"] = 100 * fiou\n for i, name in enumerate(self._class_names):\n res[f\"IoU-{name}\"] = 100 * iou[i]\n if self._compute_boundary_iou:\n res[f\"BoundaryIoU-{name}\"] = 100 * b_iou[i]\n res[f\"min(IoU, B-Iou)-{name}\"] = 100 * min(iou[i], b_iou[i])\n res[\"mACC\"] = 100 * macc\n res[\"pACC\"] = 100 * pacc\n for i, name in enumerate(self._class_names):\n res[f\"ACC-{name}\"] = 100 * acc[i]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"sem_seg_evaluation.pth\")\n with PathManager.open(file_path, \"wb\") as f:\n torch.save(res, f)\n results = OrderedDict({\"sem_seg\": res})\n self._logger.info(results)\n\n def get_miou_value_from_dict(dict, subkey):\n for key, value in dict.items():\n if subkey in key and \"IoU\" in key:\n if np.isnan(value):\n return 0\n else:\n return value\n\n ret = OrderedDict()\n ret[\"sem_seg\"] = {\n \"mIoU\": results[\"sem_seg\"][\"mIoU\"],\n \"IoU.road\": get_miou_value_from_dict(results[\"sem_seg\"], \"road\"),\n \"IoU.sidewalk\": get_miou_value_from_dict(results[\"sem_seg\"], \"sidewalk\"),\n \"IoU.building\": get_miou_value_from_dict(results[\"sem_seg\"], \"building\"),\n \"IoU.wall\": get_miou_value_from_dict(results[\"sem_seg\"], \"wall\"),\n \"IoU.fence\": get_miou_value_from_dict(results[\"sem_seg\"], \"fence\"),\n \"IoU.pole\": get_miou_value_from_dict(results[\"sem_seg\"], \"pole\"),\n \"IoU.traffic light\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"traffic light\"\n ),\n \"IoU.traffic sign\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"traffic sign\"\n ),\n \"IoU.vegetation\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"vegetation\"\n ),\n \"IoU.terrain\": get_miou_value_from_dict(results[\"sem_seg\"], \"terrain\"),\n \"IoU.sky\": get_miou_value_from_dict(results[\"sem_seg\"], \"sky\"),\n \"IoU.person\": get_miou_value_from_dict(results[\"sem_seg\"], \"person\"),\n \"IoU.rider\": get_miou_value_from_dict(results[\"sem_seg\"], \"rider\"),\n \"IoU.car\": get_miou_value_from_dict(results[\"sem_seg\"], \"car\"),\n \"IoU.truck\": get_miou_value_from_dict(results[\"sem_seg\"], \"truck\"),\n \"IoU.bus\": get_miou_value_from_dict(results[\"sem_seg\"], \"bus\"),\n \"IoU.train\": get_miou_value_from_dict(results[\"sem_seg\"], \"train\"),\n \"IoU.motorcycle\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"motorcycle\"\n ),\n \"IoU.bicycle\": get_miou_value_from_dict(results[\"sem_seg\"], \"bicycle\"),\n }\n return ret\n\n def encode_json_sem_seg(self, sem_seg, input_file_name):\n \"\"\"\n Convert semantic segmentation to COCO stuff format with segments encoded as RLEs.\n See http://cocodataset.org/#format-results\n \"\"\"\n json_list = []\n for label in np.unique(sem_seg):\n if self._contiguous_id_to_dataset_id is not None:\n assert (\n label in 
self._contiguous_id_to_dataset_id\n ), \"Label {} is not in the metadata info for {}\".format(\n label, self._dataset_name\n )\n dataset_id = self._contiguous_id_to_dataset_id[label]\n else:\n dataset_id = int(label)\n mask = (sem_seg == label).astype(np.uint8)\n mask_rle = mask_util.encode(np.array(mask[:, :, None], order=\"F\"))[0]\n mask_rle[\"counts\"] = mask_rle[\"counts\"].decode(\"utf-8\")\n json_list.append(\n {\n \"file_name\": input_file_name,\n \"category_id\": dataset_id,\n \"segmentation\": mask_rle,\n }\n )\n return json_list\n\n def _mask_to_boundary(self, mask: np.ndarray, dilation_ratio=0.02):\n assert mask.ndim == 2, \"mask_to_boundary expects a 2-dimensional image\"\n h, w = mask.shape\n diag_len = np.sqrt(h ** 2 + w ** 2)\n dilation = max(1, int(round(dilation_ratio * diag_len)))\n kernel = np.ones((3, 3), dtype=np.uint8)\n\n padded_mask = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)\n eroded_mask_with_padding = cv2.erode(padded_mask, kernel, iterations=dilation)\n eroded_mask = eroded_mask_with_padding[1:-1, 1:-1]\n boundary = mask - eroded_mask\n return boundary" }, { "identifier": "PersoEvalHook", "path": "clouds/engine/hooks.py", "snippet": "class PersoEvalHook(HookBase):\n \"\"\"\n Run an evaluation function periodically, and at the end of training.\n\n It is executed every ``eval_period`` iterations and after the last iteration.\n \"\"\"\n\n def __init__(self, eval_period, eval_function, eval_after_train=True):\n \"\"\"\n Args:\n eval_period (int): the period to run `eval_function`. Set to 0 to\n not evaluate periodically (but still evaluate after the last iteration\n if `eval_after_train` is True).\n eval_function (callable): a function which takes no arguments, and\n returns a nested dict of evaluation metrics.\n eval_after_train (bool): whether to evaluate after the last iteration\n\n Note:\n This hook must be enabled in all or none workers.\n If you would like only certain workers to perform evaluation,\n give other workers a no-op function (`eval_function=lambda: None`).\n \"\"\"\n self._period = eval_period\n self._func = eval_function\n self._eval_after_train = eval_after_train\n\n def _do_eval(self):\n results = self._func()\n\n if results:\n assert isinstance(\n results, dict\n ), \"Eval function must return a dict. Got {} instead.\".format(results)\n\n flattened_results = flatten_results_dict(results)\n for k, v in flattened_results.items():\n try:\n v = float(v)\n except Exception as e:\n raise ValueError(\n \"[EvalHook] eval_function should return a nested dict of float. \"\n \"Got '{}: {}' instead.\".format(k, v)\n ) from e\n self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)\n\n # Evaluation may take different time among workers.\n # A barrier make them start the next iteration together.\n comm.synchronize()\n\n def before_train(self):\n \"\"\"\n Called before the first iteration.\n \"\"\"\n if \"debug\" in self.trainer.cfg.OUTPUT_DIR:\n pass\n else:\n results = self._func()\n\n if results:\n assert isinstance(\n results, dict\n ), \"Eval function must return a dict. Got {} instead.\".format(results)\n\n flattened_results = flatten_results_dict(results)\n for k, v in flattened_results.items():\n try:\n v = float(v)\n except Exception as e:\n raise ValueError(\n \"[EvalHook] eval_function should return a nested dict of float. 
\"\n \"Got '{}: {}' instead.\".format(k, v)\n ) from e\n self.trainer.storage.put_scalars(\n **flattened_results, smoothing_hint=False\n )\n\n def after_step(self):\n next_iter = self.trainer.iter + 1\n if self._period > 0 and next_iter % self._period == 0:\n # do the last eval in after_train\n if next_iter != self.trainer.max_iter:\n self._do_eval()\n\n def after_train(self):\n # This condition is to prevent the eval from running after a failed training\n if self._eval_after_train and self.trainer.iter + 1 >= self.trainer.max_iter:\n self._do_eval()\n # func is likely a closure that holds reference to the trainer\n # therefore we clean it to avoid circular reference in the end\n del self._func" }, { "identifier": "WandbWriter", "path": "clouds/utils/events.py", "snippet": "class WandbWriter(EventWriter):\n \"\"\"\n Write all scalars to a tensorboard file.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Args:\n log_dir (str): the directory to save the output events\n kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)`\n \"\"\"\n self._last_write = -1\n self._group_rules = [\n (IsIn(\"/\"), BaseRule()),\n (IsIn(\"loss\"), Prefix(\"train\")),\n # (IsIn(\"sem_seg\"), Prefix(\"val\")),\n (\n IsInList([\"lr\", \"time\", \"eta_seconds\", \"rank_data_time\", \"data_time\"]),\n Prefix(\"stats\"),\n ),\n ]\n\n def write(self):\n storage = get_event_storage()\n\n def _group_name(scalar_name):\n for rule, op in self._group_rules:\n if rule(scalar_name):\n return op(scalar_name)\n return scalar_name\n\n stats = {\n _group_name(name): scalars[0]\n for name, scalars in storage.latest().items()\n if scalars[1] > self._last_write\n }\n if len(stats) > 0:\n self._last_write = max([v[1] for k, v in storage.latest().items()])\n\n # storage.put_{image,histogram} is only meant to be used by\n # tensorboard writer. So we access its internal fields directly from here.\n if len(storage._vis_data) >= 1:\n stats[\"image\"] = [\n wandb.Image(img, caption=img_name)\n for img_name, img, step_num in storage._vis_data\n ]\n # Storage stores all image data and rely on this writer to clear them.\n # As a result it assumes only one writer will use its image data.\n # An alternative design is to let storage store limited recent\n # data (e.g. 
only the most recent image) that all writers can access.\n # In that case a writer may not see all image data if its period is long.\n storage.clear_images()\n\n if len(storage._histograms) >= 1:\n\n def create_bar(tag, bucket_limits, bucket_counts, **kwargs):\n data = [\n [label, val] for (label, val) in zip(bucket_limits, bucket_counts)\n ]\n table = wandb.Table(data=data, columns=[\"label\", \"value\"])\n return wandb.plot.bar(table, \"label\", \"value\", title=tag)\n\n stats[\"hist\"] = [create_bar(**params) for params in storage._histograms]\n\n storage.clear_histograms()\n\n if len(stats) == 0:\n return\n wandb.log(stats, step=storage.iter)\n\n def close(self):\n wandb.finish()" }, { "identifier": "setup_wandb", "path": "clouds/utils/events.py", "snippet": "def setup_wandb(cfg, args):\n if comm.is_main_process():\n init_args = {\n k.lower(): v\n for k, v in cfg.WANDB.items()\n if isinstance(k, str) and k not in [\"config\", \"name\"]\n }\n if \"config_exclude_keys\" in init_args:\n init_args[\"config\"] = cfg\n init_args[\"config\"][\"cfg_file\"] = args.config_file\n else:\n init_args[\"config\"] = {\n \"output_dir\": cfg.OUTPUT_DIR,\n \"train\": extract_dataset_from_string(cfg.DATASETS.TRAIN),\n \"test\": extract_dataset_from_string(cfg.DATASETS.TEST),\n \"iter\": cfg.SOLVER.MAX_ITER,\n \"lr\": cfg.SOLVER.BASE_LR,\n \"batch_size\": cfg.SOLVER.IMS_PER_BATCH,\n \"cfg_file\": args.config_file,\n }\n\n init_args[\"group\"] = get_base_name(cfg)\n if cfg.WANDB.NAME is not None:\n init_args[\"name\"] = cfg.WANDB.NAME\n else:\n init_args[\"name\"] = get_full_name_xp(init_args[\"group\"], cfg)\n if \"debug\" in cfg.OUTPUT_DIR:\n init_args[\"project\"] = \"debug\"\n wandb.init(**init_args)" } ]
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import ( MetadataCatalog, build_detection_train_loader, build_detection_test_loader, ) from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.modeling import build_model from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, inference_on_dataset, print_csv_format, DatasetEvaluator, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from fvcore.nn.precise_bn import get_bn_modules from clouds import ( CityscapesSemSegEvaluator, ClassicalSemSegEvaluator, MapperTrain, MapperTest, add_maskformer2_config, add_clouds_config, add_wandb_config, add_prerocessing_training_set_config, PersoEvalHook, add_repeat_factors, ) from clouds.utils import setup_wandb, WandbWriter import warnings import copy import itertools import logging import os import ast import torch import detectron2.utils.comm as comm
12,937
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/train_net.py CLOUDS Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to CLOUDS. """ def build_writers(self): writers = super().build_writers() # use wandb writer instead. writers[-1] = WandbWriter() return writers @classmethod def build_model(cls, cfg): """ Returns: torch.nn.Module: It now calls :func:`detectron2.modeling.build_model`. Overwrite it if you'd like a different model. """ model = build_model(cfg) # logger = logging.getLogger(__name__) # logger.info("Model:\n{}".format(model)) return model # @classmethod # def build_model(cls, cfg): # """ # Returns: # torch.nn.Module: # # It now calls :func:`detectron2.modeling.build_model`. # Overwrite it if you'd like a different model. # """ # model = build_model(cfg) # # logger = logging.getLogger(__name__) # # logger.info("Model:\n{}".format(model)) # return model @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") else: output_folder = os.path.join(cfg.OUTPUT_DIR, output_folder, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if ( evaluator_type == "bdd_sem_seg" or evaluator_type == "mapillary_sem_seg" or evaluator_type == "acdc_sem_seg" ): evaluator_list.append( ClassicalSemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, save_pl=cfg.MODEL.SAVE_PSEUDO_LABELS, ) ) # Cityscapes if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." # return CityscapesSemSegEvaluator(dataset_name) if cfg.MODEL.SAVE_PSEUDO_LABELS: return CityscapesSemSegEvaluator( dataset_name, save_pl=True, output_dir=output_folder ) else: return CityscapesSemSegEvaluator(dataset_name) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/train_net.py CLOUDS Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to CLOUDS. """ def build_writers(self): writers = super().build_writers() # use wandb writer instead. writers[-1] = WandbWriter() return writers @classmethod def build_model(cls, cfg): """ Returns: torch.nn.Module: It now calls :func:`detectron2.modeling.build_model`. Overwrite it if you'd like a different model. """ model = build_model(cfg) # logger = logging.getLogger(__name__) # logger.info("Model:\n{}".format(model)) return model # @classmethod # def build_model(cls, cfg): # """ # Returns: # torch.nn.Module: # # It now calls :func:`detectron2.modeling.build_model`. # Overwrite it if you'd like a different model. # """ # model = build_model(cfg) # # logger = logging.getLogger(__name__) # # logger.info("Model:\n{}".format(model)) # return model @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") else: output_folder = os.path.join(cfg.OUTPUT_DIR, output_folder, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if ( evaluator_type == "bdd_sem_seg" or evaluator_type == "mapillary_sem_seg" or evaluator_type == "acdc_sem_seg" ): evaluator_list.append( ClassicalSemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, save_pl=cfg.MODEL.SAVE_PSEUDO_LABELS, ) ) # Cityscapes if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." # return CityscapesSemSegEvaluator(dataset_name) if cfg.MODEL.SAVE_PSEUDO_LABELS: return CityscapesSemSegEvaluator( dataset_name, save_pl=True, output_dir=output_folder ) else: return CityscapesSemSegEvaluator(dataset_name) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper
mapper = MapperTrain(cfg, True)
5
2023-12-15 15:40:58+00:00
16k
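The entry above stops mid-method in Trainer.build_train_loader, and its gold next_line (snippet index 5, MapperTrain) constructs the training mapper. As a rough, non-authoritative sketch of how such a method is usually finished in a detectron2-style trainer, using only names already present in this entry's import statement (the actual continuation in the source file is not shown here):

    @classmethod
    def build_train_loader(cls, cfg):
        # Semantic segmentation dataset mapper; MapperTrain is @configurable,
        # so calling it as (cfg, True) routes through its from_config classmethod.
        mapper = MapperTrain(cfg, True)  # the gold next line of this entry
        # detectron2's build_detection_train_loader accepts a callable mapper
        # and assembles the training loader from cfg.DATASETS.TRAIN.
        return build_detection_train_loader(cfg, mapper=mapper)

DefaultTrainer invokes this classmethod when it sets up the training loop; the add_*_config helpers imported alongside it are typically chained onto get_cfg() in a setup() step before the trainer is constructed.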
modelscope/scepter
scepter/studio/preprocess/preprocess.py
[ { "identifier": "Config", "path": "scepter/modules/utils/config.py", "snippet": "class Config(object):\n def __init__(self,\n cfg_dict={},\n load=True,\n cfg_file=None,\n logger=None,\n parser_ins=None):\n '''\n support to parse json/dict/yaml_file of parameters.\n :param load: whether load parameters or not.\n :param cfg_dict: default None.\n :param cfg_level: default None, means the current cfg-level for recurrent cfg presentation.\n :param logger: logger instance for print the cfg log.\n one examples:\n import argparse\n parser = argparse.ArgumentParser(\n description=\"Argparser for Cate process:\\n\"\n )\n parser.add_argument(\n \"--stage\",\n dest=\"stage\",\n help=\"Running stage!\",\n default=\"train\",\n choices=[\"train\"]\n )\n\n cfg = Config(load=True, parser_ins=parser)\n '''\n # checking that the logger exists or not\n if logger is None:\n self.logger = StdMsg(name='Config')\n else:\n self.logger = logger\n self.cfg_dict = cfg_dict\n if load:\n if cfg_file is None:\n assert parser_ins is not None\n self.args = _parse_args(parser_ins)\n self.load_from_file(self.args.cfg_file)\n # os.environ[\"LAUNCHER\"] = self.args.launcher\n os.environ['DATA_ONLINE'] = str(self.args.data_online).lower()\n os.environ['SHARE_STORAGE'] = str(\n self.args.share_storage).lower()\n os.environ['ES_DEBUG'] = str(self.args.debug).lower()\n else:\n self.load_from_file(cfg_file)\n if 'ENV' not in self.cfg_dict:\n self.cfg_dict['ENV'] = {\n 'SEED': 2023,\n 'USE_PL': False,\n 'BACKEND': 'nccl',\n 'SYNC_BN': False,\n 'CUDNN_DETERMINISTIC': True,\n 'CUDNN_BENCHMARK': False\n }\n self.logger.info(\n f\"ENV is not set and will use default ENV as {self.cfg_dict['ENV']}; \"\n f'If want to change this value, please set them in your config.'\n )\n else:\n if 'SEED' not in self.cfg_dict['ENV']:\n self.cfg_dict['ENV']['SEED'] = 2023\n self.logger.info(\n f\"SEED is not set and will use default SEED as {self.cfg_dict['ENV']['SEED']}; \"\n f'If want to change this value, please set it in your config.'\n )\n os.environ['ES_SEED'] = str(self.cfg_dict['ENV']['SEED'])\n self._update_dict(self.cfg_dict)\n if load:\n self.logger.info(f'Parse cfg file as \\n {self.dump()}')\n\n def load_from_file(self, file_name):\n self.logger.info(f'Loading config from {file_name}')\n if file_name is None or not os.path.exists(file_name):\n self.logger.info(f'File {file_name} does not exist!')\n self.logger.warning(\n f\"Cfg file is None or doesn't exist, Skip loading config from {file_name}.\"\n )\n return\n if file_name.endswith('.json'):\n self.cfg_dict = self._load_json(file_name)\n self.logger.info(\n f'System take {file_name} as json, because we find json in this file'\n )\n elif file_name.endswith('.yaml'):\n self.cfg_dict = self._load_yaml(file_name)\n self.logger.info(\n f'System take {file_name} as yaml, because we find yaml in this file'\n )\n else:\n self.logger.info(\n f'No config file found! 
Because we do not find json or yaml in --cfg {file_name}'\n )\n\n def _update_dict(self, cfg_dict):\n def recur(key, elem):\n if type(elem) is dict:\n return key, Config(load=False,\n cfg_dict=elem,\n logger=self.logger)\n elif type(elem) is list:\n config_list = []\n for idx, ele in enumerate(elem):\n if type(ele) is str and ele[1:3] == 'e-':\n ele = float(ele)\n config_list.append(ele)\n elif type(ele) is str:\n config_list.append(ele)\n elif type(ele) is dict:\n config_list.append(\n Config(load=False,\n cfg_dict=ele,\n logger=self.logger))\n elif type(ele) is list:\n config_list.append(ele)\n else:\n config_list.append(ele)\n return key, config_list\n else:\n if type(elem) is str and elem[1:3] == 'e-':\n elem = float(elem)\n return key, elem\n\n dic = dict(recur(k, v) for k, v in cfg_dict.items())\n self.__dict__.update(dic)\n\n def _load_json(self, cfg_file):\n '''\n :param cfg_file:\n :return:\n '''\n if cfg_file is None:\n self.logger.warning(\n f'Cfg file is None, Skip loading config from {cfg_file}.')\n return {}\n file_name = cfg_file\n try:\n cfg = json.load(open(file_name, 'r'))\n except Exception as e:\n self.logger.error(f'Load json from {cfg_file} error. Message: {e}')\n sys.exit()\n return cfg\n\n def _load_yaml(self, cfg_file):\n '''\n if replace some parameters from Base, You can reference the base parameters use Base.\n\n :param cfg_file:\n :return:\n '''\n if cfg_file is None:\n self.logger.warning(\n f'Cfg file is None, Skip loading config from {cfg_file}.')\n return {}\n file_name = cfg_file\n try:\n with open(cfg_file, 'r') as f:\n cfg = yaml.load(f.read(), Loader=yaml.SafeLoader)\n except Exception as e:\n self.logger.error(f'Load yaml from {cfg_file} error. Message: {e}')\n sys.exit()\n if '_BASE_RUN' not in cfg.keys() and '_BASE_MODEL' not in cfg.keys(\n ) and '_BASE' not in cfg.keys():\n return cfg\n\n if '_BASE' in cfg.keys():\n if cfg['_BASE'][1] == '.':\n prev_count = cfg['_BASE'].count('..')\n cfg_base_file = self._path_join(\n file_name.split('/')[:(-1 - cfg['_BASE'].count('..'))] +\n cfg['_BASE'].split('/')[prev_count:])\n else:\n cfg_base_file = cfg['_BASE'].replace(\n './', file_name.replace(file_name.split('/')[-1], ''))\n cfg_base = self._load_yaml(cfg_base_file)\n cfg = self._merge_cfg_from_base(cfg_base, cfg)\n else:\n if '_BASE_RUN' in cfg.keys():\n if cfg['_BASE_RUN'][1] == '.':\n prev_count = cfg['_BASE_RUN'].count('..')\n cfg_base_file = self._path_join(\n file_name.split('/')[:(-1 - prev_count)] +\n cfg['_BASE_RUN'].split('/')[prev_count:])\n else:\n cfg_base_file = cfg['_BASE_RUN'].replace(\n './', file_name.replace(file_name.split('/')[-1], ''))\n cfg_base = self._load_yaml(cfg_base_file)\n cfg = self._merge_cfg_from_base(cfg_base,\n cfg,\n preserve_base=True)\n if '_BASE_MODEL' in cfg.keys():\n if cfg['_BASE_MODEL'][1] == '.':\n prev_count = cfg['_BASE_MODEL'].count('..')\n cfg_base_file = self._path_join(\n file_name.split('/')[:(\n -1 - cfg['_BASE_MODEL'].count('..'))] +\n cfg['_BASE_MODEL'].split('/')[prev_count:])\n else:\n cfg_base_file = cfg['_BASE_MODEL'].replace(\n './', file_name.replace(file_name.split('/')[-1], ''))\n cfg_base = self._load_yaml(cfg_base_file)\n cfg = self._merge_cfg_from_base(cfg_base, cfg)\n return cfg\n\n def _path_join(self, path_list):\n path = ''\n for p in path_list:\n path += p + '/'\n return path[:-1]\n\n def items(self):\n return self.cfg_dict.items()\n\n def _merge_cfg_from_base(self, cfg_base, cfg, preserve_base=False):\n for k, v in cfg.items():\n if k in cfg_base.keys():\n if isinstance(v, dict):\n 
self._merge_cfg_from_base(cfg_base[k], v)\n else:\n cfg_base[k] = v\n else:\n if 'BASE' not in k or preserve_base:\n cfg_base[k] = v\n return cfg_base\n\n def _merge_cfg_from_command(self, args, cfg):\n assert len(\n args.opts\n ) % 2 == 0, f'Override list {args.opts} has odd length: {len(args.opts)}'\n\n keys = args.opts[0::2]\n vals = args.opts[1::2]\n\n # maximum supported depth 3\n for idx, key in enumerate(keys):\n key_split = key.split('.')\n assert len(\n key_split\n ) <= 4, 'Key depth error. \\n Maximum depth: 3\\n Get depth: {}'.format(\n len(key_split))\n assert key_split[0] in cfg.keys(), 'Non-existant key: {}.'.format(\n key_split[0])\n if len(key_split) == 2:\n assert key_split[1] in cfg[\n key_split[0]].keys(), 'Non-existant key: {}'.format(key)\n elif len(key_split) == 3:\n assert key_split[1] in cfg[\n key_split[0]].keys(), 'Non-existant key: {}'.format(key)\n assert key_split[2] in cfg[key_split[0]][\n key_split[1]].keys(), 'Non-existant key: {}'.format(key)\n elif len(key_split) == 4:\n assert key_split[1] in cfg[\n key_split[0]].keys(), 'Non-existant key: {}'.format(key)\n assert key_split[2] in cfg[key_split[0]][\n key_split[1]].keys(), 'Non-existant key: {}'.format(key)\n assert key_split[3] in cfg[key_split[0]][key_split[1]][\n key_split[2]].keys(), 'Non-existant key: {}'.format(key)\n\n if len(key_split) == 1:\n cfg[key_split[0]] = vals[idx]\n elif len(key_split) == 2:\n cfg[key_split[0]][key_split[1]] = vals[idx]\n elif len(key_split) == 3:\n cfg[key_split[0]][key_split[1]][key_split[2]] = vals[idx]\n elif len(key_split) == 4:\n cfg[key_split[0]][key_split[1]][key_split[2]][\n key_split[3]] = vals[idx]\n\n return cfg\n\n def __repr__(self):\n return '{}\\n'.format(self.dump())\n\n def dump(self):\n return json.dumps(self.cfg_dict, indent=2)\n\n def deep_copy(self):\n return copy.deepcopy(self)\n\n def have(self, name):\n if name in self.__dict__:\n return True\n return False\n\n def get(self, name, default=None):\n if name in self.__dict__:\n return self.__dict__[name]\n return default\n\n def __getitem__(self, key):\n return self.__dict__.__getitem__(key)\n\n def __setattr__(self, key, value):\n super().__setattr__(key, value)\n if hasattr(self, 'cfg_dict') and key in self.cfg_dict:\n if isinstance(value, Config):\n value = value.cfg_dict\n self.cfg_dict[key] = value\n\n def __setitem__(self, key, value):\n self.__dict__[key] = value\n self.__setattr__(key, value)\n\n def __iter__(self):\n return iter(self.__dict__)\n\n def set(self, name, value):\n new_dict = {name: value}\n self.__dict__.update(new_dict)\n self.__setattr__(name, value)\n\n def get_dict(self):\n return self.cfg_dict\n\n def get_lowercase_dict(self, cfg_dict=None):\n if cfg_dict is None:\n cfg_dict = self.get_dict()\n config_new = {}\n for key, val in cfg_dict.items():\n if isinstance(key, str):\n if isinstance(val, dict):\n config_new[key.lower()] = self.get_lowercase_dict(val)\n else:\n config_new[key.lower()] = val\n else:\n config_new[key] = val\n return config_new\n\n @staticmethod\n def get_plain_cfg(cfg=None):\n if isinstance(cfg, Config):\n cfg_new = {}\n cfg_dict = cfg.get_dict()\n for key, val in cfg_dict.items():\n if isinstance(val, (Config, dict, list)):\n cfg_new[key] = Config.get_plain_cfg(val)\n elif isinstance(val, (str, numbers.Number)):\n cfg_new[key] = val\n return cfg_new\n elif isinstance(cfg, dict):\n cfg_new = {}\n cfg_dict = cfg\n for key, val in cfg_dict.items():\n if isinstance(val, (Config, dict, list)):\n cfg_new[key] = Config.get_plain_cfg(val)\n elif isinstance(val, 
(str, numbers.Number)):\n cfg_new[key] = val\n return cfg_new\n elif isinstance(cfg, list):\n cfg_new = []\n cfg_list = cfg\n for val in cfg_list:\n if isinstance(val, (Config, dict, list)):\n cfg_new.append(Config.get_plain_cfg(val))\n elif isinstance(val, (str, numbers.Number)):\n cfg_new.append(val)\n return cfg_new\n else:\n return cfg" }, { "identifier": "FS", "path": "scepter/modules/utils/file_system.py", "snippet": "FS = FileSystem()" }, { "identifier": "CreateDatasetUI", "path": "scepter/studio/preprocess/caption_editor_ui/create_dataset_ui.py", "snippet": "class CreateDatasetUI(UIBase):\n def __init__(self, cfg, is_debug=False, language='en'):\n self.work_dir = cfg.WORK_DIR\n self.dir_list = FS.walk_dir(self.work_dir, recurse=False)\n self.cache_file = {}\n self.meta_dict = {}\n self.dataset_list = self.load_history()\n self.components_name = CreateDatasetUIName(language)\n\n def load_meta(self, meta_file):\n dataset_meta = json.load(open(meta_file, 'r'))\n return dataset_meta\n\n def write_csv(self, file_list, save_csv, data_folder):\n with FS.put_to(save_csv) as local_path:\n print(local_path)\n with open(local_path, 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['Target:FILE', 'Prompt'])\n for one_file in file_list:\n relative_file = one_file['relative_path']\n if relative_file.startswith('/'):\n relative_file = relative_file[1:]\n writer.writerow([relative_file, one_file['caption']])\n return save_csv\n\n def write_file_list(self, file_list, save_csv):\n with FS.put_to(save_csv) as local_path:\n print(local_path)\n with open(local_path, 'w') as f:\n for one_file in file_list:\n is_flag, file_path = self.del_prefix(\n one_file['image_path'], prefix=one_file['prefix'])\n f.write('{},{},{},{}\\n'.format(file_path,\n one_file['width'],\n one_file['height'],\n one_file['caption']))\n return save_csv\n\n def save_meta(self, meta, dataset_folder):\n meta_file = os.path.join(dataset_folder, 'meta.json')\n save_meta = copy.deepcopy(meta)\n if 'local_work_dir' in meta:\n save_meta.pop('local_work_dir')\n if 'work_dir' in meta:\n save_meta.pop('work_dir')\n with FS.put_to(meta_file) as local_path:\n json.dump(save_meta, open(local_path, 'w'))\n return meta_file\n\n def construct_meta(self, cursor, file_list, dataset_folder, user_name):\n '''\n {\n \"dataset_name\": \"xxxx\",\n \"dataset_scale\": 100,\n \"file_list\": \"xxxxx\", # image_path#;#width#;#height#;#caption\n \"update_time\": \"\",\n \"create_time\": \"\"\n }\n '''\n train_csv = os.path.join(dataset_folder, 'train.csv')\n train_csv = self.write_csv(file_list, train_csv, dataset_folder)\n save_file_list = os.path.join(dataset_folder, 'file.csv')\n save_file_list = self.write_file_list(file_list, save_file_list)\n meta = {\n 'dataset_name': user_name,\n 'cursor': cursor,\n 'file_list': file_list,\n 'train_csv': train_csv,\n 'save_file_list': save_file_list\n }\n self.save_meta(meta, dataset_folder)\n return meta\n\n def load_from_list(self, save_file, dataset_folder, local_dataset_folder):\n file_list = []\n images_folder = os.path.join(local_dataset_folder, 'images')\n os.makedirs(images_folder, exist_ok=True)\n with FS.get_from(save_file) as local_path:\n all_remote_list, all_local_list = [], []\n all_save_list = []\n with open(local_path, 'r') as f:\n for line in tqdm(f):\n line = line.strip()\n if line == '':\n continue\n try:\n image_path, width, height, caption = line.split(\n '#;#', 3)\n except Exception:\n try:\n image_path, width, height, caption = line.split(\n ',', 3)\n except Exception:\n raise 
gr.Error('列表只支持,或#;#作为分割符,四列分别为图像路径/宽/高/描述')\n is_legal, new_path, prefix = self.find_prefix(image_path)\n try:\n int(width), int(height)\n except Exception:\n raise gr.Error(f'不合法的width({width}),height({height})')\n\n if not is_legal:\n raise gr.Error(\n f'路径不支持{image_path},应该为oss路径(oss://)或者省略前缀(xxx/xxx)'\n )\n relative_path = os.path.join('images',\n image_path.split('/')[-1])\n\n all_remote_list.append(new_path)\n all_local_list.append(\n os.path.join(local_dataset_folder, relative_path))\n all_save_list.append(\n os.path.join(dataset_folder, relative_path))\n file_list.append({\n 'image_path':\n os.path.join(dataset_folder, relative_path),\n 'relative_path':\n relative_path,\n 'width':\n int(width),\n 'height':\n int(height),\n 'caption':\n caption,\n 'prefix':\n prefix,\n 'edit_caption':\n caption\n })\n cache_file_list = []\n for idx, local_path in enumerate(\n FS.get_batch_objects_from(all_remote_list)):\n if local_path is None:\n raise gr.Error(f'下载图像失败{all_remote_list[idx]}')\n _ = FS.put_object_from_local_file(local_path, all_local_list[idx])\n cache_file_list.append(local_path)\n\n for local_path, target_path, flg in FS.put_batch_objects_to(\n cache_file_list, all_save_list):\n if not flg:\n raise gr.Error(f'上传图像失败{local_path}')\n if os.path.exists(local_path):\n try:\n os.remove(local_path)\n except Exception:\n pass\n\n return file_list\n\n def load_from_zip(self, save_file, data_folder, local_dataset_folder):\n with FS.get_from(save_file) as local_path:\n res = os.popen(\n f\"unzip -o '{local_path}' -d '{local_dataset_folder}'\")\n res = res.readlines()\n if not os.path.exists(local_dataset_folder):\n raise gr.Error(f'解压{save_file}失败{str(res)}')\n file_folder = None\n train_list = None\n hit_dir = None\n raw_list = []\n mac_osx = os.path.join(local_dataset_folder, '__MACOSX')\n if os.path.exists(mac_osx):\n res = os.popen(f\"rm -rf '{mac_osx}'\")\n res = res.readlines()\n for one_dir in FS.walk_dir(local_dataset_folder, recurse=False):\n if one_dir.endswith('__MACOSX'):\n res = os.popen(f\"rm -rf '{one_dir}'\")\n res = res.readlines()\n continue\n if FS.isdir(one_dir):\n sub_dir = FS.walk_dir(one_dir)\n for one_s_dir in sub_dir:\n if FS.isdir(one_s_dir) and one_s_dir.split(\n one_dir)[1].replace('/', '') == 'images':\n file_folder = one_s_dir\n hit_dir = one_dir\n if FS.isfile(one_s_dir) and one_s_dir.split(\n one_dir)[1].replace('/', '') == 'train.csv':\n train_list = one_s_dir\n if file_folder is not None and train_list is not None:\n break\n if (one_s_dir.endswith('.jpg')\n or one_s_dir.endswith('.jpeg')\n or one_s_dir.endswith('.png')\n or one_s_dir.endswith('.webp')):\n raw_list.append(one_s_dir)\n else:\n if (one_dir.endswith('.jpg') or one_dir.endswith('.jpeg')\n or one_dir.endswith('.png')\n or one_dir.endswith('.webp')):\n raw_list.append(one_dir)\n\n if file_folder is None and len(raw_list) < 1:\n raise gr.Error(\n \"images folder or train.csv doesn't exists, or nothing exists in your zip\"\n )\n new_file_folder = f'{local_dataset_folder}/images'\n os.makedirs(new_file_folder, exist_ok=True)\n if file_folder is not None:\n res = os.popen(f\"mv '{file_folder}'/* '{new_file_folder}'\")\n res = res.readlines()\n elif len(raw_list) > 0:\n raw_list = list(set(raw_list))\n for img_id, cur_image in enumerate(raw_list):\n _, surfix = os.path.splitext(cur_image)\n try:\n os.rename(\n os.path.abspath(cur_image),\n f'{new_file_folder}/{get_md5(cur_image)}{surfix}')\n raw_list[img_id] = [\n os.path.join('images',\n f'{get_md5(cur_image)}{surfix}'),\n cur_image.split('/')[-1]\n 
]\n except Exception as e:\n print(e)\n\n if not os.path.exists(new_file_folder):\n raise gr.Error(f'{str(res)}')\n new_train_list = f'{local_dataset_folder}/train.csv'\n if train_list is None or not os.path.exists(train_list):\n with open(new_train_list, 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['Target:FILE', 'Prompt'])\n for cur_image, cur_prompt in raw_list:\n writer.writerow([cur_image, cur_prompt])\n else:\n res = os.popen(f\"mv '{train_list}' '{new_train_list}'\")\n res = res.readlines()\n if not os.path.exists(new_train_list):\n raise gr.Error(f'{str(res)}')\n res = os.popen(f\"rm -rf '{hit_dir}'\")\n res = res.readlines()\n file_list = self.load_train_csv(new_train_list, data_folder)\n return file_list\n\n def get_image_meta(self, image):\n img = Image.open(image)\n return img.size\n\n def load_train_csv(self, file_path, data_folder):\n base_folder = os.path.dirname(file_path)\n file_list = []\n with open(file_path, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n image_path, prompt = row[0], row[1]\n if image_path == 'Target:FILE':\n continue\n local_image_path = os.path.join(base_folder, image_path)\n w, h = self.get_image_meta(local_image_path)\n file_list.append({\n 'image_path':\n os.path.join(data_folder, image_path),\n 'relative_path':\n image_path,\n 'width':\n w,\n 'height':\n h,\n 'caption':\n prompt,\n 'prefix':\n '',\n 'edit_caption':\n prompt\n })\n return file_list\n\n def find_prefix(self, file_path):\n for k in FS._prefix_to_clients.keys():\n if file_path.startswith(k):\n return True, file_path, ''\n elif FS.exists(os.path.join(k, file_path)):\n return True, os.path.join(k, file_path), ''\n elif FS.exists(os.path.join(k, 'datasets', file_path)):\n return True, os.path.join(k, 'datasets', file_path), 'datasets'\n return False, None, None\n\n def del_prefix(self, file_path, prefix=''):\n for k in FS._prefix_to_clients.keys():\n if file_path.startswith(k):\n file_path = file_path.replace(k, '')\n while file_path.startswith('/'):\n file_path = file_path[1:]\n if not prefix == '' and file_path.startswith(prefix):\n file_path = file_path.split(prefix)[-1]\n while file_path.startswith('/'):\n file_path = file_path[1:]\n return True, file_path\n return False, file_path\n\n def load_history(self):\n dataset_list = []\n for one_dir in self.dir_list:\n if FS.isdir(one_dir):\n meta_file = os.path.join(one_dir, 'meta.json')\n if FS.exists(meta_file):\n local_dataset_folder, _ = FS.map_to_local(one_dir)\n local_dataset_folder = FS.get_dir_to_local_dir(\n one_dir, local_dataset_folder)\n meta_data = self.load_meta(\n os.path.join(local_dataset_folder, 'meta.json'))\n meta_data['local_work_dir'] = local_dataset_folder\n meta_data['work_dir'] = one_dir\n dataset_list.append(meta_data['dataset_name'])\n self.meta_dict[meta_data['dataset_name']] = meta_data\n return dataset_list\n\n def create_ui(self):\n with gr.Box():\n gr.Markdown(self.components_name.user_direction)\n with gr.Box():\n with gr.Row():\n with gr.Column(scale=1, min_width=0):\n self.dataset_name = gr.Dropdown(\n label=self.components_name.dataset_name,\n choices=self.dataset_list,\n interactive=True)\n with gr.Column(scale=1, min_width=0):\n self.refresh_dataset_name = gr.Button(\n value=self.components_name.refresh_list_button)\n self.btn_create_datasets = gr.Button(\n value=self.components_name.btn_create_datasets)\n self.btn_create_datasets_from_file = gr.Button(\n value=self.components_name.\n btn_create_datasets_from_file)\n self.panel_state = gr.Checkbox(label='panel_state',\n 
value=False,\n visible=False)\n with gr.Column(scale=2, min_width=0):\n with gr.Row(equal_height=True):\n with gr.Column(visible=False, min_width=0) as panel:\n self.user_data_name = gr.Text(\n label=self.components_name.user_data_name,\n value='',\n interactive=True)\n self.user_data_name_state = gr.State(value='')\n self.create_mode = gr.State(value=0)\n with gr.Column(visible=False,\n min_width=0) as file_panel:\n self.file_path = gr.File(\n label=self.components_name.zip_file,\n min_width=0,\n file_types=['.zip', '.txt', '.csv'])\n self.file_path_url = gr.Text(\n label=self.components_name.zip_file_url,\n value='',\n visible=False)\n with gr.Column(visible=False,\n min_width=0) as btn_panel:\n self.random_data_button = gr.Button(\n value=refresh_symbol)\n self.confirm_data_button = gr.Button(\n value=self.components_name.confirm_data_button)\n with gr.Column(visible=False,\n min_width=0) as modify_panel:\n self.modify_data_button = gr.Button(\n value=self.components_name.modify_data_button)\n\n self.dataset_panel = panel\n self.btn_panel = btn_panel\n self.file_panel = file_panel\n self.modify_panel = modify_panel\n\n def set_callbacks(self, gallery_dataset, export_dataset):\n def show_dataset_panel():\n return (gr.Column(visible=False), gr.Column(visible=True),\n gr.Column(visible=True),\n gr.Checkbox(value=False, visible=False),\n gr.Text(value=get_random_dataset_name(),\n interactive=True), 1)\n\n def show_file_panel():\n return (gr.Column(visible=True), gr.Column(visible=True),\n gr.Column(visible=True),\n gr.Checkbox(value=False, visible=False),\n gr.Text(value=get_random_dataset_name(), interactive=True),\n gr.File(value=None), gr.Text(value='', visible=False), 2)\n\n def get_random_dataset_name():\n data_name = 'name-version-{0:%Y%m%d_%H_%M_%S}'.format(\n datetime.datetime.now())\n return data_name\n\n def refresh():\n return gr.Dropdown(value=self.dataset_list[-1]\n if len(self.dataset_list) > 0 else '',\n choices=self.dataset_list)\n\n self.refresh_dataset_name.click(refresh, outputs=[self.dataset_name])\n\n def confirm_create_dataset(user_name, create_mode, file_url, file_path,\n panel_state):\n if user_name.strip() == '' or ' ' in user_name or '/' in user_name:\n raise gr.Error(self.components_name.illegal_data_name_err1)\n\n if len(user_name.split('-')) < 3:\n raise gr.Error(self.components_name.illegal_data_name_err2)\n\n if '.' 
in user_name:\n raise gr.Error(self.components_name.illegal_data_name_err3)\n\n if not file_url.strip() == '' and file_path is not None:\n raise gr.Error(self.components_name.illegal_data_name_err4)\n if create_mode == 1 and not file_url.strip() == '':\n file_name, surfix = os.path.splitext(file_url.split('?')[0])\n save_file = os.path.join(self.work_dir, f'{user_name}{surfix}')\n with FS.put_to(save_file) as local_path:\n res = os.popen(f\"wget '{file_url}' -O '{local_path}'\")\n res.readlines()\n if not FS.exists(save_file):\n raise gr.Error(\n f'{self.components_name.illegal_data_err1} {str(res)}')\n elif create_mode == 2 and file_path is not None and file_path.name:\n self.cache_file[user_name] = {\n 'file_name': file_path.name,\n 'surfix': os.path.splitext(file_path.name)[-1]\n }\n cache_file = self.cache_file.pop(user_name)\n surfix = cache_file['surfix']\n ori_file = cache_file['file_name']\n save_file = os.path.join(self.work_dir, f'{user_name}{surfix}')\n with FS.put_to(save_file) as local_path:\n res = os.popen(f\"cp '{ori_file}' '{local_path}'\")\n res = res.readlines()\n if not FS.exists(save_file):\n raise gr.Error(\n f'{self.components_name.illegal_data_err1}{str(res)}')\n else:\n surfix = None\n # untar file or create blank dataset\n dataset_folder = os.path.join(self.work_dir, user_name)\n local_dataset_folder, _ = FS.map_to_local(dataset_folder)\n if surfix == '.zip':\n file_list = self.load_from_zip(save_file, dataset_folder,\n local_dataset_folder)\n elif surfix in ['.txt', '.csv']:\n file_list = self.load_from_list(save_file, dataset_folder,\n local_dataset_folder)\n elif surfix is None:\n file_list = []\n else:\n raise gr.Error(\n f'{self.components_name.illegal_data_err2} {surfix}')\n is_flag = FS.put_dir_from_local_dir(local_dataset_folder,\n dataset_folder)\n if not is_flag:\n raise gr.Error(f'{self.components_name.illegal_data_err3}')\n\n cursor = 0 if len(file_list) > 0 else -1\n meta = self.construct_meta(cursor, file_list, dataset_folder,\n user_name)\n\n meta['local_work_dir'] = local_dataset_folder\n meta['work_dir'] = dataset_folder\n\n self.meta_dict[meta['dataset_name']] = meta\n self.dataset_list.append(meta['dataset_name'])\n return (\n gr.Checkbox(value=True, visible=False),\n gr.Dropdown(value=user_name, choices=self.dataset_list),\n )\n\n def clear_file():\n return gr.Text(visible=True)\n\n # Click Create\n self.btn_create_datasets.click(show_dataset_panel, [], [\n self.file_panel, self.dataset_panel, self.btn_panel,\n self.panel_state, self.user_data_name, self.create_mode\n ])\n\n self.btn_create_datasets_from_file.click(show_file_panel, [], [\n self.file_panel, self.dataset_panel, self.btn_panel,\n self.panel_state, self.user_data_name, self.file_path,\n self.file_path_url, self.create_mode\n ])\n\n # Click Refresh\n self.random_data_button.click(get_random_dataset_name, [],\n [self.user_data_name])\n\n self.file_path.clear(clear_file, outputs=[self.file_path_url])\n\n # Click Confirm\n self.confirm_data_button.click(confirm_create_dataset, [\n self.user_data_name, self.create_mode, self.file_path_url,\n self.file_path, self.panel_state\n ], [self.panel_state, self.dataset_name])\n\n def show_edit_panel(panel_state, data_name):\n if panel_state:\n return (gr.Row(visible=True), gr.Row(visible=True),\n gr.Row(visible=True), gr.Column(visible=True),\n gr.Column(visible=False), gr.Column(visible=False),\n data_name)\n else:\n return (gr.Row(visible=False), gr.Row(visible=False),\n gr.Row(visible=False), gr.Column(visible=False),\n gr.Column(), 
gr.Column(), data_name)\n\n self.panel_state.change(\n show_edit_panel, [self.panel_state, self.dataset_name], [\n gallery_dataset.gallery_panel, gallery_dataset.upload_panel,\n export_dataset.export_panel, self.modify_panel,\n self.file_panel, self.btn_panel, self.user_data_name_state\n ])\n\n def modify_data_name(user_name, prev_data_name):\n print(\n f'Current file name {prev_data_name}, new file name {user_name}.'\n )\n if user_name.strip() == '' or ' ' in user_name or '/' in user_name:\n raise gr.Error(self.components_name.illegal_data_name_err1)\n if len(user_name.split('-')) < 3:\n raise gr.Error(self.components_name.illegal_data_name_err2)\n if '.' in user_name:\n raise gr.Error(self.components_name.illegal_data_name_err3)\n if user_name != prev_data_name:\n if prev_data_name in self.meta_dict:\n ori_meta = self.meta_dict[prev_data_name]\n dataset_folder = os.path.join(self.work_dir, user_name)\n local_dataset_folder, _ = FS.map_to_local(dataset_folder)\n os.makedirs(local_dataset_folder, exist_ok=True)\n is_flag = FS.get_dir_to_local_dir(ori_meta['work_dir'],\n local_dataset_folder)\n file_list = ori_meta['file_list']\n is_flag = FS.put_dir_from_local_dir(\n local_dataset_folder, dataset_folder)\n if not is_flag:\n raise gr.Error(self.components_name.illegal_data_err3)\n is_flag = FS.put_dir_from_local_dir(\n local_dataset_folder, dataset_folder)\n if not is_flag:\n raise gr.Error(self.components_name.illegal_data_err3)\n cursor = ori_meta['cursor']\n meta = self.construct_meta(cursor, file_list,\n dataset_folder, user_name)\n meta['local_work_dir'] = local_dataset_folder\n meta['work_dir'] = dataset_folder\n\n if prev_data_name in self.dataset_list:\n self.dataset_list.remove(prev_data_name)\n self.dataset_list.append(user_name)\n self.meta_dict.pop(prev_data_name)\n self.meta_dict[user_name] = meta\n _ = FS.delete_object(\n os.path.join(ori_meta['work_dir'], 'meta.json'))\n _ = FS.delete_object(\n os.path.join(ori_meta['local_work_dir'], 'meta.json'))\n else:\n raise gr.Error(self.components_name.modify_data_name_err1)\n return user_name, gr.Dropdown(\n choices=self.dataset_list,\n value=user_name,\n select_index=len(self.dataset_list) - 1)\n else:\n return user_name, gr.Dropdown()\n\n self.modify_data_button.click(\n modify_data_name,\n inputs=[self.user_data_name, self.user_data_name_state],\n outputs=[self.user_data_name_state, self.dataset_name])\n\n def dataset_change(user_name):\n if user_name is None or user_name == '':\n raise gr.Error(self.components_name.illegal_data_name_err5 +\n f'{user_name}')\n if user_name not in self.meta_dict:\n raise gr.Error(self.components_name.refresh_data_list_info1)\n return (gr.Column(visible=True), gr.Column(visible=False),\n gr.Checkbox(value=True, visible=False),\n gr.Text(value=user_name,\n interactive=True), gr.Text(value=user_name))\n\n self.dataset_name.change(dataset_change,\n inputs=[self.dataset_name],\n outputs=[\n self.dataset_panel, self.file_panel,\n self.panel_state, self.user_data_name,\n gallery_dataset.gallery_state\n ])" }, { "identifier": "DatasetGalleryUI", "path": "scepter/studio/preprocess/caption_editor_ui/dataset_gallery_ui.py", "snippet": "class DatasetGalleryUI(UIBase):\n def __init__(self, cfg, is_debug=False, language='en'):\n self.selected_path = ''\n self.selected_index = -1\n self.selected_index_prev = -1\n self.component_names = DatasetGalleryUIName(language)\n\n def create_ui(self):\n with gr.Row(variant='panel', visible=False,\n equal_height=True) as upload_panel:\n with gr.Column():\n self.upload_image 
= gr.Image(\n label=self.component_names.upload_image,\n tool='sketch',\n type='pil')\n with gr.Column(min_width=80):\n self.caption = gr.Textbox(\n label=self.component_names.image_caption,\n placeholder='',\n value='',\n lines=5)\n self.upload_button = gr.Button(\n value=self.component_names.upload_image_btn)\n\n with gr.Row(visible=False, equal_height=True) as gallery_panel:\n with gr.Row(visible=False):\n # self.gallery_state = gr.Checkbox(label='gallery_state', value=False, visible=False)\n self.cbg_hidden_dataset_filter = gr.CheckboxGroup(\n label='Dataset Filter')\n self.nb_hidden_dataset_filter_apply = gr.Number(\n label='Filter Apply', value=-1)\n self.btn_hidden_set_index = gr.Button(\n elem_id='dataset_tag_editor_btn_hidden_set_index')\n self.nb_hidden_image_index = gr.Number(value=None,\n label='hidden_idx_next')\n self.nb_hidden_image_index_prev = gr.Number(\n value=None, label='hidden_idx_prev')\n self.gallery_state = gr.Text(label='gallery_state',\n value='',\n visible=False)\n\n # with gr.Row(variant='panel', equal_height=True):\n with gr.Column(scale=1):\n self.gl_dataset_images = gr.Gallery(\n label=self.component_names.dataset_images,\n elem_id='dataset_tag_editor_dataset_gallery',\n columns=4)\n with gr.Column(scale=1):\n with gr.Row(equal_height=True):\n self.info = gr.Text(value='',\n label=None,\n show_label=False,\n interactive=False)\n with gr.Row(equal_height=True):\n with gr.Column(scale=1, min_width=0):\n self.ori_caption = gr.Textbox(\n label=self.component_names.ori_caption,\n placeholder='',\n value='',\n lines=10,\n autoscroll=False,\n interactive=False)\n with gr.Column(scale=1, min_width=0):\n self.edit_caption = gr.Textbox(\n label=self.component_names.edit_caption,\n placeholder='',\n value='',\n lines=10,\n autoscroll=False,\n interactive=True)\n with gr.Row(equal_height=True):\n self.modify_button = gr.Button(\n value=self.component_names.btn_modify)\n with gr.Row(equal_height=True):\n self.delete_button = gr.Button(\n value=self.component_names.btn_delete)\n\n self.upload_panel = upload_panel\n self.gallery_panel = gallery_panel\n\n def set_callbacks(self, create_dataset: CreateDatasetUI):\n def change_gallery(dataset_name):\n meta_data = create_dataset.meta_dict[dataset_name]\n if len(meta_data['file_list']) > 0:\n cursor = create_dataset.meta_dict[dataset_name]['cursor']\n else:\n cursor = -1\n image_list = [\n os.path.join(meta_data['local_work_dir'], v['relative_path'])\n for v in meta_data['file_list']\n ]\n if cursor >= 0:\n return gr.Gallery(label=dataset_name,\n value=image_list,\n selected_index=cursor)\n else:\n return gr.Gallery(label=dataset_name,\n value=image_list,\n selected_index=None)\n\n self.gallery_state.change(change_gallery,\n inputs=[create_dataset.user_data_name],\n outputs=[self.gl_dataset_images])\n\n def select_image(dataset_name, evt: gr.SelectData):\n meta_data = create_dataset.meta_dict[dataset_name]\n if len(meta_data['file_list']) > 0:\n current_info = meta_data['file_list'][evt.index]\n create_dataset.meta_dict[dataset_name]['cursor'] = evt.index\n cursor = evt.index\n else:\n current_info = {'caption': ''}\n cursor = -1\n\n all_number = len(meta_data['file_list'])\n if cursor >= 0:\n return (gr.Gallery(selected_index=cursor),\n gr.Textbox(value=current_info['caption']),\n gr.Textbox(value=current_info['edit_caption']),\n gr.Text(value=f'{cursor+1}/{all_number}'))\n else:\n return (gr.Gallery(value=[], selected_index=None),\n gr.Textbox(value=current_info['caption']),\n gr.Textbox(value=current_info['edit_caption']),\n 
gr.Text(value=f'{cursor + 1}/{all_number}'))\n\n def change_image(dataset_name):\n meta_data = create_dataset.meta_dict[dataset_name]\n cursor = create_dataset.meta_dict[dataset_name]['cursor']\n if cursor >= 0:\n current_info = meta_data['file_list'][cursor]\n else:\n current_info = {'caption': '', 'edit_caption': ''}\n all_number = len(meta_data['file_list'])\n return (gr.Textbox(value=current_info['caption']),\n gr.Textbox(value=current_info['edit_caption']),\n gr.Text(value=f'{cursor+1}/{all_number}'))\n\n def change_caption(dataset_name, edit_caption):\n cursor = create_dataset.meta_dict[dataset_name]['cursor']\n create_dataset.meta_dict[dataset_name]['file_list'][cursor][\n 'caption'] = edit_caption\n create_dataset.save_meta(\n create_dataset.meta_dict[dataset_name],\n create_dataset.meta_dict[dataset_name]['work_dir'])\n return gr.Textbox(value=edit_caption)\n\n self.gl_dataset_images.select(select_image,\n inputs=[create_dataset.user_data_name],\n outputs=[\n self.gl_dataset_images,\n self.ori_caption, self.edit_caption,\n self.info\n ])\n self.gl_dataset_images.change(\n change_image,\n inputs=[create_dataset.user_data_name],\n outputs=[self.ori_caption, self.edit_caption, self.info])\n\n self.modify_button.click(\n change_caption,\n inputs=[create_dataset.user_data_name, self.edit_caption],\n outputs=[self.ori_caption])\n\n def delete_file(dataset_name):\n cursor = create_dataset.meta_dict[dataset_name]['cursor']\n if len(create_dataset.meta_dict[dataset_name]['file_list']) < 1:\n raise gr.Error(self.component_names.delete_err1)\n current_file = create_dataset.meta_dict[dataset_name][\n 'file_list'].pop(cursor)\n local_file = os.path.join(\n create_dataset.meta_dict[dataset_name]['local_work_dir'],\n current_file['relative_path'])\n try:\n os.remove(local_file)\n except Exception:\n print(f'remove file {local_file} error')\n if cursor >= len(\n create_dataset.meta_dict[dataset_name]['file_list']):\n cursor = 0\n create_dataset.meta_dict[dataset_name]['cursor'] = cursor\n create_dataset.save_meta(\n create_dataset.meta_dict[dataset_name],\n create_dataset.meta_dict[dataset_name]['work_dir'])\n current_info = create_dataset.meta_dict[dataset_name]['file_list'][\n cursor]\n image_list = [\n os.path.join(\n create_dataset.meta_dict[dataset_name]['local_work_dir'],\n v['relative_path'])\n for v in create_dataset.meta_dict[dataset_name]['file_list']\n ]\n return (gr.Gallery(value=image_list, selected_index=cursor),\n gr.Textbox(value=current_info['caption']),\n gr.Textbox(value=current_info['caption']\n if current_info['edit_caption'] ==\n '' else current_info['edit_caption']),\n gr.Text(value=f'{cursor + 1}/{len(image_list)}'))\n\n self.delete_button.click(delete_file,\n inputs=[create_dataset.user_data_name],\n outputs=[\n self.gl_dataset_images, self.ori_caption,\n self.edit_caption, self.info\n ])\n\n def add_file(dataset_name, upload_image, caption):\n if 'image' in upload_image:\n image = upload_image['image']\n\n else:\n image = upload_image\n w, h = image.size\n meta = create_dataset.meta_dict[dataset_name]\n local_work_dir = meta['local_work_dir']\n work_dir = meta['work_dir']\n\n save_folder = os.path.join(local_work_dir, 'images')\n os.makedirs(save_folder, exist_ok=True)\n\n relative_path = os.path.join('images',\n f'{imagehash.phash(image)}.png')\n image_path = os.path.join(work_dir, relative_path)\n\n local_image_path = os.path.join(local_work_dir, relative_path)\n with FS.put_to(image_path) as local_path:\n image.save(local_path)\n\n image.save(local_image_path)\n\n 
meta['file_list'].append({\n 'image_path': image_path,\n 'relative_path': relative_path,\n 'width': w,\n 'height': h,\n 'caption': caption,\n 'prefix': '',\n 'edit_caption': caption\n })\n\n meta['cursor'] = len(meta['file_list']) - 1\n create_dataset.meta_dict[dataset_name] = meta\n image_list = [\n os.path.join(\n create_dataset.meta_dict[dataset_name]['local_work_dir'],\n v['relative_path'])\n for v in create_dataset.meta_dict[dataset_name]['file_list']\n ]\n return (gr.Gallery(value=image_list,\n selected_index=meta['cursor']),\n gr.Textbox(value=caption), gr.Textbox(value=caption),\n gr.Text(value=f\"{meta['cursor'] + 1}/{len(image_list)}\"))\n\n self.upload_button.click(add_file,\n inputs=[\n create_dataset.user_data_name,\n self.upload_image, self.caption\n ],\n outputs=[\n self.gl_dataset_images, self.ori_caption,\n self.edit_caption, self.info\n ])\n\n def edit_caption_change(dataset_name, edit_caption):\n meta = create_dataset.meta_dict[dataset_name]\n cursor = meta['cursor']\n if cursor >= 0:\n meta['file_list'][cursor]['edit_caption'] = edit_caption\n\n self.edit_caption.change(\n edit_caption_change,\n inputs=[create_dataset.user_data_name, self.edit_caption])" }, { "identifier": "ExportDatasetUI", "path": "scepter/studio/preprocess/caption_editor_ui/export_dataset_ui.py", "snippet": "class ExportDatasetUI(UIBase):\n def __init__(self, cfg, is_debug=False, language='en'):\n self.dataset_name = ''\n self.work_dir = cfg.WORK_DIR\n self.export_folder = os.path.join(self.work_dir, cfg.EXPORT_DIR)\n self.component_names = ExportDatasetUIName(language)\n\n def create_ui(self):\n with gr.Row(variant='panel', visible=False,\n equal_height=True) as export_panel:\n self.data_state = gr.State(value=False)\n with gr.Column(scale=1, min_width=0):\n self.export_to_zip = gr.Button(\n value=self.component_names.btn_export_zip)\n self.export_url = gr.File(\n label=self.component_names.export_file,\n visible=False,\n value=None,\n interactive=False,\n show_label=True)\n with gr.Column(scale=1, min_width=0):\n self.go_to_train = gr.Button(\n value=self.component_names.go_to_train, size='lg')\n self.export_panel = export_panel\n\n def set_callbacks(self, create_dataset, manager):\n def export_zip(dataset_name):\n meta = create_dataset.meta_dict[dataset_name]\n work_dir = meta['work_dir']\n local_work_dir = meta['local_work_dir']\n train_csv = os.path.join(work_dir, 'train.csv')\n if len(meta['file_list']) < 1:\n raise gr.Error(self.component_names.export_err1)\n train_csv = create_dataset.write_csv(meta['file_list'], train_csv,\n work_dir)\n _ = FS.get_from(train_csv, os.path.join(local_work_dir,\n 'train.csv'))\n save_file_list = work_dir + '_file.csv'\n save_file_list = create_dataset.write_file_list(\n meta['file_list'], save_file_list)\n _ = FS.get_from(save_file_list,\n os.path.join(local_work_dir, 'file.csv'))\n zip_path = os.path.join(self.export_folder, f'{dataset_name}.zip')\n with FS.put_to(zip_path) as local_zip:\n res = os.popen(\n f\"cd '{local_work_dir}' && mkdir -p '{dataset_name}' \"\n f\"&& cp -rf images '{dataset_name}/images' \"\n f\"&& cp -rf train.csv '{dataset_name}/train.csv' \"\n f\"&& zip -r '{os.path.abspath(local_zip)}' '{dataset_name}'/* \"\n f\"&& rm -rf '{dataset_name}'\")\n print(res.readlines())\n\n if not FS.exists(zip_path):\n raise gr.Error(self.component_names.export_zip_err1)\n create_dataset.save_meta(meta, work_dir)\n local_zip = FS.get_from(zip_path)\n return gr.File(value=local_zip, visible=True)\n\n self.export_to_zip.click(export_zip,\n 
inputs=[create_dataset.user_data_name],\n outputs=[self.export_url])\n\n def export_csv(dataset_name):\n meta = create_dataset.meta_dict[dataset_name]\n work_dir = meta['work_dir']\n local_work_dir = meta['local_work_dir']\n train_csv = os.path.join(work_dir, 'train.csv')\n if len(meta['file_list']) < 1:\n raise gr.Error(self.component_names.export_err1)\n train_csv = create_dataset.write_csv(meta['file_list'], train_csv,\n work_dir)\n _ = FS.get_from(train_csv, os.path.join(local_work_dir,\n 'train.csv'))\n save_file_list = os.path.join(work_dir, 'file.csv')\n save_file_list = create_dataset.write_file_list(\n meta['file_list'], save_file_list)\n local_file_csv = FS.get_from(\n save_file_list, os.path.join(local_work_dir, 'file.csv'))\n create_dataset.save_meta(meta, work_dir)\n is_flag = FS.put_object_from_local_file(\n local_file_csv,\n os.path.join(self.export_folder, dataset_name + '_file.csv'))\n if not is_flag:\n raise gr.Error(self.component_names.upload_err1)\n list_url = FS.get_url(os.path.join(self.export_folder,\n dataset_name + '_file.csv'),\n set_public=True)\n list_url = parse.unquote(list_url)\n if 'wulanchabu' in list_url:\n list_url = list_url.replace(\n '.cn-wulanchabu.oss-internal.aliyun-inc.',\n '.oss-cn-wulanchabu.aliyuncs.')\n else:\n list_url = list_url.replace('.oss-internal.aliyun-inc.',\n '.oss.aliyuncs.')\n if not list_url.split('/')[-1] == dataset_name + '_file.csv':\n list_url = os.path.join(os.path.dirname(list_url),\n dataset_name + '_file.csv')\n return gr.Text(value=list_url)\n\n # self.export_to_list.click(export_csv,\n # inputs=[create_dataset.user_data_name],\n # outputs=[self.export_url])\n\n def go_to_train(dataset_name):\n meta = create_dataset.meta_dict[dataset_name]\n work_dir = meta['work_dir']\n local_work_dir = meta['local_work_dir']\n train_csv = os.path.join(work_dir, 'train.csv')\n if len(meta['file_list']) < 1:\n raise gr.Error(self.component_names.export_err1)\n train_csv = create_dataset.write_csv(meta['file_list'], train_csv,\n work_dir)\n _ = FS.get_from(train_csv, os.path.join(local_work_dir,\n 'train.csv'))\n save_file_list = work_dir + '_file.csv'\n _ = create_dataset.write_file_list(meta['file_list'],\n save_file_list)\n return (gr.Tabs(selected='self_train'),\n gr.Textbox(value=os.path.abspath(local_work_dir)))\n\n self.go_to_train.click(\n go_to_train,\n inputs=[create_dataset.user_data_name],\n outputs=[manager.tabs, manager.self_train.trainer_ui.ms_data_name])" }, { "identifier": "init_env", "path": "scepter/studio/utils/env.py", "snippet": "def init_env(cfg_general):\n work_dir = cfg_general.WORK_DIR\n file_system = cfg_general.get('FILE_SYSTEM', None)\n if file_system is not None:\n if isinstance(file_system, list):\n for file_sys in file_system:\n _prefix = FS.init_fs_client(file_sys)\n elif file_system is not None:\n _prefix = FS.init_fs_client(file_system) # noqa\n is_flag = FS.make_dir(work_dir)\n assert is_flag\n return cfg_general" } ]
import os.path import gradio as gr from scepter.modules.utils.config import Config from scepter.modules.utils.file_system import FS from scepter.studio.preprocess.caption_editor_ui.create_dataset_ui import \ CreateDatasetUI from scepter.studio.preprocess.caption_editor_ui.dataset_gallery_ui import \ DatasetGalleryUI from scepter.studio.preprocess.caption_editor_ui.export_dataset_ui import \ ExportDatasetUI from scepter.studio.utils.env import init_env
13,149
# -*- coding: utf-8 -*- class PreprocessUI(): def __init__(self, cfg_general_file, is_debug=False, language='en', root_work_dir='./'): cfg_general = Config(cfg_file=cfg_general_file) cfg_general.WORK_DIR = os.path.join(root_work_dir, cfg_general.WORK_DIR)
# -*- coding: utf-8 -*- class PreprocessUI(): def __init__(self, cfg_general_file, is_debug=False, language='en', root_work_dir='./'): cfg_general = Config(cfg_file=cfg_general_file) cfg_general.WORK_DIR = os.path.join(root_work_dir, cfg_general.WORK_DIR)
if not FS.exists(cfg_general.WORK_DIR):
1
2023-12-21 02:01:48+00:00
16k
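A minimal, hypothetical sketch of how a completion record like the one ending above might be scored, assuming the record is loaded as a Python dict carrying the fields shown in this listing (context, import_statement, cropped_code, next_line, gold_snippet_index); `complete_fn` is a stand-in for any code-completion model, and the prompt layout is only one plausible choice, not a prescribed one.

def evaluate_record(record: dict, complete_fn) -> dict:
    # Assemble a prompt from the cross-file context snippets, the imports, and the truncated file.
    context_block = "\n\n".join(entry["snippet"] for entry in record["context"])
    prompt = "\n\n".join([context_block, record["import_statement"], record["cropped_code"]])
    # Treat the first non-empty line of the model's continuation as its next-line prediction.
    continuation = complete_fn(prompt)
    predicted = next((line for line in continuation.splitlines() if line.strip()), "").strip()
    reference = record["next_line"].strip()
    # gold_snippet_index presumably marks the context entry the reference line depends on.
    gold_identifier = record["context"][record["gold_snippet_index"]]["identifier"]
    return {
        "exact_match": predicted == reference,
        "gold_snippet": gold_identifier,
        "predicted": predicted,
        "reference": reference,
    }

# Example (trivially correct completion for the record above):
# evaluate_record(record, lambda prompt: "if not FS.exists(cfg_general.WORK_DIR):")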
RomGai/BrainVis
dc_ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "dc_ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "dc_ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "dc_ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "dc_ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "dc_ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "dc_ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "dc_ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "dc_ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "dc_ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = 
dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "dc_ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "dc_ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "dc_ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "dc_ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "dc_ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, 
ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n self.trainable = False\n \n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=False, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=False, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=False, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=False, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not 
only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "dc_ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "dc_ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "dc_ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "dc_ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n 
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,generator=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device, generator=generator)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * 
self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0" }, { "identifier": "PLMSSampler", "path": "dc_ldm/models/diffusion/plms.py", "snippet": "class PLMSSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n if ddim_eta != 0:\n raise ValueError('ddim_eta must be 0 for PLMS')\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for PLMS sampling is {size}')\n\n samples, intermediates = self.plms_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs\n )\n return samples, intermediates\n\n @torch.no_grad()\n def plms_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, generator=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device, generator=generator)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running PLMS Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)\n old_eps = []\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n old_eps=old_eps, t_next=ts_next)\n img, pred_x0, e_t = outs\n old_eps.append(e_t)\n if len(old_eps) >= 4:\n old_eps.pop(0)\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None):\n b, *_, device = *x.shape, x.device\n\n def get_model_output(x, t):\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n return e_t\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n\n def get_x_prev_and_pred_x0(e_t, index):\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n e_t = get_model_output(x, t)\n if len(old_eps) == 0:\n # Pseudo Improved Euler (2nd order)\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)\n e_t_next = get_model_output(x_prev, t_next)\n e_t_prime = (e_t + e_t_next) / 2\n elif len(old_eps) == 1:\n # 2nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (3 * e_t - old_eps[-1]) / 2\n elif len(old_eps) == 2:\n # 3nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12\n elif len(old_eps) >= 3:\n # 4nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24\n\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)\n\n return x_prev, pred_x0, e_t" }, { "identifier": "get_similarity_metric", "path": "eval_metrics.py", "snippet": "def get_similarity_metric(img1, img2, method='pair-wise', metric_name='mse', **kwargs):\n # img1: n, w, h, 3\n # img2: n, w, h, 3\n # all in pixel values: 0 ~ 255\n # return: list of scores 0 ~ 1.\n if img1.shape[-1] != 3:\n img1 = rearrange(img1, 'n c w h -> n w h c')\n if img2.shape[-1] != 3:\n img2 = rearrange(img2, 'n c w h -> n w h c')\n\n if method == 'pair-wise':\n eval_procedure_func = pair_wise_score \n elif method == 'n-way':\n eval_procedure_func = n_way_scores\n elif method == 'metrics-only':\n eval_procedure_func = metrics_only\n elif method == 'class':\n return get_n_way_top_k_acc(img1, img2, **kwargs)\n else:\n raise NotImplementedError\n\n if metric_name == 'mse':\n metric_func = mse_metric\n decision_func = smaller_the_better\n elif metric_name == 'pcc':\n metric_func = pcc_metric\n decision_func = larger_the_better\n elif metric_name == 'ssim':\n metric_func = ssim_metric\n decision_func = larger_the_better\n elif metric_name == 'psm':\n metric_func = psm_wrapper()\n decision_func = smaller_the_better\n elif metric_name == 'fid':\n metric_func = fid_wrapper()\n decision_func = smaller_the_better\n else:\n raise NotImplementedError\n \n return eval_procedure_func(img1, img2, metric_func, decision_func, **kwargs)" }, { "identifier": "FrozenImageEmbedder", "path": "dc_ldm/modules/encoders/modules.py", "snippet": "class FrozenImageEmbedder(AbstractEncoder):\n \"\"\"Uses the CLIP transformer encoder for text (from Hugging Face)\"\"\"\n def __init__(self, version=\"openai/clip-vit-large-patch14\", device=\"cuda\", max_length=77):\n super().__init__()\n # self.processor = AutoProcessor.from_pretrained(version)\n self.transformer = CLIPVisionModelWithProjection.from_pretrained(version)\n self.device = device\n self.max_length = max_length\n self.freeze()\n\n\n\n def freeze(self):\n self.transformer = self.transformer.eval()\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, inputs):\n # image = Image.open(requests.get(url, stream=True).raw)\n # inputs = self.processor(images=image, return_tensors=\"pt\")\n outputs = self.transformer(**inputs)\n image_embeds = outputs.image_embeds\n return image_embeds\n # z = outputs.last_hidden_state\n\n # return z\n\n def encode(self, inputs):\n return self(inputs)" } ]
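The PLMSSampler snippet above advances each step with a pseudo linear multistep (Adams-Bashforth) combination of the current and previous noise predictions. As a reading aid, here is that weighting rule isolated as a small function; this is an editor's sketch, not part of the dataset row, and the first-step look-ahead average is omitted because it needs a second model call:

import torch

def plms_eps_prime(e_t: torch.Tensor, old_eps: list) -> torch.Tensor:
    # Mirrors the branches in p_sample_plms above; old_eps holds up to the
    # three most recent noise predictions, newest last.
    if len(old_eps) == 0:
        return e_t  # the sampler instead averages e_t with a look-ahead e_t_next here
    if len(old_eps) == 1:
        return (3 * e_t - old_eps[-1]) / 2                                          # 2nd order
    if len(old_eps) == 2:
        return (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12                 # 3rd order
    return (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24  # 4th order

The combined estimate is then fed to the same x_prev / pred_x0 update that a single DDIM-style step would use.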
import os import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import torch.nn.functional as F from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.rank_zero import rank_zero_only from dc_ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from dc_ldm.modules.ema import LitEma from dc_ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from dc_ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL from dc_ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from dc_ldm.models.diffusion.ddim import DDIMSampler from dc_ldm.models.diffusion.plms import PLMSSampler from PIL import Image from eval_metrics import get_similarity_metric from dc_ldm.modules.encoders.modules import FrozenImageEmbedder
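Among these imports, get_similarity_metric from eval_metrics (its definition is in the context snippets above) is what the validation code further down uses to score generated images against ground truth. A minimal call sketch, assuming the repository's eval_metrics module is importable and that images are uint8 arrays in [0, 255] with shape (n, h, w, 3); the input arrays here are hypothetical placeholders:

import numpy as np
from eval_metrics import get_similarity_metric

gt = np.random.randint(0, 256, size=(4, 64, 64, 3), dtype=np.uint8)     # hypothetical ground-truth batch
pred = np.random.randint(0, 256, size=(4, 64, 64, 3), dtype=np.uint8)   # hypothetical reconstructions

# Pair-wise scoring returns a list of scores per the snippet's docstring;
# 'ssim' is larger-the-better and 'mse' smaller-the-better, as encoded by
# the decision functions selected inside get_similarity_metric.
ssim_scores = get_similarity_metric(pred, gt, method='pair-wise', metric_name='ssim')
mse_scores = get_similarity_metric(pred, gt, method='pair-wise', metric_name='mse')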
14,227
b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None,**kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): if ddim:
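The listing above is cut immediately after `if ddim:`. One detail worth calling out from its sampling loops is the inpainting-style blend applied when a mask and x0 are supplied: the known region is repeatedly overwritten with a re-noised copy of the ground truth, so only the region where the mask is zero is actually generated. A tiny stand-alone sketch of just that blend (editor's illustration; the mask semantics follow the formula in the code, 1 = keep from x0):

import torch

def blend_known_region(img, img_orig, mask):
    # img      : current partially denoised sample
    # img_orig : q_sample(x0, t), i.e. the ground truth noised to the same level
    # mask     : 1 where content is fixed to x0, 0 where it is generated
    return img_orig * mask + (1. - mask) * img

img = torch.zeros(1, 3, 4, 4)
img_orig = torch.ones(1, 3, 4, 4)
mask = torch.zeros(1, 1, 4, 4)
mask[..., :2, :] = 1.0                                # top half is "known"
blended = blend_known_region(img, img_orig, mask)     # top half forced to img_orig, bottom half kept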
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ddim_steps=300 ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.validation_count = 0 self.ddim_steps = ddim_steps self.return_cond = False self.output_path = None self.main_config = None self.best_val = 0.0 self.run_full_validation_threshold = 0.0 self.eval_avg = True def re_init_ema(self): if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. 
- alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def 
training_step(self, batch, batch_idx): self.train() self.cond_stage_model.train() ###到底是在哪里训练的 loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=False, on_epoch=True) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=False, on_epoch=True) return loss @torch.no_grad() def generate(self, data, num_samples, ddim_steps=300, HW=None, limit=None, state=None): # fmri_embedding: n, seq_len, embed_dim all_samples = [] if HW is None: shape = (self.p_channels, self.p_image_size, self.p_image_size) else: num_resolutions = len(self.ch_mult) shape = (self.p_channels, HW[0] // 2**(num_resolutions-1), HW[1] // 2**(num_resolutions-1)) model = self sampler = PLMSSampler(model) # sampler = DDIMSampler(model) model.eval() if torch.cuda.is_available(): state = torch.cuda.get_rng_state() if state is None else state torch.cuda.set_rng_state(state) else: state = torch.get_rng_state() if state is None else state torch.set_rng_state(state) # rng = torch.Generator(device=self.device).manual_seed(2022).set_state(state) # state = torch.cuda.get_rng_state() with model.ema_scope(): for count, item in enumerate(zip(data['eeg'], data['image'])): if limit is not None: if count >= limit: break latent = item[0] # fmri embedding gt_image = rearrange(item[1], 'h w c -> 1 c h w') # h w c print(f"rendering {num_samples} examples in {ddim_steps} steps.") # c = model.get_learned_conditioning(repeat(latent, 'h w -> c h w', c=num_samples).to(self.device)) c, re_latent = model.get_learned_conditioning(repeat(latent, 'h w -> c h w', c=num_samples).to(self.device)) samples_ddim, _ = sampler.sample(S=ddim_steps, conditioning=c, batch_size=num_samples, shape=shape, verbose=False, generator=None) x_samples_ddim = model.decode_first_stage(samples_ddim) x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0,min=0.0, max=1.0) gt_image = torch.clamp((gt_image+1.0)/2.0,min=0.0, max=1.0) all_samples.append(torch.cat([gt_image.detach().cpu(), x_samples_ddim.detach().cpu()], dim=0)) # put groundtruth at first # display as grid grid = torch.stack(all_samples, 0) grid = rearrange(grid, 'n b c h w -> (n b) c h w') grid = make_grid(grid, nrow=num_samples+1) # to image grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy() return grid, (255. * torch.stack(all_samples, 0).cpu().numpy()).astype(np.uint8), state def save_images(self, all_samples, suffix=0): # print('output_path') # print(self.output_path) if self.output_path is not None: os.makedirs(os.path.join(self.output_path, 'val', f'{self.validation_count}_{suffix}'), exist_ok=True) for sp_idx, imgs in enumerate(all_samples): # for copy_idx, img in enumerate(imgs[1:]): for copy_idx, img in enumerate(imgs): img = rearrange(img, 'c h w -> h w c') Image.fromarray(img).save(os.path.join(self.output_path, 'val', f'{self.validation_count}_{suffix}', f'test{sp_idx}-{copy_idx}.png')) def full_validation(self, batch, state=None): print('###### run full validation! 
######\n') grid, all_samples, state = self.generate(batch, ddim_steps=self.ddim_steps, num_samples=5, limit=None, state=state) metric, metric_list = self.get_eval_metric(all_samples) self.save_images(all_samples, suffix='%.4f'%metric[-1]) metric_dict = {f'val/{k}_full':v for k, v in zip(metric_list, metric)} # self.logger.log_metrics(metric_dict) grid_imgs = Image.fromarray(grid.astype(np.uint8)) # self.logger.log_image(key=f'samples_test_full', images=[grid_imgs]) if metric[-1] > self.best_val: self.best_val = metric[-1] torch.save( { 'model_state_dict': self.state_dict(), 'config': self.main_config, 'state': state }, os.path.join(self.output_path, 'checkpoint_best.pth') ) @torch.no_grad() def validation_step(self, batch, batch_idx): if batch_idx != 0: return if self.validation_count % 5 == 0 and self.trainer.current_epoch != 0: self.full_validation(batch) else: # pass grid, all_samples, state = self.generate(batch, ddim_steps=self.ddim_steps, num_samples=3, limit=5) metric, metric_list = self.get_eval_metric(all_samples, avg=self.eval_avg) grid_imgs = Image.fromarray(grid.astype(np.uint8)) # self.logger.log_image(key=f'samples_test', images=[grid_imgs]) metric_dict = {f'val/{k}':v for k, v in zip(metric_list, metric)} # self.logger.log_metrics(metric_dict) if metric[-1] > self.run_full_validation_threshold: self.full_validation(batch, state=state) self.validation_count += 1 def get_eval_metric(self, samples, avg=True): metric_list = ['mse', 'pcc', 'ssim', 'psm'] res_list = [] gt_images = [img[0] for img in samples] gt_images = rearrange(np.stack(gt_images), 'n c h w -> n h w c') samples_to_run = np.arange(1, len(samples[0])) if avg else [1] for m in metric_list: res_part = [] for s in samples_to_run: pred_images = [img[s] for img in samples] pred_images = rearrange(np.stack(pred_images), 'n c h w -> n h w c') res = get_similarity_metric(pred_images, gt_images, method='pair-wise', metric_name=m) res_part.append(np.mean(res)) res_list.append(np.mean(res_part)) res_part = [] for s in samples_to_run: pred_images = [img[s] for img in samples] pred_images = rearrange(np.stack(pred_images), 'n c h w -> n h w c') res = get_similarity_metric(pred_images, gt_images, 'class', None, n_way=50, num_trials=50, top_k=1, device='cuda') res_part.append(np.mean(res)) res_list.append(np.mean(res_part)) res_list.append(np.max(res_part)) metric_list.append('top-1-class') metric_list.append('top-1-class (max)') return res_list, metric_list def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with 
self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=True, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True self.train_cond_stage_only = False self.clip_tune = True if self.clip_tune: self.image_embedder = FrozenImageEmbedder() self.cls_tune = False def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() def freeze_diffusion_model(self): for param in self.model.parameters(): param.requires_grad = False def unfreeze_diffusion_model(self): for param in self.model.parameters(): param.requires_grad = True def freeze_cond_stage(self): for param in self.cond_stage_model.parameters(): param.requires_grad = False def unfreeze_cond_stage(self): for param in self.cond_stage_model.parameters(): param.requires_grad = True def freeze_first_stage(self): self.first_stage_model.trainable = False for param in self.first_stage_model.parameters(): param.requires_grad = False def unfreeze_first_stage(self): self.first_stage_model.trainable = True for param in self.first_stage_model.parameters(): param.requires_grad = True def freeze_whole_model(self): self.first_stage_model.trainable = False for param in self.parameters(): param.requires_grad = False def unfreeze_whole_model(self): self.first_stage_model.trainable = True for param in self.parameters(): param.requires_grad = True def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() # self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): # self.cond_stage_model.eval() if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c, re_latent = self.cond_stage_model.encode(c) # c = self.cond_stage_model.encode(c) else: c, re_latent = self.cond_stage_model(c) # c = 
self.cond_stage_model(c) # return c return c, re_latent def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting 
@torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) # print('encoder_posterior.shape') # print(encoder_posterior.shape) z = self.get_first_stage_encoding(encoder_posterior).detach() # print('z.shape') # print(z.shape) # print(cond_key) # print(self.cond_stage_key) # print(cond_key) if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox','fmri', 'eeg']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x # print('get input') # print(not self.cond_stage_trainable) # print(force_c_encode) if not self.cond_stage_trainable or force_c_encode : # print('get learned condition') if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c, re_latent = self.get_learned_conditioning(xc) # c = self.get_learned_conditioning(xc) else: c, re_latent = self.get_learned_conditioning(xc.to(self.device)) # c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c , batch['label'], batch['image_raw']] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. 
reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. 
reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): self.freeze_first_stage() # print('share step\'s get input') x, c, label, image_raw = self.get_input(batch, self.first_stage_key) # print('get input shape') # print('x.shape') # print(x.shape) # print('c.shape') # print(c.shape) if self.return_cond: loss, cc = self(x, c, label, image_raw) return loss, cc else: loss = self(x, c, label, image_raw) return loss def forward(self, x, c, label, image_raw, *args, **kwargs): # print(self.num_timesteps) t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() # print('t.shape') # print(t.shape) if self.model.conditioning_key is not None: assert c is not None imgs = c if self.cond_stage_trainable: # c = self.get_learned_conditioning(c) c, re_latent = self.get_learned_conditioning(c) # print('c.shape') # print(c.shape) prefix = 'train' if self.training else 'val' loss, loss_dict = self.p_losses(x, c, t, *args, **kwargs) # pre_cls = self.cond_stage_model.get_cls(re_latent) # rencon = self.cond_stage_model.recon(re_latent) if self.clip_tune: image_embeds = self.image_embedder(image_raw) loss_clip = self.cond_stage_model.get_clip_loss(re_latent, image_embeds) # loss_recon = self.recon_loss(imgs, rencon) # loss_cls = self.cls_loss(label, pre_cls) loss += loss_clip # loss += loss_cls # loss_recon + #(self.original_elbo_weight * loss_vlb) # loss_dict.update({f'{prefix}/loss_recon': loss_recon}) # loss_dict.update({f'{prefix}/loss_cls': loss_cls}) loss_dict.update({f'{prefix}/loss_clip': loss_clip}) if self.cls_tune: pre_cls = self.cond_stage_model.get_cls(re_latent) loss_cls = self.cls_loss(label, pre_cls) # image_embeds = self.image_embedder(image_raw) # loss_clip = self.cond_stage_model.get_clip_loss(re_latent, image_embeds) # loss_recon = self.recon_loss(imgs, rencon) # loss_cls = self.cls_loss(label, pre_cls) loss += loss_cls # loss += loss_cls # loss_recon + #(self.original_elbo_weight * loss_vlb) # loss_dict.update({f'{prefix}/loss_recon': loss_recon}) # loss_dict.update({f'{prefix}/loss_cls': loss_cls}) loss_dict.update({f'{prefix}/loss_cls': loss_cls}) # if self.return_cond: # return self.p_losses(x, c, t, *args, **kwargs), c # return self.p_losses(x, c, t, *args, **kwargs) if self.return_cond: return loss, loss_dict, c return loss, loss_dict # def recon_loss(self, ) def recon_loss(self, imgs, pred): """ imgs: [N, 1, num_voxels] pred: [N, L, p] mask: [N, L], 0 is keep, 1 is remove, """ # target = self.patchify(imgs) loss = (pred - imgs) ** 2 loss = loss.mean() # loss = loss.mean(dim=-1) # [N, L], mean loss per patch # loss = (loss * mask).sum() / mask.sum() if mask.sum() != 0 else (loss * mask).sum() # mean loss on removed patches return loss def cls_loss(self, label, pred): return torch.nn.CrossEntropyLoss()(pred, label) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: 
move to dataset def rescale_bbox(bbox): x0 = torch.clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = torch.clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) # print('x_recon') # if isinstance(x_recon, tuple): # print('is tuple') # # print(len(x_recon)) # # print(x_recon[0].shape) # else: # print(x_recon.shape) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) # print('p_losses') # print('noise.shape') # print(noise.shape) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) # print('x_noisy[0].shape') # print(x_noisy[0].shape) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = 
self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None,**kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): if ddim:
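The full file above is cut at the same point, immediately after `if ddim:`, and the single line that follows it is given just below. Before that, a compact sketch of the closed-form forward step q(x_t | x_0) that q_sample in the code implements, on a toy 1-D schedule; this is an editor's illustration, and the linspace below is a stand-in rather than the repository's exact make_beta_schedule:

import torch

betas = torch.linspace(1e-4, 2e-2, 1000)             # toy stand-in for the "linear" schedule
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def q_sample(x0, t, noise):
    # x_t = sqrt(a_bar_t) * x_0 + sqrt(1 - a_bar_t) * noise
    a_bar = alphas_cumprod[t]
    return a_bar.sqrt() * x0 + (1.0 - a_bar).sqrt() * noise

x0 = torch.randn(4)
x_early = q_sample(x0, t=10, noise=torch.randn_like(x0))    # still close to x0
x_late = q_sample(x0, t=900, noise=torch.randn_like(x0))    # nearly pure noise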
ddim_sampler = DDIMSampler(self)
17
2023-12-16 12:52:14+00:00
16k
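Editorial note on the record above: the easiest line to misread in its `p_sample` method is the `nonzero_mask` expression, which zeroes the stochastic term only for samples already at timestep 0 while leaving every other sample noisy. The snippet below is a self-contained illustration in plain PyTorch of what that expression evaluates to; it is an editorial sketch, not part of the record's data.

import torch

# Fake batch: 4 latents of shape (c, h, w) and their per-sample timesteps.
b = 4
x = torch.randn(b, 3, 8, 8)
t = torch.tensor([0, 5, 0, 9])

# Same expression as in p_sample: 0.0 where t == 0, 1.0 elsewhere,
# reshaped to (b, 1, 1, 1) so it broadcasts over the noise tensor.
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))

print(nonzero_mask.shape)               # torch.Size([4, 1, 1, 1])
print(nonzero_mask.flatten().tolist())  # [0.0, 1.0, 0.0, 1.0]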
tonnetonne814/PL-Bert-VITS2
train_ms.py
[ { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n i=0\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class 
TextAudioSpeakerCollate:\n \"\"\"Zero-pads model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n # sid = 1\n max_bert_len = max([x[4].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n bert_lengths = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n bert_padded = torch.FloatTensor(len(batch), 13, max_bert_len, 768)\n\n text_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n bert = row[4]\n bert_padded[i, :, :bert.size(1),:] = bert\n bert_lengths[i] = bert.size(1)\n\n\n if self.return_ids:\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n bert_padded,\n bert_lengths,\n sid,\n ids_sorted_decreasing,\n )\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n bert_padded,\n bert_lengths,\n sid,\n )" }, { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.hparams = hparams\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.text_cleaners = hparams.text_cleaners\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 999)\n self.min_audio_len = getattr(hparams, \"min_audio_len\", 8192)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n self.count = 0\n\n def _filter(self):\n \"\"\"\n Filter text & store 
spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n for data in self.audiopaths_sid_text:\n audiopath, sid, ph, text, bert, emo, style = data\n if not os.path.isfile(audiopath):\n continue\n if self.min_text_len <= len(text) and len(text) <= self.max_text_len:\n audiopaths_sid_text_new.append([audiopath, sid, ph, text, bert, emo, style])\n length = os.path.getsize(audiopath) // (2 * self.hop_length)\n if length < self.min_audio_len // self.hop_length:\n print(\"DATA PASS\")\n continue\n lengths.append(length)\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n print(f\"INFO:{len(self.audiopaths_sid_text)} is used as Training Dataset.\")\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, ph, text, pl_bert, emo, style = (\n audiopath_sid_text[0],\n audiopath_sid_text[1],\n audiopath_sid_text[2],\n audiopath_sid_text[3],\n audiopath_sid_text[4],\n audiopath_sid_text[5],\n audiopath_sid_text[6],\n )\n ph = self.get_text(ph)\n spec, wav = self.get_audio(audiopath)\n bert = self.get_pl_bert(pl_bert)\n sid = self.get_sid(sid)\n\n # parameter checker \n assert len(ph) == bert.size(1)\n\n return (ph, spec, wav, sid, bert)\n \n def get_pl_bert(self, filename):\n path = os.path.join(\"pl_bert_embeddings\", f\"{filename}.PlBertJa\")\n data = torch.load(path)\n if self.add_blank:\n L, T, H = data.shape\n new_data = torch.zeros(size=(L,2*T+1,H), dtype=data.dtype)\n for idx in range(T):\n target_idx = idx*2+1\n new_data[:, target_idx, :] = data[:, idx, :]\n data = new_data\n return data\n\n def get_audio(self, filename):\n # TODO : if linear spec exists convert to mel from existing linear spec\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n sampling_rate, self.sampling_rate\n )\n )\n # audio_norm = audio / self.max_wav_value\n audio_norm = audio.unsqueeze(0)\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n if os.path.exists(spec_filename):\n spec = torch.load(spec_filename)\n else:\n if self.use_mel_spec_posterior:\n \"\"\"TODO : (need verification)\n if linear spec exists convert to\n mel from existing linear spec (uncomment below lines)\"\"\"\n # if os.path.exists(filename.replace(\".wav\", \".spec.pt\")):\n # # spec, n_fft, num_mels, sampling_rate, fmin, fmax\n # spec = spec_to_mel_torch(\n # torch.load(filename.replace(\".wav\", \".spec.pt\")),\n # self.filter_length, self.n_mel_channels, self.sampling_rate,\n # self.hparams.mel_fmin, self.hparams.mel_fmax)\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text):\n if self.cleaned_text:\n text_norm = cleaned_text_to_sequence(text)\n else:\n text_norm = text_to_sequence(text, self.text_cleaners)\n if 
self.add_blank:\n text_norm = commons.intersperse(text_norm, 0)\n text_norm = torch.LongTensor(text_norm)\n return text_norm\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(\n sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax\n )\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n if version.parse(torch.__version__) >= version.parse(\"2\"):\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n else:\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = 
torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(\n sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax\n )\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "AVAILABLE_DURATION_DISCRIMINATOR_TYPES", "path": "models.py", "snippet": "AVAILABLE_DURATION_DISCRIMINATOR_TYPES = [\n \"dur_disc_1\",\n \"dur_disc_2\",\n]" }, { "identifier": "AVAILABLE_FLOW_TYPES", "path": "models.py", "snippet": "AVAILABLE_FLOW_TYPES = [\n \"pre_conv\",\n \"pre_conv2\",\n \"fft\",\n \"mono_layer_inter_residual\",\n \"mono_layer_post_residual\",\n]" }, { "identifier": "DurationDiscriminatorV1", "path": "models.py", "snippet": "class DurationDiscriminatorV1(nn.Module): # vits2\n # TODO : not using \"spk conditioning\" for now according to the paper.\n # Can be a better discriminator if we use it.\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n # self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n # self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n # if gin_channels != 0:\n # self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n # x = torch.relu(x)\n # x = self.pre_out_norm_1(x)\n # x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n # x = torch.relu(x)\n # x = self.pre_out_norm_2(x)\n # x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n # if g is not None:\n # g = torch.detach(g)\n # x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n # x = torch.relu(x)\n # x = self.norm_1(x)\n # x = self.drop(x)\n x = self.conv_2(x * x_mask)\n # x = torch.relu(x)\n # x = self.norm_2(x)\n # x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append(output_prob)\n\n return 
output_probs" }, { "identifier": "DurationDiscriminatorV2", "path": "models.py", "snippet": "class DurationDiscriminatorV2(nn.Module): # vits2\n # TODO : not using \"spk conditioning\" for now according to the paper.\n # Can be a better discriminator if we use it.\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n # if gin_channels != 0:\n # self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n # if g is not None:\n # g = torch.detach(g)\n # x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append([output_prob])\n\n return output_probs" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11, 17, 23, 37]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n 
upsample_kernel_sizes,\n bert_emb_size,\n n_speakers=0,\n gin_channels=0,\n use_sdp=True,\n **kwargs,\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", False\n )\n self.use_transformer_flows = kwargs.get(\"use_transformer_flows\", False)\n self.transformer_flow_type = kwargs.get(\n \"transformer_flow_type\", \"mono_layer_post_residual\"\n )\n if self.use_transformer_flows:\n assert (\n self.transformer_flow_type in AVAILABLE_FLOW_TYPES\n ), f\"transformer_flow_type must be one of {AVAILABLE_FLOW_TYPES}\"\n self.use_sdp = use_sdp\n # self.use_duration_discriminator = kwargs.get(\"use_duration_discriminator\", False)\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n else:\n self.enc_gin_channels = 0\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n bert_emb_size=bert_emb_size,\n gin_channels=self.enc_gin_channels,\n )\n\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n # self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)\n self.flow = ResidualCouplingTransformersBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 4,\n gin_channels=gin_channels,\n use_transformer_flows=self.use_transformer_flows,\n transformer_flow_type=self.transformer_flow_type,\n )\n\n if use_sdp:\n self.dp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n else:\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers > 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n\n # 重み付け加算式を取る\n self.WSL = WeightSumLayer(n_layers=13)\n\n def forward(self, x, x_lengths, y, y_lengths, bert, bert_lengths, sid=None):\n bert = self.WSL(bert)\n\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = None\n\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, bert, bert_lengths, g=g)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = 
torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n if self.use_sdp:\n l_length = self.dp(x, x_mask, w, g=g)\n l_length = l_length / torch.sum(x_mask)\n logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=1.0)\n logw_ = torch.log(w + 1e-6) * x_mask\n else:\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_),\n )\n\n def infer(\n self,\n x,\n x_lengths,\n bert,\n bert_lengths,\n sid=None,\n noise_scale=1,\n length_scale=1,\n noise_scale_w=1.0,\n max_len=None,\n ):\n bert = self.WSL(bert)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = None\n x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, bert, bert_lengths, g=g)\n if self.use_sdp:\n logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)\n else:\n logw = self.dp(x, x_mask, g=g)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)\n\n # currently vits-2 is not capable of voice conversion\n ## comment - choihkk\n ## Assuming the use of the ResidualCouplingTransformersLayer2 module, it seems that voice conversion is possible \n def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):\n assert self.n_speakers > 0, \"n_speakers have to be larger than 0.\"\n g_src = self.emb_g(sid_src).unsqueeze(-1)\n g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)\n z_p = self.flow(z, y_mask, g=g_src)\n z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)\n o_hat = self.dec(z_hat * y_mask, 
g=g_tgt)\n return o_hat, y_mask, (z, z_p, z_hat)" }, { "identifier": "symbols", "path": "PL_BERT_ja/text/symbols.py", "snippet": "" } ]
import argparse
import itertools
import json
import math
import os
import logging
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import tqdm
import commons
import models
import utils
from torch import nn, optim
from torch.cuda.amp import GradScaler, autocast
from torch.nn import functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from data_utils import (DistributedBucketSampler, TextAudioSpeakerCollate,
                        TextAudioSpeakerLoader)
from losses import discriminator_loss, feature_loss, generator_loss, kl_loss
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from models import (AVAILABLE_DURATION_DISCRIMINATOR_TYPES,
                    AVAILABLE_FLOW_TYPES, DurationDiscriminatorV1,
                    DurationDiscriminatorV2, MultiPeriodDiscriminator,
                    SynthesizerTrn)
from PL_BERT_ja.text.symbols import symbols
11,091
    train_sampler = DistributedBucketSampler(
        train_dataset,
        hps.train.batch_size,
        [32, 300, 500, 700, 900, 1100, 1300, 1500, 3000],
        num_replicas=n_gpus,
        rank=rank,
        shuffle=True,
    )
    collate_fn = TextAudioSpeakerCollate()
    train_loader = DataLoader(
        train_dataset,
        num_workers=8,
        shuffle=False,
        pin_memory=True,
        collate_fn=collate_fn,
        batch_sampler=train_sampler,
    )
    if rank == 0:
        eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)
        eval_loader = DataLoader(
            eval_dataset,
            num_workers=8,
            shuffle=False,
            batch_size=hps.train.batch_size,
            pin_memory=True,
            drop_last=False,
            collate_fn=collate_fn,
        )
    # some of these flags are not being used in the code and directly set in hps json file.
    # they are kept here for reference and prototyping.
    if (
        "use_transformer_flows" in hps.model.keys()
        and hps.model.use_transformer_flows == True
    ):
        use_transformer_flows = True
        transformer_flow_type = hps.model.transformer_flow_type
        print(f"Using transformer flows {transformer_flow_type} for VITS2")
        assert (
            transformer_flow_type in AVAILABLE_FLOW_TYPES
        ), f"transformer_flow_type must be one of {AVAILABLE_FLOW_TYPES}"
    else:
        print("Using normal flows for VITS1")
        use_transformer_flows = False

    if (
        "use_spk_conditioned_encoder" in hps.model.keys()
        and hps.model.use_spk_conditioned_encoder == True
    ):
        if hps.data.n_speakers == 0:
            raise ValueError(
                "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model"
            )
        use_spk_conditioned_encoder = True
    else:
        print("Using normal encoder for VITS1")
        use_spk_conditioned_encoder = False

    if (
        "use_noise_scaled_mas" in hps.model.keys()
        and hps.model.use_noise_scaled_mas == True
    ):
        print("Using noise scaled MAS for VITS2")
        use_noise_scaled_mas = True
        mas_noise_scale_initial = 0.01
        noise_scale_delta = 2e-6
    else:
        print("Using normal MAS for VITS1")
        use_noise_scaled_mas = False
        mas_noise_scale_initial = 0.0
        noise_scale_delta = 0.0

    if (
        "use_duration_discriminator" in hps.model.keys()
        and hps.model.use_duration_discriminator == True
    ):
        # print("Using duration discriminator for VITS2")
        use_duration_discriminator = True
        # comment - choihkk
        # add duration discriminator type here
        # I think it would be a good idea to come up with a method to input this part accurately, like a hydra
        duration_discriminator_type = getattr(
            hps.model, "duration_discriminator_type", "dur_disc_1"
        )
        print(f"Using duration_discriminator {duration_discriminator_type} for VITS2")
        assert (
            duration_discriminator_type in AVAILABLE_DURATION_DISCRIMINATOR_TYPES
        ), f"duration_discriminator_type must be one of {AVAILABLE_DURATION_DISCRIMINATOR_TYPES}"
        # duration_discriminator_type = AVAILABLE_DURATION_DISCRIMINATOR_TYPES # ここ修正
        if duration_discriminator_type == "dur_disc_1":
            net_dur_disc = DurationDiscriminatorV1(
                hps.model.hidden_channels,
                hps.model.hidden_channels,
                3,
                0.1,
                gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0,
            ).cuda(rank)
        elif duration_discriminator_type == "dur_disc_2":
            net_dur_disc = DurationDiscriminatorV2(
                hps.model.hidden_channels,
                hps.model.hidden_channels,
                3,
                0.1,
                gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0,
            ).cuda(rank)
    else:
        print("NOT using any duration discriminator like VITS1")
        net_dur_disc = None
        use_duration_discriminator = False

    net_g = SynthesizerTrn(
        len(symbols)+1,
        posterior_channels,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        mas_noise_scale_initial=mas_noise_scale_initial,
        noise_scale_delta=noise_scale_delta,
        **hps.model,
    ).cuda(rank)
numba_logger = logging.getLogger('numba')
numba_logger.setLevel(logging.WARNING)

# from tensorboardX import SummaryWriter

torch.backends.cudnn.benchmark = True
global_step = 0


def main():
    """Assume Single Node Multi GPUs Training Only"""
    assert torch.cuda.is_available(), "CPU training is not allowed."

    n_gpus = torch.cuda.device_count()
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "6060"

    hps = utils.get_hparams()
    mp.spawn(
        run,
        nprocs=n_gpus,
        args=(
            n_gpus,
            hps,
        ),
    )


def run(rank, n_gpus, hps):
    net_dur_disc = None
    global global_step
    if rank == 0:
        logger = utils.get_logger(hps.model_dir)
        logger.info(hps)
        utils.check_git_hash(hps.model_dir)
        writer = SummaryWriter(log_dir=hps.model_dir)
        writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))

    dist.init_process_group(
        backend="nccl", init_method="env://", world_size=n_gpus, rank=rank
    )
    torch.manual_seed(hps.train.seed)
    torch.cuda.set_device(rank)

    if (
        "use_mel_posterior_encoder" in hps.model.keys()
        and hps.model.use_mel_posterior_encoder == True
    ):
        print("Using mel posterior encoder for VITS2")
        posterior_channels = 128  # vits2
        hps.data.use_mel_posterior_encoder = True
    else:
        print("Using lin posterior encoder for VITS1")
        posterior_channels = hps.data.filter_length // 2 + 1
        hps.data.use_mel_posterior_encoder = False

    train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
    train_sampler = DistributedBucketSampler(
        train_dataset,
        hps.train.batch_size,
        [32, 300, 500, 700, 900, 1100, 1300, 1500, 3000],
        num_replicas=n_gpus,
        rank=rank,
        shuffle=True,
    )
    collate_fn = TextAudioSpeakerCollate()
    train_loader = DataLoader(
        train_dataset,
        num_workers=8,
        shuffle=False,
        pin_memory=True,
        collate_fn=collate_fn,
        batch_sampler=train_sampler,
    )
    if rank == 0:
        eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)
        eval_loader = DataLoader(
            eval_dataset,
            num_workers=8,
            shuffle=False,
            batch_size=hps.train.batch_size,
            pin_memory=True,
            drop_last=False,
            collate_fn=collate_fn,
        )
    # some of these flags are not being used in the code and directly set in hps json file.
    # they are kept here for reference and prototyping.
    if (
        "use_transformer_flows" in hps.model.keys()
        and hps.model.use_transformer_flows == True
    ):
        use_transformer_flows = True
        transformer_flow_type = hps.model.transformer_flow_type
        print(f"Using transformer flows {transformer_flow_type} for VITS2")
        assert (
            transformer_flow_type in AVAILABLE_FLOW_TYPES
        ), f"transformer_flow_type must be one of {AVAILABLE_FLOW_TYPES}"
    else:
        print("Using normal flows for VITS1")
        use_transformer_flows = False

    if (
        "use_spk_conditioned_encoder" in hps.model.keys()
        and hps.model.use_spk_conditioned_encoder == True
    ):
        if hps.data.n_speakers == 0:
            raise ValueError(
                "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model"
            )
        use_spk_conditioned_encoder = True
    else:
        print("Using normal encoder for VITS1")
        use_spk_conditioned_encoder = False

    if (
        "use_noise_scaled_mas" in hps.model.keys()
        and hps.model.use_noise_scaled_mas == True
    ):
        print("Using noise scaled MAS for VITS2")
        use_noise_scaled_mas = True
        mas_noise_scale_initial = 0.01
        noise_scale_delta = 2e-6
    else:
        print("Using normal MAS for VITS1")
        use_noise_scaled_mas = False
        mas_noise_scale_initial = 0.0
        noise_scale_delta = 0.0

    if (
        "use_duration_discriminator" in hps.model.keys()
        and hps.model.use_duration_discriminator == True
    ):
        # print("Using duration discriminator for VITS2")
        use_duration_discriminator = True
        # comment - choihkk
        # add duration discriminator type here
        # I think it would be a good idea to come up with a method to input this part accurately, like a hydra
        duration_discriminator_type = getattr(
            hps.model, "duration_discriminator_type", "dur_disc_1"
        )
        print(f"Using duration_discriminator {duration_discriminator_type} for VITS2")
        assert (
            duration_discriminator_type in AVAILABLE_DURATION_DISCRIMINATOR_TYPES
        ), f"duration_discriminator_type must be one of {AVAILABLE_DURATION_DISCRIMINATOR_TYPES}"
        # duration_discriminator_type = AVAILABLE_DURATION_DISCRIMINATOR_TYPES # ここ修正
        if duration_discriminator_type == "dur_disc_1":
            net_dur_disc = DurationDiscriminatorV1(
                hps.model.hidden_channels,
                hps.model.hidden_channels,
                3,
                0.1,
                gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0,
            ).cuda(rank)
        elif duration_discriminator_type == "dur_disc_2":
            net_dur_disc = DurationDiscriminatorV2(
                hps.model.hidden_channels,
                hps.model.hidden_channels,
                3,
                0.1,
                gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0,
            ).cuda(rank)
    else:
        print("NOT using any duration discriminator like VITS1")
        net_dur_disc = None
        use_duration_discriminator = False

    net_g = SynthesizerTrn(
        len(symbols)+1,
        posterior_channels,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        mas_noise_scale_initial=mas_noise_scale_initial,
        noise_scale_delta=noise_scale_delta,
        **hps.model,
    ).cuda(rank)
net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
13
2023-12-16 05:34:02+00:00
16k
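Editorial note on the record above: the boundary list [32, 300, 500, 700, 900, 1100, 1300, 1500, 3000] passed to DistributedBucketSampler groups utterances by spectrogram length and silently drops anything outside the boundary range, as described in the sampler's docstring in the record's context. The self-contained sketch below mirrors the `_bisect` helper shown in that context snippet to make the bucket-assignment rule concrete; it is an editorial illustration, not additional data from the repository.

def bisect_bucket(length, boundaries, lo=0, hi=None):
    """Mirror of DistributedBucketSampler._bisect: return the bucket index for
    a given length, or -1 if the length falls outside all boundaries."""
    if hi is None:
        hi = len(boundaries) - 1
    if hi > lo:
        mid = (hi + lo) // 2
        if boundaries[mid] < length <= boundaries[mid + 1]:
            return mid
        elif length <= boundaries[mid]:
            return bisect_bucket(length, boundaries, lo, mid)
        else:
            return bisect_bucket(length, boundaries, mid + 1, hi)
    return -1  # sample is discarded by the sampler


boundaries = [32, 300, 500, 700, 900, 1100, 1300, 1500, 3000]
for length in (10, 128, 450, 2999, 5000):
    print(length, "->", bisect_bucket(length, boundaries))
# 10 -> -1 (too short, dropped), 128 -> 0, 450 -> 1, 2999 -> 7, 5000 -> -1 (too long, dropped)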
Ruiyuan-Zhang/CCS
multi_part_assembly/utils/wx_transformer_utilities/multihead_attention.py
[ { "identifier": "FairseqDropout", "path": "multi_part_assembly/utils/wx_transformer_utilities/fairseq_dropout.py", "snippet": "class FairseqDropout(nn.Module):\n\n def __init__(self, p, module_name=None):\n super().__init__()\n self.p = p\n self.module_name = module_name\n self.apply_during_inference = False\n\n def forward(self, x, inplace: bool = False):\n if self.training or self.apply_during_inference:\n return F.dropout(x, p=self.p, training=True, inplace=inplace)\n else:\n return x\n\n def make_generation_fast_(\n self,\n name: str,\n retain_dropout: bool = False,\n retain_dropout_modules: Optional[List[str]] = None,\n **kwargs\n ):\n if retain_dropout:\n if retain_dropout_modules is not None and self.module_name is None:\n logger.warning(\n 'Cannot enable dropout during inference for module {} '\n 'because module_name was not set'.format(name)\n )\n elif (\n retain_dropout_modules is None # if None, apply to all modules\n or self.module_name in retain_dropout_modules\n ):\n logger.info(\n 'Enabling dropout during inference for module: {}'.format(name)\n )\n self.apply_during_inference = True\n else:\n logger.info('Disabling dropout for module: {}'.format(name))" }, { "identifier": "MultiHeadAttention", "path": "multi_part_assembly/utils/wx_transformer_utilities/attention_rim.py", "snippet": "class MultiHeadAttention(nn.Module):\n ''' Multi-Head Attention module '''\n\n def __init__(self, n_head, d_model_read, d_model_write, d_model_out, d_k, d_v, grad_sparse, residual=True, dropout=0.1, skip_write=False, flag=False):\n super().__init__()\n\n self.n_head = n_head\n self.d_k = d_k\n self.d_v = d_v\n\n # print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Initialize Multi-Head Attention~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n # print('d model read: ', d_model_read)\n # print('d_model_write: ', d_model_write)\n # print('d_model_out: ', d_model_out)\n # print('n_head: ', n_head)\n # print('d_k: ', d_k)\n # print('d_v: ', d_v)\n # print('num_blocks_read: ', num_blocks_read)\n # print('num_blocks_write: ', num_blocks_write)\n # input()\n\n self.GLN_qs = nn.Linear(d_model_read, n_head * d_k)\n self.GLN_ks = nn.Linear(d_model_write, n_head * d_k)\n self.GLN_vs = nn.Linear(d_model_write, n_head * d_v)\n\n self.residual = residual\n\n #self.w_qs = nn.Linear(d_model_read, n_head * d_k)\n #self.w_ks = nn.Linear(d_model_write, n_head * d_k)\n #self.w_vs = nn.Linear(d_model_write, n_head * d_v)\n\n #nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))\n #nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))\n #nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))\n\n self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5), flag=flag)\n #self.layer_norm = nn.LayerNorm(d_model)\n\n self.gate_fc = nn.Linear(n_head * d_v, d_model_out)\n\n if not skip_write:\n self.fc = nn.Linear(n_head * d_v, d_model_out)\n else:\n self.fc = lambda a: a\n\n #nn.init.xavier_normal_(self.fc.weight)\n\n self.dropout = nn.Dropout(dropout)\n\n self.ln = nn.LayerNorm(d_model_out)\n\n def forward(self, q, k, v, mask=None):\n\n #print('attn input shape', q.shape)\n\n d_k, d_v, n_head = self.d_k, self.d_v, self.n_head\n\n sz_b, len_q, _ = q.size()\n sz_b, len_k, _ = k.size()\n sz_b, len_v, _ = v.size()\n\n residual = q\n\n #print('q shape', q.shape)\n\n # print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~Forward of Multi-Head Attention~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n # print(\"q: \", q.size())\n # print(\"k: \", k.size())\n # print(\"v: \", v.size())\n # 
input()\n\n q = self.GLN_qs(q).view(sz_b, len_q, n_head, d_k)\n #q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)\n k = self.GLN_ks(k).view(sz_b, len_k, n_head, d_k)\n v = self.GLN_vs(v).reshape(sz_b, len_v, n_head, d_v)\n #v = v.view(sz_b, len_v, n_head, d_v)\n\n # print(\"GLN q: \", q.size())\n # print(\"GLN k: \", k.size())\n # print(\"GLN v: \", v.size())\n\n q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk\n k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk\n v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv\n\n # print(\"Permute q: \", q.size())\n # print(\"Permute k: \", k.size())\n # print(\"Permute v: \", v.size())\n\n #mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..\n output, attn, extra_loss = self.attention(q, k, v, mask=None)\n\n # print(\"Output: \", output.size())\n # print(\"Attention: \", attn.size())\n\n output = output.view(n_head, sz_b, len_q, d_v)\n output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)\n\n # print(\"Here Output: \", output.size())\n\n #print('output shape before fc', output.shape)\n\n #TODO: probably shouldn't just apply residual layer in the forward pass.\n\n output_init = output*1.0\n\n output = self.dropout(self.fc(output_init))\n\n gate = torch.sigmoid(self.gate_fc(output_init))\n\n #output = self.layer_norm(gate * output + (1 - gate) * residual)\n #output = gate * output + (1 - gate) * residual\n\n if self.residual:\n output = gate * torch.tanh(output)\n else:\n #output = self.ln(output)\n pass\n\n # print(\"Final Output: \", output.size())\n\n #output\n\n #print('attn', attn[0])\n #print('output input diff', output - residual)\n\n return output, attn, extra_loss" }, { "identifier": "quant_noise", "path": "multi_part_assembly/utils/wx_transformer_utilities/quant_noise.py", "snippet": "def quant_noise(module, p, block_size):\n \"\"\"\n Wraps modules and applies quantization noise to the weights for\n subsequent quantization with Iterative Product Quantization as\n described in \"Training with Quantization Noise for Extreme Model Compression\"\n\n Args:\n - module: nn.Module\n - p: amount of Quantization Noise\n - block_size: size of the blocks for subsequent quantization with iPQ\n\n Remarks:\n - Module weights must have the right sizes wrt the block size\n - Only Linear, Embedding and Conv2d modules are supported for the moment\n - For more detail on how to quantize by blocks with convolutional weights,\n see \"And the Bit Goes Down: Revisiting the Quantization of Neural Networks\"\n - We implement the simplest form of noise here as stated in the paper\n which consists in randomly dropping blocks\n \"\"\"\n\n # if no quantization noise, don't register hook\n if p <= 0:\n return module\n\n # supported modules\n assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))\n\n # test whether module.weight has the right sizes wrt block_size\n is_conv = module.weight.ndim == 4\n\n # 2D matrix\n if not is_conv:\n assert module.weight.size(1) % block_size == 0, \"Input features must be a multiple of block sizes\"\n\n # 4D matrix\n else:\n # 1x1 convolutions\n if module.kernel_size == (1, 1):\n assert module.in_channels % block_size == 0, \"Input channels must be a multiple of block sizes\"\n # regular convolutions\n else:\n k = module.kernel_size[0] * module.kernel_size[1]\n assert k % block_size == 0, \"Kernel size must be a multiple of block size\"\n\n def _forward_pre_hook(mod, input):\n # no noise for evaluation\n if 
mod.training:\n if not is_conv:\n # gather weight and sizes\n weight = mod.weight\n in_features = weight.size(1)\n out_features = weight.size(0)\n\n # split weight matrix into blocks and randomly drop selected blocks\n mask = torch.zeros(in_features // block_size * out_features, device=weight.device)\n mask.bernoulli_(p)\n mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)\n\n else:\n # gather weight and sizes\n weight = mod.weight\n in_channels = mod.in_channels\n out_channels = mod.out_channels\n\n # split weight matrix into blocks and randomly drop selected blocks\n if mod.kernel_size == (1, 1):\n mask = torch.zeros(int(in_channels // block_size * out_channels), device=weight.device)\n mask.bernoulli_(p)\n mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)\n else:\n mask = torch.zeros(weight.size(0), weight.size(1), device=weight.device)\n mask.bernoulli_(p)\n mask = mask.unsqueeze(2).unsqueeze(3).repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])\n\n # scale weights and apply mask\n mask = mask.to(torch.bool) # x.bool() is not currently supported in TorchScript\n s = 1 / (1 - p)\n mod.weight.data = s * weight.masked_fill(mask, 0)\n\n module.register_forward_pre_hook(_forward_pre_hook)\n return module" }, { "identifier": "GroupLinearLayer", "path": "multi_part_assembly/utils/wx_transformer_utilities/group_linear_layer.py", "snippet": "class GroupLinearLayer(nn.Module):\n\n def __init__(self, din, dout, num_blocks, bias=True, a = None):\n super(GroupLinearLayer, self).__init__()\n self.nb = num_blocks\n self.dout = dout\n\n if a is None:\n a = 1. / math.sqrt(dout * num_blocks)\n\n #gain = 1.0 / math.sqrt(2)\n #a = gain * math.sqrt(6.0 / (din + dout))\n\n self.weight = nn.Parameter(torch.FloatTensor(num_blocks,din,dout).uniform_(-a,a))\n\n self.bias = bias\n\n if bias is True:\n self.bias = nn.Parameter(torch.FloatTensor(num_blocks,dout).uniform_(-a,a))\n #self.bias = nn.Parameter(torch.zeros(dout*num_blocks))\n else:\n self.bias = None\n\n def forward(self,x):\n\n\t#input: ts x bs x blocks*nhid\n\t#ts*bs , blocks, nhid\n\t#blocks, ts*bs, nhid\n ts,bs,m = x.shape\t\n\n x = x.reshape((ts*bs, self.nb, m//self.nb))\n x = x.permute(1,0,2)\n x = torch.bmm(x,self.weight)\n x = x.permute(1,0,2)\n \n if not self.bias is None:\n x = x + self.bias\n\n x = x.reshape((ts, bs, self.dout*self.nb))\n \n #if not self.bias is None:\n # x += self.bias\n\n return x" }, { "identifier": "RelationalMemory", "path": "multi_part_assembly/utils/wx_transformer_utilities/relational_memory_volatile.py", "snippet": "class RelationalMemory(nn.Module):\n \"\"\"\n Constructs a `RelationalMemory` object.\n This class is same as the RMC from relational_rnn_models.py, but without language modeling-specific variables.\n Args:\n mem_slots: The total number of memory slots to use.\n head_size: The size of an attention head.\n input_size: The size of input per step. i.e. the dimension of each input vector\n num_heads: The number of attention heads to use. Defaults to 1.\n num_blocks: Number of times to compute attention per time step. Defaults\n to 1.\n forget_bias: Bias to use for the forget gate, assuming we are using\n some form of gating. Defaults to 1.\n input_bias: Bias to use for the input gate, assuming we are using\n some form of gating. Defaults to 0.\n gate_style: Whether to use per-element gating ('unit'),\n per-memory slot gating ('memory'), or no gating at all (None).\n Defaults to `unit`.\n attention_mlp_layers: Number of layers to use in the post-attention\n MLP. 
Defaults to 2.\n key_size: Size of vector to use for key & query vectors in the attention\n computation. Defaults to None, in which case we use `head_size`.\n name: Name of the module.\n\n # NEW flag for this class\n return_all_outputs: Whether the model returns outputs for each step (like seq2seq) or only the final output.\n Raises:\n ValueError: gate_style not one of [None, 'memory', 'unit'].\n ValueError: num_blocks is < 1.\n ValueError: attention_mlp_layers is < 1.\n \"\"\"\n\n def __init__(self, mem_slots, head_size, input_size, output_size, num_heads=1, num_blocks=1, forget_bias=1., input_bias=0.,\n gate_style='unit', attention_mlp_layers=2, key_size=None, return_all_outputs=False, use_topk = False, topk = 3, num_steps = 5,\n null_attention = False):\n super(RelationalMemory, self).__init__()\n\n ########## generic parameters for RMC ##########\n self.mem_slots = mem_slots\n self.head_size = head_size\n self.num_heads = num_heads\n self.mem_size = self.head_size * self.num_heads\n self.use_topk = use_topk\n self.topk = topk\n\n # a new fixed params needed for pytorch port of RMC\n # +1 is the concatenated input per time step : we do self-attention with the concatenated memory & input\n # so if the mem_slots = 1, this value is 2\n self.mem_slots_plus_input = self.mem_slots + 1\n\n if num_blocks < 1:\n raise ValueError('num_blocks must be >=1. Got: {}.'.format(num_blocks))\n self.num_blocks = num_blocks\n\n print(\"Using gate style\", gate_style)\n if gate_style not in ['unit', 'memory', None]:\n raise ValueError(\n 'gate_style must be one of [\\'unit\\', \\'memory\\', None]. got: '\n '{}.'.format(gate_style))\n self.gate_style = gate_style\n\n if attention_mlp_layers < 1:\n raise ValueError('attention_mlp_layers must be >= 1. Got: {}.'.format(\n attention_mlp_layers))\n self.attention_mlp_layers = attention_mlp_layers\n\n self.key_size = key_size if key_size else self.head_size\n self.attn_log = None\n\n ########## parameters for multihead attention ##########\n # value_size is same as head_size\n self.value_size = self.head_size\n # total size for query-key-value\n self.qkv_size = 2 * self.key_size + self.value_size\n self.total_qkv_size = self.qkv_size * self.num_heads # denoted as F\n\n self.query_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n count_parameters(\"query\", self.query_proj)\n self.key_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n count_parameters(\"key\", self.key_proj)\n self.value_proj = nn.Linear(self.mem_size, self.value_size * self.num_heads)\n count_parameters(\"value\", self.value_proj)\n\n # each head has qkv_sized linear projector\n # just using one big param is more efficient, rather than this line\n # self.qkv_projector = [nn.Parameter(torch.randn((self.qkv_size, self.qkv_size))) for _ in range(self.num_heads)]\n #self.qkv_projector = nn.Linear(self.mem_size, self.total_qkv_size)\n #self.qkv_layernorm = nn.LayerNorm(self.total_qkv_size)\n\n # used for attend_over_memory function\n self.attention_mlp = nn.ModuleList([nn.Linear(self.mem_size, self.mem_size)] * self.attention_mlp_layers)\n count_parameters(\"attention_mlp\", self.attention_mlp[0])\n self.attended_memory_layernorm = nn.LayerNorm( self.mem_size)\n count_parameters(\"layernorm1\", self.attended_memory_layernorm)\n self.attended_memory_layernorm2 = nn.LayerNorm(self.mem_size)\n count_parameters(\"layernorm2\", self.attended_memory_layernorm2)\n\n ########## parameters for initial embedded input projection ##########\n self.input_size = input_size\n 
self.input_projector = nn.Linear(self.input_size, self.mem_size)\n count_parameters(\"input_projector\", self.input_projector)\n\n #self.output_projector = nn.Linear(self.output_size, self.input_size)\n\n ########## parameters for gating ##########\n self.num_gates = 2 * self.calculate_gate_size()\n print('input projector:'+str(self.mem_size))\n \n if gate_style in ['unit', 'memory']:\n self.input_gate_projector = RepeatLinear(self.mem_size, self.num_gates, num_steps)\n count_parameters(\"input_gate_projector\", self.input_gate_projector)\n self.memory_gate_projector = GroupLinearLayer(self.mem_size, self.num_gates, self.mem_slots)\n #self.memory_gate_projector = nn.Linear(self.mem_size, self.num_gates)\n\n #(self.mem_size, self.num_gates, self.mem_slots)\n count_parameters(\"memory_gate_projector\", self.memory_gate_projector)\n \n # trainable scalar gate bias tensors\n self.forget_bias = nn.Parameter(torch.tensor(forget_bias, dtype=torch.float32))\n self.input_bias = nn.Parameter(torch.tensor(input_bias, dtype=torch.float32))\n\n ########## number of outputs returned #####\n self.return_all_outputs = return_all_outputs\n\n self.null_attention = null_attention\n\n print(\"relational volatie!!!\") \n #self.competition_mlp = nn.Sequential(nn.Linear(self.mem_slots * self.mem_size + self.mem_size, 256),\n # nn.ReLU(),\n # nn.Linear(256, 256),\n # nn.ReLU(),\n # nn.Linear(256, 256),\n # nn.ReLU(),\n # nn.Linear(256, 2))\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Tensors, to detach them from their history.\"\"\"\n # needed for truncated BPTT, called at every batch forward pass\n if isinstance(h, torch.Tensor):\n return h.detach()\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initial_state(self, batch_size, trainable=False):\n \"\"\"\n Creates the initial memory.\n We should ensure each row of the memory is initialized to be unique,\n so initialize the matrix to be the identity. We then pad or truncate\n as necessary so that init_state is of size\n (batch_size, self.mem_slots, self.mem_size).\n Args:\n batch_size: The size of the batch.\n trainable: Whether the initial state is trainable. This is always True.\n Returns:\n init_state: A truncated or padded matrix of size\n (batch_size, self.mem_slots, self.mem_size).\n \"\"\"\n if True:\n init_state = torch.stack([torch.eye(self.mem_slots) for _ in range(batch_size)])\n\n # pad the matrix with zeros\n if self.mem_size > self.mem_slots:\n difference = self.mem_size - self.mem_slots\n pad = torch.zeros((batch_size, self.mem_slots, difference))\n init_state = torch.cat([init_state, pad], -1)\n\n # truncation. 
take the first 'self.mem_size' components\n elif self.mem_size < self.mem_slots:\n init_state = init_state[:, :, :self.mem_size]\n\n return init_state\n else:\n init_state = torch.randn(batch_size, self.mem_slots, self.mem_size)\n return init_state\n def multihead_attention(self, input, memory, use_topk_ = True, store_log = True):\n \"\"\"\n Perform multi-head attention from 'Attention is All You Need'.\n Implementation of the attention mechanism from\n https://arxiv.org/abs/1706.03762.\n Args:\n memory: Memory tensor to perform attention on.\n Returns:\n new_memory: New memory tensor.\n \"\"\"\n\n q = self.query_proj(memory)\n k = self.key_proj(input)\n v = self.value_proj(input)\n\n q = q.reshape(q.size(0), q.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n k = k.reshape(k.size(0), k.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n v = v.reshape(v.size(0), v.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n scores = torch.matmul(q, k.transpose(2, 3))\n\n scores = torch.softmax(scores, dim = -1)\n #if store_log:\n # self.attn_log = scores[0]\n if not self.null_attention:\n if self.use_topk and use_topk_:\n topk = torch.topk(scores, dim = -1, k = self.topk)\n mask = torch.zeros(scores.size()).to(scores.device)\n mask.scatter_(3, topk.indices, 1)\n scores = scores * mask\n else:\n memory_flat = memory.reshape(memory.size(0), -1).unsqueeze(1)\n memory_flat = memory_flat.repeat(1, input.shape[1], 1)\n\n N = torch.cat((input, memory_flat), dim = 2)\n N = self.competition_mlp(N)\n\n N = torch.nn.functional.gumbel_softmax(N, dim = 2, hard = True, tau = 0.5)\n\n N = N[:, :, 0]\n\n scores = scores * N.unsqueeze(1).unsqueeze(1)\n\n\n output = torch.matmul(scores, v)\n\n \"\"\"#print(memory.size())\n # First, a simple linear projection is used to construct queries\n qkv = self.qkv_projector(memory)\n # apply layernorm for every dim except the batch dim\n qkv = self.qkv_layernorm(qkv)\n\n # mem_slots needs to be dynamically computed since mem_slots got concatenated with inputs\n # example: self.mem_slots=10 and seq_length is 3, and then mem_slots is 10 + 1 = 11 for each 3 step forward pass\n # this is the same as self.mem_slots_plus_input, but defined to keep the sonnet implementation code style\n mem_slots = memory.shape[1] # denoted as N\n\n # split the qkv to multiple heads H\n # [B, N, F] => [B, N, H, F/H]\n qkv_reshape = qkv.view(qkv.shape[0], mem_slots, self.num_heads, self.qkv_size)\n\n # [B, N, H, F/H] => [B, H, N, F/H]\n qkv_transpose = qkv_reshape.permute(0, 2, 1, 3)\n\n # [B, H, N, key_size], [B, H, N, key_size], [B, H, N, value_size]\n q, k, v = torch.split(qkv_transpose, [self.key_size, self.key_size, self.value_size], -1)\n\n # scale q with d_k, the dimensionality of the key vectors\n q *= (self.key_size ** -0.5)\n\n # make it [B, H, N, N]\n dot_product = torch.matmul(q, k.permute(0, 1, 3, 2))\n weights = F.softmax(dot_product, dim=-1)\n\n if self.use_topk:\n topk = torch.topk(weights, dim = -1, k = self.topk)\n mask = torch.zeros(weights.size()).to(weights.device)\n mask.scatter_(3, topk.indices, 1)\n weights = weights * mask\n\n # output is [B, H, N, V]\n output = torch.matmul(weights, v)\"\"\"\n\n # [B, H, N, V] => [B, N, H, V] => [B, N, H*V]\n output_transpose = output.permute(0, 2, 1, 3).contiguous()\n new_memory = output_transpose.view((output_transpose.shape[0], output_transpose.shape[1], -1))\n\n return new_memory\n\n\n @property\n def state_size(self):\n return [self.mem_slots, self.mem_size]\n\n @property\n def output_size(self):\n return self.mem_slots * 
self.mem_size\n\n def print_log(self):\n print(self.attn_log)\n\n def calculate_gate_size(self):\n \"\"\"\n Calculate the gate size from the gate_style.\n Returns:\n The per sample, per head parameter size of each gate.\n \"\"\"\n if self.gate_style == 'unit':\n return self.mem_size\n elif self.gate_style == 'memory':\n return 1\n else: # self.gate_style == None\n return 0\n\n def create_gates(self, inputs, memory):\n \"\"\"\n Create input and forget gates for this step using `inputs` and `memory`.\n Args:\n inputs: Tensor input.\n memory: The current state of memory.\n Returns:\n input_gate: A LSTM-like insert gate.\n forget_gate: A LSTM-like forget gate.\n \"\"\"\n # We'll create the input and forget gates at once. Hence, calculate double\n # the gate size.\n\n # equation 8: since there is no output gate, h is just a tanh'ed m\n memory = torch.tanh(memory)\n\n # TODO: check this input flattening is correct\n # sonnet uses this, but i think it assumes time step of 1 for all cases\n # if inputs is (B, T, features) where T > 1, this gets incorrect\n # inputs = inputs.view(inputs.shape[0], -1)\n\n # fixed implementation\n if len(inputs.shape) == 3:\n #if inputs.shape[1] > 1:\n # raise ValueError(\n # \"input seq length is larger than 1. create_gate function is meant to be called for each step, with input seq length of 1\")\n \n # matmul for equation 4 and 5\n # there is no output gate, so equation 6 is not implemented\n #print('jello')\n gate_inputs = self.input_gate_projector(inputs)\n gate_inputs = gate_inputs.unsqueeze(dim=1)\n gate_memory = self.memory_gate_projector(memory)\n else:\n raise ValueError(\"input shape of create_gate function is 2, expects 3\")\n\n # this completes the equation 4 and 5\n #print(gate_inputs.size())\n #print(gate_memory.size())\n gates = gate_memory + gate_inputs\n #self.attn_log = gates[0]\n gates = torch.split(gates, split_size_or_sections=int(gates.shape[2] / 2), dim=2)\n input_gate, forget_gate = gates\n assert input_gate.shape[2] == forget_gate.shape[2]\n\n # to be used for equation 7\n self.attn_log = torch.zeros(input_gate.shape[1], input_gate.shape[2], 2)\n self.attn_log[:, :, 0] = input_gate[0].cpu()\n\n input_gate = torch.sigmoid(input_gate+self.input_bias)\n forget_gate = torch.sigmoid(forget_gate + self.forget_bias)\n\n return input_gate, forget_gate\n\n def attend_over_memory(self, inputs, memory):\n \"\"\"\n Perform multiheaded attention over `memory`.\n Args:\n memory: Current relational memory.\n Returns:\n The attended-over memory.\n \"\"\"\n for _ in range(self.num_blocks):\n attended_memory = self.multihead_attention(inputs, memory)\n\n # Add a skip connection to the multiheaded attention's input.\n memory = self.attended_memory_layernorm(memory + attended_memory)\n\n # add a skip connection to the attention_mlp's input.\n attention_mlp = memory\n for i, l in enumerate(self.attention_mlp):\n attention_mlp = self.attention_mlp[i](attention_mlp)\n attention_mlp = F.relu(attention_mlp)\n memory = self.attended_memory_layernorm2(memory + attention_mlp)\n #memory = self.multihead_attention(memory, memory, use_topk_ = False, store_log = False)\n\n return memory\n\n def forward_step(self, inputs, memory, treat_input_as_matrix=False):\n \"\"\"\n Forward step of the relational memory core.\n Args:\n inputs: Tensor input.\n memory: Memory output from the previous time step.\n treat_input_as_matrix: Optional, whether to treat `input` as a sequence\n of matrices. 
Default to False, in which case the input is flattened\n into a vector.\n Returns:\n output: This time step's output.\n next_memory: The next version of memory to use.\n \"\"\"\n\n if treat_input_as_matrix:\n # keep (Batch, Seq, ...) dim (0, 1), flatten starting from dim 2\n inputs = inputs.view(inputs.shape[0], inputs.shape[1], -1)\n # apply linear layer for dim 2\n inputs_reshape = self.input_projector(inputs)\n else:\n # keep (Batch, ...) dim (0), flatten starting from dim 1\n inputs = inputs.view(inputs.shape[0], -1)\n # apply linear layer for dim 1\n inputs = self.input_projector(inputs)\n # unsqueeze the time step to dim 1\n inputs_reshape = inputs.unsqueeze(dim=1)\n\n #memory_plus_input = torch.cat([memory, inputs_reshape], dim=1)\n #print(memory_plus_input.size())\n next_memory = self.attend_over_memory(inputs_reshape, memory)\n\n # cut out the concatenated input vectors from the original memory slots\n #n = inputs_reshape.shape[1]\n #next_memory = next_memory[:, :-n, :]\n\n if self.gate_style == 'unit' or self.gate_style == 'memory':\n # these gates are sigmoid-applied ones for equation 7\n input_gate, forget_gate = self.create_gates(inputs_reshape, memory)\n # equation 7 calculation\n next_memory = input_gate * torch.tanh(next_memory)\n next_memory += forget_gate * memory\n self.attn_log[:, :, 1] = input_gate[0].cpu()\n\n\n output = next_memory.reshape(next_memory.shape[0], -1)\n hx = self.multihead_attention(next_memory, inputs_reshape, use_topk_ = False, store_log = False)\n return output, next_memory, hx\n\n def forward(self, inputs, memory, parallel = True):\n # Starting each batch, we detach the hidden state from how it was previously produced.\n # If we didn't, the model would try backpropagating all the way to start of the dataset.\n # memory = self.repackage_hidden(memory)\n\n # for loop implementation of (entire) recurrent forward pass of the model\n # inputs is batch first [batch, seq], and output logit per step is [batch, vocab]\n # so the concatenated logits are [seq * batch, vocab]\n\n # targets are flattened [seq, batch] => [seq * batch], so the dimension is correct\n\n logits = []\n #print(inputs.size())\n #print(memory.size())\n #memory = self.repackage_hidden(memory)\n # shape[1] is seq_lenth T\n if not parallel:\n for idx_step in range(inputs.shape[1]):\n logit, memory = self.forward_step(inputs[:, idx_step], memory)\n logits.append(logit)\n logits = torch.cat(logits)\n else:\n logits, memory, hx = self.forward_step(inputs, memory, treat_input_as_matrix = True)\n \n memory_out = None #self.output_projector(memory.view(memory.shape[0], -1))\n\n #print(inputs.size())\n #print(memory_out.size())\n #print('------')\n if self.return_all_outputs:\n return logits, memory_out , memory, hx\n else:\n return logits, memory_out, memory, hx" }, { "identifier": "RelationalMemory", "path": "multi_part_assembly/utils/wx_transformer_utilities/relational_memory_regressive.py", "snippet": "class RelationalMemory(nn.Module):\n \"\"\"\n Constructs a `RelationalMemory` object.\n This class is same as the RMC from relational_rnn_models.py, but without language modeling-specific variables.\n Args:\n mem_slots: The total number of memory slots to use.\n head_size: The size of an attention head.\n input_size: The size of input per step. i.e. the dimension of each input vector\n num_heads: The number of attention heads to use. Defaults to 1.\n num_blocks: Number of times to compute attention per time step. 
Defaults\n to 1.\n forget_bias: Bias to use for the forget gate, assuming we are using\n some form of gating. Defaults to 1.\n input_bias: Bias to use for the input gate, assuming we are using\n some form of gating. Defaults to 0.\n gate_style: Whether to use per-element gating ('unit'),\n per-memory slot gating ('memory'), or no gating at all (None).\n Defaults to `unit`.\n attention_mlp_layers: Number of layers to use in the post-attention\n MLP. Defaults to 2.\n key_size: Size of vector to use for key & query vectors in the attention\n computation. Defaults to None, in which case we use `head_size`.\n name: Name of the module.\n\n # NEW flag for this class\n return_all_outputs: Whether the model returns outputs for each step (like seq2seq) or only the final output.\n Raises:\n ValueError: gate_style not one of [None, 'memory', 'unit'].\n ValueError: num_blocks is < 1.\n ValueError: attention_mlp_layers is < 1.\n \"\"\"\n\n def __init__(self, mem_slots, head_size, input_size, output_size, num_heads=1, num_blocks=1, forget_bias=1., input_bias=0.,\n gate_style='unit', attention_mlp_layers=2, key_size=None, return_all_outputs=False, use_topk = False, topk = 3, num_steps = 5,\n null_attention = False):\n super(RelationalMemory, self).__init__()\n\n ########## generic parameters for RMC ##########\n self.mem_slots = mem_slots\n self.head_size = head_size\n self.num_heads = num_heads\n self.mem_size = self.head_size * self.num_heads\n self.use_topk = use_topk\n self.topk = topk\n\n # a new fixed params needed for pytorch port of RMC\n # +1 is the concatenated input per time step : we do self-attention with the concatenated memory & input\n # so if the mem_slots = 1, this value is 2\n self.mem_slots_plus_input = self.mem_slots + 1\n\n if num_blocks < 1:\n raise ValueError('num_blocks must be >=1. Got: {}.'.format(num_blocks))\n self.num_blocks = num_blocks\n\n if gate_style not in ['unit', 'memory', None]:\n raise ValueError(\n 'gate_style must be one of [\\'unit\\', \\'memory\\', None]. got: '\n '{}.'.format(gate_style))\n self.gate_style = gate_style\n\n if attention_mlp_layers < 1:\n raise ValueError('attention_mlp_layers must be >= 1. 
Got: {}.'.format(\n attention_mlp_layers))\n self.attention_mlp_layers = attention_mlp_layers\n\n self.key_size = key_size if key_size else self.head_size\n\n ########## parameters for multihead attention ##########\n # value_size is same as head_size\n self.value_size = self.head_size\n # total size for query-key-value\n self.qkv_size = 2 * self.key_size + self.value_size\n self.total_qkv_size = self.qkv_size * self.num_heads # denoted as F\n\n self.query_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n self.key_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n self.value_proj = nn.Linear(self.mem_size, self.value_size * self.num_heads)\n\n\n # each head has qkv_sized linear projector\n # just using one big param is more efficient, rather than this line\n # self.qkv_projector = [nn.Parameter(torch.randn((self.qkv_size, self.qkv_size))) for _ in range(self.num_heads)]\n self.qkv_projector = nn.Linear(self.mem_size, self.total_qkv_size)\n self.qkv_layernorm = nn.LayerNorm(self.total_qkv_size)\n\n # used for attend_over_memory function\n self.attention_mlp = nn.ModuleList([nn.Linear(self.mem_size, self.mem_size)] * self.attention_mlp_layers)\n self.attended_memory_layernorm = nn.LayerNorm( self.mem_size)\n self.attended_memory_layernorm2 = nn.LayerNorm(self.mem_size)\n\n ########## parameters for initial embedded input projection ##########\n self.input_size = input_size\n self.input_projector = nn.Linear(self.input_size, self.mem_size)\n\n self.output_projector = nn.Linear(self.output_size, self.input_size)\n\n ########## parameters for gating ##########\n self.num_gates = 2 * self.calculate_gate_size()\n print('input projector:'+str(self.mem_size))\n self.input_gate_projector = nn.Linear(self.mem_size, self.num_gates)\n self.memory_gate_projector = nn.Linear(self.mem_size, self.num_gates)\n # trainable scalar gate bias tensors\n self.forget_bias = nn.Parameter(torch.tensor(forget_bias, dtype=torch.float32))\n self.input_bias = nn.Parameter(torch.tensor(input_bias, dtype=torch.float32))\n\n ########## number of outputs returned #####\n self.return_all_outputs = return_all_outputs\n\n self.null_attention = null_attention\n\n self.competition_mlp = nn.Sequential(nn.Linear(self.mem_slots * self.mem_size + self.mem_size, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 2))\n self.score_log = None\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Tensors, to detach them from their history.\"\"\"\n # needed for truncated BPTT, called at every batch forward pass\n if isinstance(h, torch.Tensor):\n return h.detach()\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initial_state(self, batch_size, ts, trainable=False):\n \"\"\"\n Creates the initial memory.\n We should ensure each row of the memory is initialized to be unique,\n so initialize the matrix to be the identity. We then pad or truncate\n as necessary so that init_state is of size\n (batch_size, self.mem_slots, self.mem_size).\n Args:\n batch_size: The size of the batch.\n trainable: Whether the initial state is trainable. 
This is always True.\n Returns:\n init_state: A truncated or padded matrix of size\n (batch_size, self.mem_slots, self.mem_size).\n \"\"\"\n init_state = torch.stack([torch.eye(self.mem_slots) for _ in range(batch_size)])\n\n # pad the matrix with zeros\n if self.mem_size > self.mem_slots:\n difference = self.mem_size - self.mem_slots\n pad = torch.zeros((batch_size, self.mem_slots, difference))\n init_state = torch.cat([init_state, pad], -1)\n\n # truncation. take the first 'self.mem_size' components\n elif self.mem_size < self.mem_slots:\n init_state = init_state[:, :, :self.mem_size]\n\n init_state = init_state.unsqueeze(1)\n init_state = init_state.repeat(1, ts, 1, 1)\n init_state = init_state.reshape(batch_size * ts, self.mem_slots, -1)\n\n return init_state\n\n def multihead_attention(self, input, memory, mask = None):\n \"\"\"\n Perform multi-head attention from 'Attention is All You Need'.\n Implementation of the attention mechanism from\n https://arxiv.org/abs/1706.03762.\n Args:\n memory: Memory tensor to perform attention on.\n Returns:\n new_memory: New memory tensor.\n \"\"\"\n\n q = self.query_proj(memory)\n k = self.key_proj(input)\n v = self.value_proj(input)\n\n q = q.reshape(q.size(0), q.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n k = k.reshape(k.size(0), k.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n v = v.reshape(v.size(0), v.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n scores = torch.matmul(q, k.transpose(2, 3))\n\n mask = mask.unsqueeze(1).unsqueeze(1)\n #print(mask.size())\n #print(scores.size())\n #scores = scores.masked_fill(mask.bool(), float('-inf'))\n scores = Identity().apply(scores)\n\n scores = torch.softmax(scores, dim = -1)\n scores = scores * mask # mask for attending to prev positions only\n self.score_log = scores\n if True:\n if self.use_topk:\n topk = torch.topk(scores, dim = -1, k = self.topk)\n topk_mask = torch.zeros(scores.size()).to(scores.device)\n topk_mask.scatter_(3, topk.indices, 1)\n scores = scores * topk_mask\n else:\n memory_flat = memory.reshape(memory.size(0), -1).unsqueeze(1)\n memory_flat = memory_flat.repeat(1, input.shape[1], 1)\n\n N = torch.cat((input, memory_flat), dim = 2)\n N = self.competition_mlp(N)\n\n N = torch.nn.functional.gumbel_softmax(N, dim = 2, hard = True, tau = 0.5)\n\n N = N[:, :, 0]\n\n scores = scores * N.unsqueeze(1).unsqueeze(1)\n\n\n output = torch.matmul(scores, v)\n\n \"\"\"#print(memory.size())\n # First, a simple linear projection is used to construct queries\n qkv = self.qkv_projector(memory)\n # apply layernorm for every dim except the batch dim\n qkv = self.qkv_layernorm(qkv)\n\n # mem_slots needs to be dynamically computed since mem_slots got concatenated with inputs\n # example: self.mem_slots=10 and seq_length is 3, and then mem_slots is 10 + 1 = 11 for each 3 step forward pass\n # this is the same as self.mem_slots_plus_input, but defined to keep the sonnet implementation code style\n mem_slots = memory.shape[1] # denoted as N\n\n # split the qkv to multiple heads H\n # [B, N, F] => [B, N, H, F/H]\n qkv_reshape = qkv.view(qkv.shape[0], mem_slots, self.num_heads, self.qkv_size)\n\n # [B, N, H, F/H] => [B, H, N, F/H]\n qkv_transpose = qkv_reshape.permute(0, 2, 1, 3)\n\n # [B, H, N, key_size], [B, H, N, key_size], [B, H, N, value_size]\n q, k, v = torch.split(qkv_transpose, [self.key_size, self.key_size, self.value_size], -1)\n\n # scale q with d_k, the dimensionality of the key vectors\n q *= (self.key_size ** -0.5)\n\n # make it [B, H, N, N]\n dot_product = torch.matmul(q, 
k.permute(0, 1, 3, 2))\n weights = F.softmax(dot_product, dim=-1)\n\n if self.use_topk:\n topk = torch.topk(weights, dim = -1, k = self.topk)\n mask = torch.zeros(weights.size()).to(weights.device)\n mask.scatter_(3, topk.indices, 1)\n weights = weights * mask\n\n # output is [B, H, N, V]\n output = torch.matmul(weights, v)\"\"\"\n\n # [B, H, N, V] => [B, N, H, V] => [B, N, H*V]\n output_transpose = output.permute(0, 2, 1, 3).contiguous()\n new_memory = output_transpose.view((output_transpose.shape[0], output_transpose.shape[1], -1))\n\n return new_memory\n\n\n @property\n def state_size(self):\n return [self.mem_slots, self.mem_size]\n\n @property\n def output_size(self):\n return self.mem_slots * self.mem_size\n\n def calculate_gate_size(self):\n \"\"\"\n Calculate the gate size from the gate_style.\n Returns:\n The per sample, per head parameter size of each gate.\n \"\"\"\n if self.gate_style == 'unit':\n return self.mem_size\n elif self.gate_style == 'memory':\n return 1\n else: # self.gate_style == None\n return 0\n\n def create_gates(self, inputs, memory):\n \"\"\"\n Create input and forget gates for this step using `inputs` and `memory`.\n Args:\n inputs: Tensor input.\n memory: The current state of memory.\n Returns:\n input_gate: A LSTM-like insert gate.\n forget_gate: A LSTM-like forget gate.\n \"\"\"\n # We'll create the input and forget gates at once. Hence, calculate double\n # the gate size.\n\n # equation 8: since there is no output gate, h is just a tanh'ed m\n memory = torch.tanh(memory)\n\n # TODO: check this input flattening is correct\n # sonnet uses this, but i think it assumes time step of 1 for all cases\n # if inputs is (B, T, features) where T > 1, this gets incorrect\n # inputs = inputs.view(inputs.shape[0], -1)\n\n # fixed implementation\n if len(inputs.shape) == 3:\n #if inputs.shape[1] > 1:\n # raise ValueError(\n # \"input seq length is larger than 1. 
create_gate function is meant to be called for each step, with input seq length of 1\")\n inputs = inputs.view(inputs.shape[0], -1)\n # matmul for equation 4 and 5\n # there is no output gate, so equation 6 is not implemented\n #print(inputs.size())\n gate_inputs = self.input_gate_projector(inputs)\n gate_inputs = gate_inputs.unsqueeze(dim=1)\n gate_memory = self.memory_gate_projector(memory)\n else:\n raise ValueError(\"input shape of create_gate function is 2, expects 3\")\n\n # this completes the equation 4 and 5\n #print(gate_inputs.size())\n #print(gate_memory.size())\n gates = gate_memory + gate_inputs\n gates = torch.split(gates, split_size_or_sections=int(gates.shape[2] / 2), dim=2)\n input_gate, forget_gate = gates\n assert input_gate.shape[2] == forget_gate.shape[2]\n\n # to be used for equation 7\n input_gate = torch.sigmoid(input_gate + self.input_bias)\n forget_gate = torch.sigmoid(forget_gate + self.forget_bias)\n\n return input_gate, forget_gate\n\n def attend_over_memory(self, inputs, memory, mask = None):\n \"\"\"\n Perform multiheaded attention over `memory`.\n Args:\n memory: Current relational memory.\n Returns:\n The attended-over memory.\n \"\"\"\n for _ in range(self.num_blocks):\n attended_memory = self.multihead_attention(inputs, memory, mask = mask)\n\n # Add a skip connection to the multiheaded attention's input.\n memory = self.attended_memory_layernorm(memory + attended_memory)\n\n # add a skip connection to the attention_mlp's input.\n attention_mlp = memory\n for i, l in enumerate(self.attention_mlp):\n attention_mlp = self.attention_mlp[i](attention_mlp)\n attention_mlp = F.relu(attention_mlp)\n memory = self.attended_memory_layernorm2(memory + attention_mlp)\n\n return memory\n\n def forward_step(self, inputs, memory, treat_input_as_matrix=False, mask = None, other_inp = None):\n \"\"\"\n Forward step of the relational memory core.\n Args:\n inputs: Tensor input.\n memory: Memory output from the previous time step.\n treat_input_as_matrix: Optional, whether to treat `input` as a sequence\n of matrices. Default to False, in which case the input is flattened\n into a vector.\n Returns:\n output: This time step's output.\n next_memory: The next version of memory to use.\n \"\"\"\n\n if treat_input_as_matrix:\n # keep (Batch, Seq, ...) dim (0, 1), flatten starting from dim 2\n inputs = inputs.view(inputs.shape[0], inputs.shape[1], -1)\n #print(inputs.size())\n # apply linear layer for dim 2\n inputs_reshape = self.input_projector(inputs)\n #print(inputs_reshape.size())\n else:\n # keep (Batch, ...) 
dim (0), flatten starting from dim 1\n inputs = inputs.view(inputs.shape[0], -1)\n # apply linear layer for dim 1\n inputs = self.input_projector(inputs)\n # unsqueeze the time step to dim 1\n inputs_reshape = inputs.unsqueeze(dim=1)\n\n #memory_plus_input = torch.cat([memory, inputs_reshape], dim=1)\n #print(memory_plus_input.size())\n next_memory = self.attend_over_memory(inputs_reshape, memory, mask = mask)\n\n #print(next_memory.size())\n #print(inputs_reshape.size())\n\n # cut out the concatenated input vectors from the original memory slots\n #n = inputs_reshape.shape[1]\n #next_memory = next_memory[:, :-n, :]\n\n if self.gate_style == 'unit' or self.gate_style == 'memory':\n # these gates are sigmoid-applied ones for equation 7\n input_gate, forget_gate = self.create_gates(other_inp.unsqueeze(1), memory)\n # equation 7 calculation\n next_memory = input_gate * torch.tanh(next_memory)\n next_memory += forget_gate * memory\n\n\n output = next_memory.view(next_memory.shape[0], -1)\n return output, next_memory\n\n # relational memory这里是不是\n def forward(self, inputs, memory):\n # Starting each batch, we detach the hidden state from how it was previously produced.\n # If we didn't, the model would try backpropagating all the way to start of the dataset.\n # memory = self.repackage_hidden(memory)\n\n # for loop implementation of (entire) recurrent forward pass of the model\n # inputs is batch first [batch, seq], and output logit per step is [batch, vocab]\n # so the concatenated logits are [seq * batch, vocab]\n\n # targets are flattened [seq, batch] => [seq * batch], so the dimension is correct\n\n B, T, D = inputs.size()\n mask = torch.ones(inputs.size(1), inputs.size(1)).to(inputs.device)\n mask = torch.tril(mask)\n mask = mask.unsqueeze(0)\n mask = mask.repeat(inputs.size(0), 1, 1)\n\n mask = mask.reshape(mask.size(0) * mask.size(1), -1)\n\n inputs_ = inputs.unsqueeze(2)\n inputs_ = inputs_.repeat(1, 1, inputs.size(1), 1)\n inputs_ = inputs_.reshape(B * T, T, -1)\n\n\n logits = []\n #print(inputs.size())\n #print(memory.size())\n #memory = self.repackage_hidden(memory)\n # shape[1] is seq_lenth T\n #if not parallel:\n # for idx_step in range(inputs.shape[1]):\n # logit, memory = self.forward_step(inputs[:, idx_step], memory)\n # logits.append(logit)\n # logits = torch.cat(logits)\n #else:\n logits, memory = self.forward_step(inputs_, memory, treat_input_as_matrix = True, mask = mask, other_inp = inputs.reshape(B * T, -1))\n \n memory_out = self.output_projector(memory.view(memory.shape[0], -1))\n\n #print(inputs.size())\n #print(memory_out.size())\n #print('------')\n if self.return_all_outputs:\n return logits, memory_out , memory\n else:\n return logits, memory_out, memory\n\n def print_log(self):\n print(self.score_log[25])" } ]
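Both RelationalMemory snippets above rely on the same top-k trick inside multihead_attention: softmax the query-key scores, keep only the k largest entries per row with torch.topk, and zero out the rest through a scattered mask. A standalone sketch of just that masking step follows; the tensor shapes and the topk value are illustrative and not taken from the record.

import torch

# Sketch of the top-k score masking used in the multihead_attention methods above.
# Shapes are illustrative: batch B, heads H, memory slots N, input tokens T.
B, H, N, T, topk = 2, 4, 6, 10, 3

scores = torch.softmax(torch.randn(B, H, N, T), dim=-1)  # attention weights per slot

top = torch.topk(scores, k=topk, dim=-1)                 # indices of the k largest scores
mask = torch.zeros_like(scores)
mask.scatter_(3, top.indices, 1)                         # 1 where a score is kept
sparse_scores = scores * mask                            # everything else is zeroed

assert int((sparse_scores > 0).sum(dim=-1).max()) <= topk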
import math
import time
import numpy as np
import torch
import torch.nn.functional as F
import multi_part_assembly.utils.wx_transformer_utilities.fairseq_utils as utils
from typing import Dict, Optional, Tuple
from torch import Tensor, nn
from torch.nn import Parameter
from .fairseq_dropout import FairseqDropout
from .attention_rim import MultiHeadAttention as MHAMemory
from .quant_noise import quant_noise
from .group_linear_layer import GroupLinearLayer
from .relational_memory_volatile import RelationalMemory
from .relational_memory_regressive import RelationalMemory as RelationalMemoryRegressive
14,080
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #import models.fairseq_util #from fairseq.incremental_decoding_utils import with_incremental_state #from .relational_memory_lstm import RelationalMemory # 为什么作者没有从这两个类别中引入relmem? #from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer as GroupLinearLayer class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, nblocks=1, top_k_ratio=None, use_value_competition=True, shared_memory_attention = False, use_topk = False, topk = 3, num_steps = 5, mem_slots = 4, null_attention = False, regressive = False ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_dim = embed_dim // num_heads self.shared_memory_attention = shared_memory_attention print('total heads', self.num_heads) print('head dim', self.head_dim) self.use_topk = use_topk self.topk = topk print('use topk?' + str(self.use_topk)) print('topk:'+str(self.topk)) assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) if not self.shared_memory_attention: # 这里的共享memory_attention是什么内容呢?表示的是不在不同的layer之间共享memory吗?
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #import models.fairseq_util #from fairseq.incremental_decoding_utils import with_incremental_state #from .relational_memory_lstm import RelationalMemory # 为什么作者没有从这两个类别中引入relmem? #from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer as GroupLinearLayer class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, nblocks=1, top_k_ratio=None, use_value_competition=True, shared_memory_attention = False, use_topk = False, topk = 3, num_steps = 5, mem_slots = 4, null_attention = False, regressive = False ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.head_dim = embed_dim // num_heads self.shared_memory_attention = shared_memory_attention print('total heads', self.num_heads) print('head dim', self.head_dim) self.use_topk = use_topk self.topk = topk print('use topk?' + str(self.use_topk)) print('topk:'+str(self.topk)) assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) if not self.shared_memory_attention: # 这里的共享memory_attention是什么内容呢?表示的是不在不同的layer之间共享memory吗?
self.k_proj = quant_noise(GroupLinearLayer(self.kdim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size)
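The gold next line above wraps a GroupLinearLayer (imported earlier) in quant_noise, splitting the embedding into nblocks independent projections. The actual GroupLinearLayer implementation is not included in this record; the snippet below is a hypothetical block-wise linear in the same spirit, one weight matrix per block applied with a batched matmul.

import torch
import torch.nn as nn

class BlockwiseLinear(nn.Module):
    """Hypothetical stand-in for GroupLinearLayer: an independent linear map per block."""
    def __init__(self, din, dout, num_blocks):
        super().__init__()
        self.weight = nn.Parameter(0.01 * torch.randn(num_blocks, din, dout))

    def forward(self, x):
        # x: (batch, num_blocks, din) -> (batch, num_blocks, dout)
        x = x.permute(1, 0, 2)           # (num_blocks, batch, din)
        x = torch.bmm(x, self.weight)    # one matmul per block
        return x.permute(1, 0, 2)

proj = BlockwiseLinear(din=64, dout=32, num_blocks=4)
print(proj(torch.randn(8, 4, 64)).shape)  # torch.Size([8, 4, 32])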
3
2023-12-15 13:13:01+00:00
16k
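Taken together, the fields above form one retrieval-augmented code-completion example: the retrieved snippets plus the truncated source are the model input, the single line shown as the next-line field is the target, and the integer index (3 here) presumably marks which retrieved snippet is the relevant one. A minimal scoring sketch is given below; the key names (context, import_statement, cropped_code, next_line) are assumed from this record's layout, and predict_next_line is a placeholder for any completion model.

def exact_match(record: dict, predict_next_line) -> bool:
    """Score one record by exact match of the predicted next line (sketch)."""
    # Assumed keys: "context" (list of {"identifier", "path", "snippet"}),
    # "import_statement", "cropped_code", "next_line".
    context = "\n\n".join(s["snippet"] for s in record["context"])
    prompt = f'{context}\n\n{record["import_statement"]}\n{record["cropped_code"]}'
    prediction = predict_next_line(prompt)  # placeholder for any code-completion model
    return prediction.strip() == record["next_line"].strip()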
camenduru/FreeInit-hf
app.py
[ { "identifier": "UNet3DConditionModel", "path": "animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n use_inflated_groupnorm=False,\n \n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n ):\n super().__init__()\n \n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n 
downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n \n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n if 
use_inflated_groupnorm:\n self.conv_norm_out = InflatedGroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n else:\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u 
= model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "AnimationFreeInitPipeline", "path": "animatediff/pipelines/pipeline_animation.py", "snippet": "class AnimationFreeInitPipeline(AnimationPipeline):\n _optional_components = []\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet3DConditionModel,\n scheduler: Union[\n DDIMScheduler,\n PNDMScheduler,\n LMSDiscreteScheduler,\n EulerDiscreteScheduler,\n EulerAncestralDiscreteScheduler,\n DPMSolverMultistepScheduler,\n ],\n ):\n super().__init__(vae, text_encoder, tokenizer, unet, scheduler)\n self.freq_filter = None\n\n \n @torch.no_grad()\n def init_filter(self, video_length, height, width, filter_params):\n # initialize frequency filter for noise reinitialization\n batch_size = 1\n num_channels_latents = self.unet.in_channels\n filter_shape = [\n batch_size, \n num_channels_latents, \n video_length, \n height // self.vae_scale_factor, \n width // self.vae_scale_factor\n ]\n # self.freq_filter = get_freq_filter(filter_shape, device=self._execution_device, params=filter_params)\n self.freq_filter = get_freq_filter(\n filter_shape, \n device=self._execution_device, \n filter_type=filter_params.method,\n n=filter_params.n,\n d_s=filter_params.d_s,\n d_t=filter_params.d_t\n )\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]],\n video_length: Optional[int],\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_videos_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"tensor\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: Optional[int] = 1,\n # freeinit args\n num_iters: int = 5,\n use_fast_sampling: bool = False,\n save_intermediate: bool = False,\n return_orig: bool = False,\n save_dir: str = None,\n save_name: str = None,\n use_fp16: bool = False,\n **kwargs\n ):\n if use_fp16:\n print('Warning: using half percision for inferencing!')\n self.vae.to(dtype=torch.float16)\n self.unet.to(dtype=torch.float16)\n self.text_encoder.to(dtype=torch.float16)\n # Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # Check inputs. Raise error if not correct\n # import pdb\n # pdb.set_trace()\n self.check_inputs(prompt, height, width, callback_steps)\n\n # Define call parameters\n # batch_size = 1 if isinstance(prompt, str) else len(prompt)\n batch_size = 1\n if latents is not None:\n batch_size = latents.shape[0]\n if isinstance(prompt, list):\n batch_size = len(prompt)\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # Encode input prompt\n prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size\n if negative_prompt is not None:\n negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size \n text_embeddings = self._encode_prompt(\n prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt\n )\n\n # Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # Prepare latent variables\n num_channels_latents = self.unet.in_channels\n latents = self.prepare_latents(\n batch_size * num_videos_per_prompt,\n num_channels_latents,\n video_length,\n height,\n width,\n text_embeddings.dtype,\n device,\n generator,\n latents,\n )\n latents_dtype = latents.dtype\n\n # Prepare extra step kwargs.\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # Sampling with FreeInit.\n for iter in range(num_iters):\n # FreeInit ------------------------------------------------------------------\n if iter == 0:\n initial_noise = latents.detach().clone()\n else:\n # 1. DDPM Forward with initial noise, get noisy latents z_T\n # if use_fast_sampling:\n # current_diffuse_timestep = self.scheduler.config.num_train_timesteps / num_iters * (iter + 1) - 1\n # else:\n # current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1\n current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1 # diffuse to t=999 noise level\n diffuse_timesteps = torch.full((batch_size,),int(current_diffuse_timestep))\n diffuse_timesteps = diffuse_timesteps.long()\n z_T = self.scheduler.add_noise(\n original_samples=latents.to(device), \n noise=initial_noise.to(device), \n timesteps=diffuse_timesteps.to(device)\n )\n # 2. create random noise z_rand for high-frequency\n z_rand = torch.randn((batch_size * num_videos_per_prompt, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor), device=device)\n # 3. 
Roise Reinitialization\n latents = freq_mix_3d(z_T.to(dtype=torch.float32), z_rand, LPF=self.freq_filter)\n latents = latents.to(latents_dtype)\n \n # Coarse-to-Fine Sampling for Fast Inference (can lead to sub-optimal results)\n if use_fast_sampling:\n current_num_inference_steps= int(num_inference_steps / num_iters * (iter + 1))\n self.scheduler.set_timesteps(current_num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n # --------------------------------------------------------------------------\n\n # Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n # if use_fast_sampling:\n # # Coarse-to-Fine Sampling for Fast Inference\n # current_num_inference_steps= int(num_inference_steps / num_iters * (iter + 1))\n # current_timesteps = timesteps[:current_num_inference_steps]\n # else:\n current_timesteps = timesteps\n for i, t in enumerate(current_timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual\n noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample.to(dtype=latents_dtype)\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # call the callback, if provided\n if i == len(current_timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n \n # save intermediate results\n if save_intermediate:\n # Post-processing\n video = self.decode_latents(latents)\n video = torch.from_numpy(video)\n os.makedirs(save_dir, exist_ok=True)\n save_videos_grid(video, f\"{save_dir}/{save_name}_iter{iter}.gif\")\n \n if return_orig and iter==0:\n orig_video = self.decode_latents(latents)\n orig_video = torch.from_numpy(orig_video)\n\n # Post-processing\n video = self.decode_latents(latents)\n\n # Convert to tensor\n if output_type == \"tensor\":\n video = torch.from_numpy(video)\n\n if not return_dict:\n return video\n\n if return_orig:\n return AnimationFreeInitPipelineOutput(videos=video, orig_videos=orig_video)\n\n return AnimationFreeInitPipelineOutput(videos=video)" }, { "identifier": "save_videos_grid", "path": "animatediff/utils/util.py", "snippet": "def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=6, fps=8):\n videos = rearrange(videos, \"b c t h w -> t b c h w\")\n outputs = []\n for x in videos:\n x = torchvision.utils.make_grid(x, nrow=n_rows)\n x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)\n if rescale:\n x = (x + 1.0) / 2.0 # -1,1 -> 0,1\n x = (x * 255).numpy().astype(np.uint8)\n outputs.append(x)\n\n os.makedirs(os.path.dirname(path), exist_ok=True)\n imageio.mimsave(path, outputs, fps=fps)" }, { "identifier": "convert_ldm_unet_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False, controlnet=False):\n \"\"\"\n Takes a state dict and a config, and 
returns a converted checkpoint.\n \"\"\"\n\n # extract state_dict for UNet\n unet_state_dict = {}\n keys = list(checkpoint.keys())\n\n if controlnet:\n unet_key = \"control_model.\"\n else:\n unet_key = \"model.diffusion_model.\"\n\n # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA\n if sum(k.startswith(\"model_ema\") for k in keys) > 100 and extract_ema:\n print(f\"Checkpoint {path} has both EMA and non-EMA weights.\")\n print(\n \"In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA\"\n \" weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag.\"\n )\n for key in keys:\n if key.startswith(\"model.diffusion_model\"):\n flat_ema_key = \"model_ema.\" + \"\".join(key.split(\".\")[1:])\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(flat_ema_key)\n else:\n if sum(k.startswith(\"model_ema\") for k in keys) > 100:\n print(\n \"In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA\"\n \" weights (usually better for inference), please make sure to add the `--extract_ema` flag.\"\n )\n\n for key in keys:\n if key.startswith(unet_key):\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"time_embedding.linear_1.weight\"] = unet_state_dict[\"time_embed.0.weight\"]\n new_checkpoint[\"time_embedding.linear_1.bias\"] = unet_state_dict[\"time_embed.0.bias\"]\n new_checkpoint[\"time_embedding.linear_2.weight\"] = unet_state_dict[\"time_embed.2.weight\"]\n new_checkpoint[\"time_embedding.linear_2.bias\"] = unet_state_dict[\"time_embed.2.bias\"]\n\n if config[\"class_embed_type\"] is None:\n # No parameters to port\n ...\n elif config[\"class_embed_type\"] == \"timestep\" or config[\"class_embed_type\"] == \"projection\":\n new_checkpoint[\"class_embedding.linear_1.weight\"] = unet_state_dict[\"label_emb.0.0.weight\"]\n new_checkpoint[\"class_embedding.linear_1.bias\"] = unet_state_dict[\"label_emb.0.0.bias\"]\n new_checkpoint[\"class_embedding.linear_2.weight\"] = unet_state_dict[\"label_emb.0.2.weight\"]\n new_checkpoint[\"class_embedding.linear_2.bias\"] = unet_state_dict[\"label_emb.0.2.bias\"]\n else:\n raise NotImplementedError(f\"Not implemented `class_embed_type`: {config['class_embed_type']}\")\n\n new_checkpoint[\"conv_in.weight\"] = unet_state_dict[\"input_blocks.0.0.weight\"]\n new_checkpoint[\"conv_in.bias\"] = unet_state_dict[\"input_blocks.0.0.bias\"]\n\n if not controlnet:\n new_checkpoint[\"conv_norm_out.weight\"] = unet_state_dict[\"out.0.weight\"]\n new_checkpoint[\"conv_norm_out.bias\"] = unet_state_dict[\"out.0.bias\"]\n new_checkpoint[\"conv_out.weight\"] = unet_state_dict[\"out.2.weight\"]\n new_checkpoint[\"conv_out.bias\"] = unet_state_dict[\"out.2.bias\"]\n\n # Retrieves the keys for the input blocks only\n num_input_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"input_blocks\" in layer})\n input_blocks = {\n layer_id: [key for key in unet_state_dict if f\"input_blocks.{layer_id}\" in key]\n for layer_id in range(num_input_blocks)\n }\n\n # Retrieves the keys for the middle blocks only\n num_middle_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"middle_block\" in layer})\n middle_blocks = {\n layer_id: [key for key in unet_state_dict if f\"middle_block.{layer_id}\" in key]\n for layer_id in range(num_middle_blocks)\n }\n\n # Retrieves the keys for the output 
blocks only\n num_output_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"output_blocks\" in layer})\n output_blocks = {\n layer_id: [key for key in unet_state_dict if f\"output_blocks.{layer_id}\" in key]\n for layer_id in range(num_output_blocks)\n }\n\n for i in range(1, num_input_blocks):\n block_id = (i - 1) // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = (i - 1) % (config[\"layers_per_block\"] + 1)\n\n resnets = [\n key for key in input_blocks[i] if f\"input_blocks.{i}.0\" in key and f\"input_blocks.{i}.0.op\" not in key\n ]\n attentions = [key for key in input_blocks[i] if f\"input_blocks.{i}.1\" in key]\n\n if f\"input_blocks.{i}.0.op.weight\" in unet_state_dict:\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.weight\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.weight\"\n )\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.bias\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.bias\"\n )\n\n paths = renew_resnet_paths(resnets)\n meta_path = {\"old\": f\"input_blocks.{i}.0\", \"new\": f\"down_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\"old\": f\"input_blocks.{i}.1\", \"new\": f\"down_blocks.{block_id}.attentions.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n resnet_0 = middle_blocks[0]\n attentions = middle_blocks[1]\n resnet_1 = middle_blocks[2]\n\n resnet_0_paths = renew_resnet_paths(resnet_0)\n assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)\n\n resnet_1_paths = renew_resnet_paths(resnet_1)\n assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)\n\n attentions_paths = renew_attention_paths(attentions)\n meta_path = {\"old\": \"middle_block.1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(\n attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n for i in range(num_output_blocks):\n block_id = i // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = i % (config[\"layers_per_block\"] + 1)\n output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]\n output_block_list = {}\n\n for layer in output_block_layers:\n layer_id, layer_name = layer.split(\".\")[0], shave_segments(layer, 1)\n if layer_id in output_block_list:\n output_block_list[layer_id].append(layer_name)\n else:\n output_block_list[layer_id] = [layer_name]\n\n if len(output_block_list) > 1:\n resnets = [key for key in output_blocks[i] if f\"output_blocks.{i}.0\" in key]\n attentions = [key for key in output_blocks[i] if f\"output_blocks.{i}.1\" in key]\n\n resnet_0_paths = renew_resnet_paths(resnets)\n paths = renew_resnet_paths(resnets)\n\n meta_path = {\"old\": f\"output_blocks.{i}.0\", \"new\": f\"up_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n output_block_list = {k: sorted(v) for k, v in output_block_list.items()}\n if [\"conv.bias\", \"conv.weight\"] in output_block_list.values():\n index = list(output_block_list.values()).index([\"conv.bias\", \"conv.weight\"])\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.weight\"] = 
unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.weight\"\n ]\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.bias\"] = unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.bias\"\n ]\n\n # Clear attentions as they have been attributed above.\n if len(attentions) == 2:\n attentions = []\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\n \"old\": f\"output_blocks.{i}.1\",\n \"new\": f\"up_blocks.{block_id}.attentions.{layer_in_block_id}\",\n }\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n else:\n resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)\n for path in resnet_0_paths:\n old_path = \".\".join([\"output_blocks\", str(i), path[\"old\"]])\n new_path = \".\".join([\"up_blocks\", str(block_id), \"resnets\", str(layer_in_block_id), path[\"new\"]])\n\n new_checkpoint[new_path] = unet_state_dict[old_path]\n\n if controlnet:\n # conditioning embedding\n\n orig_index = 0\n\n new_checkpoint[\"controlnet_cond_embedding.conv_in.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_in.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n orig_index += 2\n\n diffusers_index = 0\n\n while diffusers_index < 6:\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n diffusers_index += 1\n orig_index += 2\n\n new_checkpoint[\"controlnet_cond_embedding.conv_out.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_out.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n # down blocks\n for i in range(num_input_blocks):\n new_checkpoint[f\"controlnet_down_blocks.{i}.weight\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.weight\")\n new_checkpoint[f\"controlnet_down_blocks.{i}.bias\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.bias\")\n\n # mid block\n new_checkpoint[\"controlnet_mid_block.weight\"] = unet_state_dict.pop(\"middle_block_out.0.weight\")\n new_checkpoint[\"controlnet_mid_block.bias\"] = unet_state_dict.pop(\"middle_block_out.0.bias\")\n\n return new_checkpoint" }, { "identifier": "convert_ldm_clip_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_clip_checkpoint(checkpoint):\n text_model = CLIPTextModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n keys = list(checkpoint.keys())\n\n text_model_dict = {}\n\n for key in keys:\n if key.startswith(\"cond_stage_model.transformer\"):\n text_model_dict[key[len(\"cond_stage_model.transformer.\") :]] = checkpoint[key]\n\n text_model.load_state_dict(text_model_dict)\n\n return text_model" }, { "identifier": "convert_ldm_vae_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_vae_checkpoint(checkpoint, config):\n # extract state dict for VAE\n vae_state_dict = {}\n vae_key = \"first_stage_model.\"\n keys = list(checkpoint.keys())\n for key in keys:\n if key.startswith(vae_key):\n vae_state_dict[key.replace(vae_key, \"\")] = checkpoint.get(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"encoder.conv_in.weight\"] = vae_state_dict[\"encoder.conv_in.weight\"]\n 
new_checkpoint[\"encoder.conv_in.bias\"] = vae_state_dict[\"encoder.conv_in.bias\"]\n new_checkpoint[\"encoder.conv_out.weight\"] = vae_state_dict[\"encoder.conv_out.weight\"]\n new_checkpoint[\"encoder.conv_out.bias\"] = vae_state_dict[\"encoder.conv_out.bias\"]\n new_checkpoint[\"encoder.conv_norm_out.weight\"] = vae_state_dict[\"encoder.norm_out.weight\"]\n new_checkpoint[\"encoder.conv_norm_out.bias\"] = vae_state_dict[\"encoder.norm_out.bias\"]\n\n new_checkpoint[\"decoder.conv_in.weight\"] = vae_state_dict[\"decoder.conv_in.weight\"]\n new_checkpoint[\"decoder.conv_in.bias\"] = vae_state_dict[\"decoder.conv_in.bias\"]\n new_checkpoint[\"decoder.conv_out.weight\"] = vae_state_dict[\"decoder.conv_out.weight\"]\n new_checkpoint[\"decoder.conv_out.bias\"] = vae_state_dict[\"decoder.conv_out.bias\"]\n new_checkpoint[\"decoder.conv_norm_out.weight\"] = vae_state_dict[\"decoder.norm_out.weight\"]\n new_checkpoint[\"decoder.conv_norm_out.bias\"] = vae_state_dict[\"decoder.norm_out.bias\"]\n\n new_checkpoint[\"quant_conv.weight\"] = vae_state_dict[\"quant_conv.weight\"]\n new_checkpoint[\"quant_conv.bias\"] = vae_state_dict[\"quant_conv.bias\"]\n new_checkpoint[\"post_quant_conv.weight\"] = vae_state_dict[\"post_quant_conv.weight\"]\n new_checkpoint[\"post_quant_conv.bias\"] = vae_state_dict[\"post_quant_conv.bias\"]\n\n # Retrieves the keys for the encoder down blocks only\n num_down_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"encoder.down\" in layer})\n down_blocks = {\n layer_id: [key for key in vae_state_dict if f\"down.{layer_id}\" in key] for layer_id in range(num_down_blocks)\n }\n\n # Retrieves the keys for the decoder up blocks only\n num_up_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"decoder.up\" in layer})\n up_blocks = {\n layer_id: [key for key in vae_state_dict if f\"up.{layer_id}\" in key] for layer_id in range(num_up_blocks)\n }\n\n for i in range(num_down_blocks):\n resnets = [key for key in down_blocks[i] if f\"down.{i}\" in key and f\"down.{i}.downsample\" not in key]\n\n if f\"encoder.down.{i}.downsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.weight\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.weight\"\n )\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.bias\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.bias\"\n )\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"down.{i}.block\", \"new\": f\"down_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"encoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"encoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"encoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n\n for i in 
range(num_up_blocks):\n block_id = num_up_blocks - 1 - i\n resnets = [\n key for key in up_blocks[block_id] if f\"up.{block_id}\" in key and f\"up.{block_id}.upsample\" not in key\n ]\n\n if f\"decoder.up.{block_id}.upsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.weight\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.weight\"\n ]\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.bias\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.bias\"\n ]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"up.{block_id}.block\", \"new\": f\"up_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"decoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"decoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"decoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n return new_checkpoint" }, { "identifier": "get_freq_filter", "path": "animatediff/utils/freeinit_utils.py", "snippet": "def get_freq_filter(shape, device, filter_type, n, d_s, d_t):\n \"\"\"\n Form the frequency filter for noise reinitialization.\n\n Args:\n shape: shape of latent (B, C, T, H, W)\n filter_type: type of the freq filter\n n: (only for butterworth) order of the filter, larger n ~ ideal, smaller n ~ gaussian\n d_s: normalized stop frequency for spatial dimensions (0.0-1.0)\n d_t: normalized stop frequency for temporal dimension (0.0-1.0)\n \"\"\"\n if filter_type == \"gaussian\":\n return gaussian_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"ideal\":\n return ideal_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"box\":\n return box_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"butterworth\":\n return butterworth_low_pass_filter(shape=shape, n=n, d_s=d_s, d_t=d_t).to(device)\n else:\n raise NotImplementedError" } ]
import os import torch import random import gradio as gr from glob import glob from omegaconf import OmegaConf from safetensors import safe_open from diffusers import AutoencoderKL from diffusers import EulerDiscreteScheduler, DDIMScheduler from diffusers.utils.import_utils import is_xformers_available from transformers import CLIPTextModel, CLIPTokenizer from animatediff.models.unet import UNet3DConditionModel from animatediff.pipelines.pipeline_animation import AnimationFreeInitPipeline from animatediff.utils.util import save_videos_grid from animatediff.utils.convert_from_ckpt import convert_ldm_unet_checkpoint, convert_ldm_clip_checkpoint, convert_ldm_vae_checkpoint from diffusers.training_utils import set_seed from animatediff.utils.freeinit_utils import get_freq_filter from collections import namedtuple
13894
["use_fp16"] ], # 1-ToonYou [ "toonyou_beta3.safetensors", "mm_sd_v14.ckpt", "(best quality, masterpiece), 1girl, looking at viewer, blurry background, upper body, contemporary, dress", "(worst quality, low quality)", 512, 512, "478028150728261", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 2-Lyriel [ "lyriel_v16.safetensors", "mm_sd_v14.ckpt", "hypercars cyberpunk moving, muted colors, swirling color smokes, legend, cityscape, space", "3d, cartoon, anime, sketches, worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 3-RCNZ [ "rcnzCartoon3d_v10.safetensors", "mm_sd_v14.ckpt", "A cute raccoon playing guitar in a boat on the ocean", "worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 4-MajicMix [ "majicmixRealistic_v5Preview.safetensors", "mm_sd_v14.ckpt", "1girl, reading book", "(ng_deepnegative_v1_75t:1.2), (badhandv4:1), (worst quality:2), (low quality:2), (normal quality:2), lowres, bad anatomy, bad hands, watermark, moles", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # # 5-RealisticVision # [ # "realisticVisionV51_v20Novae.safetensors", # "mm_sd_v14.ckpt", # "A panda standing on a surfboard in the ocean in sunset.", # "worst quality, low quality, nsfw, logo", # 512, 512, "2005563494988190", # "butterworth", 0.25, 0.25, 3, # ["use_fp16"] # ] ] # clean unrelated ckpts # ckpts = [ # "realisticVisionV40_v20Novae.safetensors", # "majicmixRealistic_v5Preview.safetensors", # "rcnzCartoon3d_v10.safetensors", # "lyriel_v16.safetensors", # "toonyou_beta3.safetensors" # ] # for path in glob(os.path.join("models", "DreamBooth_LoRA", "*.safetensors")): # for ckpt in ckpts: # if path.endswith(ckpt): break # else: # print(f"### Cleaning {path} ...") # os.system(f"rm -rf {path}") # os.system(f"rm -rf {os.path.join('models', 'DreamBooth_LoRA', '*.safetensors')}") # os.system(f"bash download_bashscripts/1-ToonYou.sh") # os.system(f"bash download_bashscripts/2-Lyriel.sh") # os.system(f"bash download_bashscripts/3-RcnzCartoon.sh") # os.system(f"bash download_bashscripts/4-MajicMix.sh") # os.system(f"bash download_bashscripts/5-RealisticVision.sh") # # clean Gradio cache # print(f"### Cleaning cached examples ...") # os.system(f"rm -rf gradio_cached_examples/") class AnimateController: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module") self.personalized_model_dir = os.path.join(self.basedir, "models", "DreamBooth_LoRA") self.savedir = os.path.join(self.basedir, "samples") os.makedirs(self.savedir, exist_ok=True) self.base_model_list = [] self.motion_module_list = [] self.filter_type_list = [ "butterworth", "gaussian", "box", "ideal" ] self.selected_base_model = None self.selected_motion_module = None self.selected_filter_type = None self.set_width = None self.set_height = None self.set_d_s = None self.set_d_t = None self.refresh_motion_module() self.refresh_personalized_model() # config models self.inference_config = OmegaConf.load(inference_config_path) self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda() self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda()
pretrained_model_path = "models/StableDiffusion/stable-diffusion-v1-5" inference_config_path = "configs/inference/inference-v1.yaml" css = """ .toolbutton { margin-buttom: 0em 0em 0em 0em; max-width: 2.5em; min-width: 2.5em !important; height: 2.5em; } """ examples = [ # 0-RealisticVision [ "realisticVisionV51_v20Novae.safetensors", "mm_sd_v14.ckpt", "A panda standing on a surfboard in the ocean under moonlight.", "worst quality, low quality, nsfw, logo", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 1-ToonYou [ "toonyou_beta3.safetensors", "mm_sd_v14.ckpt", "(best quality, masterpiece), 1girl, looking at viewer, blurry background, upper body, contemporary, dress", "(worst quality, low quality)", 512, 512, "478028150728261", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 2-Lyriel [ "lyriel_v16.safetensors", "mm_sd_v14.ckpt", "hypercars cyberpunk moving, muted colors, swirling color smokes, legend, cityscape, space", "3d, cartoon, anime, sketches, worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 3-RCNZ [ "rcnzCartoon3d_v10.safetensors", "mm_sd_v14.ckpt", "A cute raccoon playing guitar in a boat on the ocean", "worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 4-MajicMix [ "majicmixRealistic_v5Preview.safetensors", "mm_sd_v14.ckpt", "1girl, reading book", "(ng_deepnegative_v1_75t:1.2), (badhandv4:1), (worst quality:2), (low quality:2), (normal quality:2), lowres, bad anatomy, bad hands, watermark, moles", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # # 5-RealisticVision # [ # "realisticVisionV51_v20Novae.safetensors", # "mm_sd_v14.ckpt", # "A panda standing on a surfboard in the ocean in sunset.", # "worst quality, low quality, nsfw, logo", # 512, 512, "2005563494988190", # "butterworth", 0.25, 0.25, 3, # ["use_fp16"] # ] ] # clean unrelated ckpts # ckpts = [ # "realisticVisionV40_v20Novae.safetensors", # "majicmixRealistic_v5Preview.safetensors", # "rcnzCartoon3d_v10.safetensors", # "lyriel_v16.safetensors", # "toonyou_beta3.safetensors" # ] # for path in glob(os.path.join("models", "DreamBooth_LoRA", "*.safetensors")): # for ckpt in ckpts: # if path.endswith(ckpt): break # else: # print(f"### Cleaning {path} ...") # os.system(f"rm -rf {path}") # os.system(f"rm -rf {os.path.join('models', 'DreamBooth_LoRA', '*.safetensors')}") # os.system(f"bash download_bashscripts/1-ToonYou.sh") # os.system(f"bash download_bashscripts/2-Lyriel.sh") # os.system(f"bash download_bashscripts/3-RcnzCartoon.sh") # os.system(f"bash download_bashscripts/4-MajicMix.sh") # os.system(f"bash download_bashscripts/5-RealisticVision.sh") # # clean Gradio cache # print(f"### Cleaning cached examples ...") # os.system(f"rm -rf gradio_cached_examples/") class AnimateController: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module") self.personalized_model_dir = os.path.join(self.basedir, "models", "DreamBooth_LoRA") self.savedir = os.path.join(self.basedir, "samples") os.makedirs(self.savedir, exist_ok=True) self.base_model_list = [] self.motion_module_list = [] self.filter_type_list = [ "butterworth", "gaussian", "box", "ideal" ] self.selected_base_model = None self.selected_motion_module = None self.selected_filter_type = None self.set_width = None 
self.set_height = None self.set_d_s = None self.set_d_t = None self.refresh_motion_module() self.refresh_personalized_model() # config models self.inference_config = OmegaConf.load(inference_config_path) self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda() self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda()
self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda()
0
2023-12-19 21:06:32+00:00
16k
exislow/tidal-dl-ng
tidal_dl_ng/gui.py
[ { "identifier": "get_format_template", "path": "tidal_dl_ng/helper/path.py", "snippet": "def get_format_template(\n media: Track | Album | Playlist | UserPlaylist | Video | Mix | MediaType, settings\n) -> str | bool:\n result = False\n\n if isinstance(media, Track) or media == MediaType.TRACK:\n result = settings.data.format_track\n elif isinstance(media, Album) or media == MediaType.ALBUM:\n result = settings.data.format_album\n elif isinstance(media, Playlist | UserPlaylist) or media == MediaType.PLAYLIST:\n result = settings.data.format_playlist\n elif isinstance(media, Mix) or media == MediaType.MIX:\n result = settings.data.format_mix\n elif isinstance(media, Video) or media == MediaType.VIDEO:\n result = settings.data.format_video\n\n return result" }, { "identifier": "Settings", "path": "tidal_dl_ng/config.py", "snippet": "class Settings(BaseConfig, metaclass=SingletonMeta):\n cls_model = ModelSettings\n data = None\n\n def __init__(self):\n self.file_path = path_file_settings()\n self.read(self.file_path)" }, { "identifier": "Tidal", "path": "tidal_dl_ng/config.py", "snippet": "class Tidal(BaseConfig, metaclass=SingletonMeta):\n cls_model = ModelToken\n session: tidalapi.Session = None\n data: ModelToken = None\n token_from_storage: bool = False\n settings: Settings = None\n\n def __init__(self, settings: Settings = None):\n self.session = tidalapi.Session()\n # self.session.config.client_id = \"km8T1xS355y7dd3H\"\n # self.session.config.client_secret = \"vcmeGW1OuZ0fWYMCSZ6vNvSLJlT3XEpW0ambgYt5ZuI=\"\n self.session.video_quality = tidalapi.VideoQuality.high\n self.file_path = path_file_token()\n self.token_from_storage = self.read(self.file_path)\n self.login_token()\n\n if settings:\n self.settings = settings\n self.settings_apply()\n\n def settings_apply(self, settings: Settings = None) -> bool:\n if settings:\n self.settings = settings\n\n self.session.audio_quality = self.settings.data.quality_audio\n\n return True\n\n def login_token(self) -> bool:\n result = False\n\n if self.token_from_storage:\n try:\n result = self.session.load_oauth_session(\n self.data.token_type, self.data.access_token, self.data.refresh_token, self.data.expiry_time\n )\n except HTTPError:\n result = False\n\n return result\n\n def login_oauth_start(self, function=print) -> None:\n self.session.login_oauth_simple(function)\n\n def login_oauth_finish(self) -> bool:\n result = self.session.check_login()\n\n if result:\n self.token_persist()\n\n return result\n\n def token_persist(self) -> None:\n self.set_option(\"token_type\", self.session.token_type)\n self.set_option(\"access_token\", self.session.access_token)\n self.set_option(\"refresh_token\", self.session.refresh_token)\n self.set_option(\"expiry_time\", self.session.expiry_time)\n self.save()\n\n def login(self, fn_print: Callable) -> bool:\n is_token = self.login_token()\n result = False\n\n if is_token:\n fn_print(\"Yep, looks good! You are logged in.\")\n\n result = True\n elif not is_token:\n fn_print(\"You either do not have a token or your token is invalid.\")\n fn_print(\"No worries, we will handle this...\")\n self.login_oauth_start(fn_print)\n\n is_login = self.login_oauth_finish()\n\n if is_login:\n fn_print(\"The login was successful. I have stored your credentials (token).\")\n\n result = True\n else:\n fn_print(\"Something went wrong. Did you login using your browser correctly? 
May try again...\")\n\n return result" }, { "identifier": "QualityVideo", "path": "tidal_dl_ng/constants.py", "snippet": "class QualityVideo(Enum):\n P360: int = 360\n P480: int = 480\n P720: int = 720\n P1080: int = 1080" }, { "identifier": "TidalLists", "path": "tidal_dl_ng/constants.py", "snippet": "class TidalLists(Enum):\n PLAYLISTS = \"Playlists\"\n FAVORITES = \"Favorites\"\n MIXES = \"Mixes\"" }, { "identifier": "Download", "path": "tidal_dl_ng/download.py", "snippet": "class Download:\n settings: Settings = None\n session: Session = None\n skip_existing: SkipExisting = False\n\n def __init__(self, session: Session, skip_existing: SkipExisting = SkipExisting.Disabled):\n self.settings = Settings()\n self.session = session\n self.skip_existing = skip_existing\n\n def _download(\n self,\n fn_logger: Callable,\n media: Track | Video,\n progress: Progress,\n progress_gui: ProgressBars,\n stream_manifest: StreamManifest,\n path_file: str,\n ):\n media_name: str = name_builder_item(media)\n\n # Set the correct progress output channel.\n if progress_gui is None:\n progress_stdout: bool = True\n else:\n progress_stdout: bool = False\n # Send signal to GUI with media name\n progress_gui.item_name.emit(media_name)\n\n try:\n # Compute total iterations for progress\n urls_count: int = len(stream_manifest.urls)\n\n if urls_count > 1:\n progress_total: int = urls_count\n block_size: int | None = None\n else:\n # Compute progress iterations based on the file size.\n r = requests.get(stream_manifest.urls[0], stream=True, timeout=REQUESTS_TIMEOUT_SEC)\n\n r.raise_for_status()\n\n # Get file size and compute progress steps\n total_size_in_bytes: int = int(r.headers.get(\"content-length\", 0))\n block_size: int | None = 4096\n progress_total: float = total_size_in_bytes / block_size\n\n # Create progress Task\n p_task: TaskID = progress.add_task(\n f\"[blue]Item '{media_name[:30]}'\",\n total=progress_total,\n visible=progress_stdout,\n )\n\n # Write content to file until progress is finished.\n while not progress.tasks[p_task].finished:\n with open(path_file, \"wb\") as f:\n for url in stream_manifest.urls:\n # Create the request object with stream=True, so the content won't be loaded into memory at once.\n r = requests.get(url, stream=True, timeout=REQUESTS_TIMEOUT_SEC)\n\n r.raise_for_status()\n\n # Write the content to disk. 
If `chunk_size` is set to `None` the whole file will be written at once.\n for data in r.iter_content(chunk_size=block_size):\n f.write(data)\n # Advance progress bar.\n progress.advance(p_task)\n\n # To send the progress to the GUI, we need to emit the percentage.\n if not progress_stdout:\n progress_gui.item.emit(progress.tasks[p_task].percentage)\n except HTTPError as e:\n # TODO: Handle Exception...\n fn_logger(e)\n\n # Check if file is encrypted.\n needs_decryption = self.is_encrypted(stream_manifest.encryption_type)\n\n if needs_decryption:\n key, nonce = decrypt_security_token(stream_manifest.encryption_key)\n tmp_path_file_decrypted = path_file + \"_decrypted\"\n decrypt_file(path_file, tmp_path_file_decrypted, key, nonce)\n else:\n tmp_path_file_decrypted = path_file\n\n # Write metadata to file.\n if not isinstance(media, Video):\n self.metadata_write(media, tmp_path_file_decrypted)\n\n return tmp_path_file_decrypted\n\n def instantiate_media(\n self,\n session: Session,\n media_type: type[MediaType.TRACK, MediaType.VIDEO, MediaType.ALBUM, MediaType.PLAYLIST, MediaType.MIX],\n id_media: str,\n ) -> Track | Video:\n if media_type == MediaType.TRACK:\n media = Track(session, id_media)\n elif media_type == MediaType.VIDEO:\n media = Video(session, id_media)\n elif media_type == MediaType.ALBUM:\n media = Album(self.session, id_media)\n elif media_type == MediaType.PLAYLIST:\n media = Playlist(self.session, id_media)\n elif media_type == MediaType.MIX:\n media = Mix(self.session, id_media)\n else:\n raise MediaUnknown\n\n return media\n\n def item(\n self,\n path_base: str,\n file_template: str,\n fn_logger: Callable,\n media: Track | Video = None,\n media_id: str = None,\n media_type: MediaType = None,\n video_download: bool = True,\n progress_gui: ProgressBars = None,\n progress: Progress = None,\n ) -> (bool, str):\n # If no media instance is provided, we need to create the media instance.\n if media_id and media_type:\n media = self.instantiate_media(self.session, media_type, media_id)\n elif not media:\n raise MediaMissing\n\n # If video download is not allowed end here\n if not video_download:\n fn_logger.info(\n f\"Video downloads are deactivated (see settings). 
Skipping video: {name_builder_item(media)}\"\n )\n\n return False, \"\"\n\n # Create file name and path\n file_name_relative = format_path_media(file_template, media)\n path_file = os.path.abspath(os.path.normpath(os.path.join(path_base, file_name_relative)))\n\n # Populate StreamManifest for further download.\n if isinstance(media, Track):\n stream = media.stream()\n manifest: str = stream.manifest\n mime_type: str = stream.manifest_mime_type\n else:\n manifest: str = media.get_url()\n mime_type: str = StreamManifestMimeType.VIDEO.value\n\n stream_manifest = self.stream_manifest_parse(manifest, mime_type)\n\n # Sanitize final path_file to fit into OS boundaries.\n path_file = path_file_sanitize(path_file + stream_manifest.file_extension, adapt=True)\n\n # Compute if and how downloads need to be skipped.\n if self.skip_existing.value:\n extension_ignore = self.skip_existing == SkipExisting.ExtensionIgnore\n # TODO: Check if extension is already in `path_file` or not.\n download_skip = check_file_exists(path_file, extension_ignore=extension_ignore)\n else:\n download_skip = False\n\n if not download_skip:\n # Create a temp directory and file.\n with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmp_path_dir:\n tmp_path_file = os.path.join(tmp_path_dir, str(uuid4()) + stream_manifest.file_extension)\n # Download media.\n tmp_path_file = self._download(fn_logger, media, progress, progress_gui, stream_manifest, tmp_path_file)\n\n if isinstance(media, Video) and self.settings.data.video_convert_mp4:\n # TODO: Make optional.\n # Convert `*.ts` file to `*.mp4` using ffmpeg\n tmp_path_file = self._video_convert(tmp_path_file)\n path_file = os.path.splitext(path_file)[0] + \".mp4\"\n\n # Move final file to the configured destination directory.\n os.makedirs(os.path.dirname(path_file), exist_ok=True)\n shutil.move(tmp_path_file, path_file)\n else:\n fn_logger.debug(f\"Download skipped, since file exists: '{path_file}'\")\n\n return not download_skip, path_file\n\n def cover_url(self, sid: str, dimension: CoverDimensions = CoverDimensions.Px320):\n if sid is None:\n return \"\"\n\n return f\"https://resources.tidal.com/images/{sid.replace('-', '/')}/{dimension.value}.jpg\"\n\n def metadata_write(self, track: Track, path_file: str):\n result: bool = False\n release_date: str = (\n track.album.release_date.strftime(\"%Y-%m-%d\") if track.album and track.album.release_date else \"\"\n )\n copy_right: str = track.copyright if hasattr(track, \"copyright\") and track.copyright else \"\"\n isrc: str = track.isrc if hasattr(track, \"isrc\") and track.isrc else \"\"\n lyrics: str = \"\"\n\n if self.settings.data.lyrics_save:\n # Try to retrieve lyrics.\n try:\n lyrics: str = track.lyrics().subtitles if hasattr(track, \"lyrics\") else \"\"\n except HTTPError:\n # TODO: Implement proper logging.\n print(f\"Could not retrieve lyrics for `{name_builder_item(track)}`.\")\n\n # TODO: Check if it is possible to pass \"None\" values.\n m: Metadata = Metadata(\n path_file=path_file,\n lyrics=lyrics,\n copy_right=copy_right,\n title=track.name,\n artists=[artist.name for artist in track.artists],\n album=track.album.name if track.album else \"\",\n tracknumber=track.track_num,\n date=release_date,\n isrc=isrc,\n albumartist=name_builder_item(track),\n totaltrack=track.album.num_tracks if track.album and track.album.num_tracks else 1,\n totaldisc=track.album.num_volumes if track.album and track.album.num_volumes else 1,\n discnumber=track.volume_num if track.volume_num else 1,\n url_cover=(\n 
self.cover_url(track.album.cover, self.settings.data.metadata_cover_dimension) if track.album else \"\"\n ),\n )\n\n m.save()\n\n result = True\n\n return result\n\n def items(\n self,\n path_base: str,\n fn_logger: Logger | WrapperLogger,\n media_id: str = None,\n media_type: MediaType = None,\n file_template: str = None,\n media: Album | Playlist | UserPlaylist | Mix = None,\n video_download: bool = False,\n progress_gui: ProgressBars = None,\n progress: Progress = None,\n download_delay: bool = True,\n ):\n # If no media instance is provided, we need to create the media instance.\n if media_id and media_type:\n media = self.instantiate_media(self.session, media_type, media_id)\n elif not media:\n raise MediaMissing\n\n # Create file name and path\n file_name_relative = format_path_media(file_template, media)\n\n # TODO: Extend with pagination support: Iterate through `items` and `tracks`until len(returned list) == 0\n # Get the items and name of the list.\n if isinstance(media, Mix):\n items = media.items()\n list_media_name = media.title[:30]\n elif video_download:\n items = media.items(limit=100)\n list_media_name = media.name[:30]\n else:\n items = media.tracks(limit=999)\n list_media_name = media.name[:30]\n\n # Determine where to redirect the progress information.\n if progress_gui is None:\n progress_stdout: bool = True\n else:\n progress_stdout: bool = False\n\n # Create the list progress task.\n p_task1: TaskID = progress.add_task(\n f\"[green]List '{list_media_name}'\", total=len(items), visible=progress_stdout\n )\n\n # Iterate through list items\n while not progress.finished:\n for media in items:\n # TODO: Handle return value of `track` method.\n # Download the item.\n status_download, result_path_file = self.item(\n path_base=path_base,\n file_template=file_name_relative,\n media=media,\n progress_gui=progress_gui,\n progress=progress,\n fn_logger=fn_logger,\n )\n\n # Advance progress bar.\n progress.advance(p_task1)\n\n if not progress_stdout:\n progress_gui.list_item.emit(progress.tasks[p_task1].percentage)\n\n # If a file was downloaded and the download delay is enabled, wait until the next download.\n if download_delay and status_download:\n time_sleep: float = round(random.SystemRandom().uniform(2, 5), 1)\n\n # TODO: Fix logging. 
Is not displayed in debug window.\n fn_logger.debug(f\"Next download will start in {time_sleep} seconds.\")\n time.sleep(time_sleep)\n\n def is_encrypted(self, encryption_type: str) -> bool:\n result = encryption_type != \"NONE\"\n\n return result\n\n def get_file_extension(self, stream_url: str, stream_codec: str) -> str:\n if \".flac\" in stream_url:\n result: str = \".flac\"\n elif \".mp4\" in stream_url:\n # TODO: Need to investigate, what the correct extension is.\n # if \"ac4\" in stream_codec or \"mha1\" in stream_codec:\n # result = \".mp4\"\n # elif \"flac\" in stream_codec:\n # result = \".flac\"\n # else:\n # result = \".m4a\"\n result: str = \".mp4\"\n elif \".ts\" in stream_url:\n result: str = \".ts\"\n else:\n result: str = \".m4a\"\n\n return result\n\n def _video_convert(self, path_file: str) -> str:\n path_file_out = os.path.splitext(path_file)[0] + \".mp4\"\n result, _ = ffmpeg.input(path_file).output(path_file_out, map=0, c=\"copy\").run()\n\n return path_file_out\n\n def stream_manifest_parse(self, manifest: str, mime_type: str) -> StreamManifest:\n if mime_type == StreamManifestMimeType.MPD.value:\n # Stream Manifest is base64 encoded.\n manifest_parsed: str = base64.b64decode(manifest).decode(\"utf-8\")\n mpd = MPEGDASHParser.parse(manifest_parsed)\n codecs: str = mpd.periods[0].adaptation_sets[0].representations[0].codecs\n mime_type: str = mpd.periods[0].adaptation_sets[0].mime_type\n # TODO: Handle encryption key. But I have never seen an encrypted file so far.\n encryption_type: str = \"NONE\"\n encryption_key: str | None = None\n # .initialization + the very first of .media; See https://developers.broadpeak.io/docs/foundations-dash\n segments_count = 1 + 1\n\n for s in mpd.periods[0].adaptation_sets[0].representations[0].segment_templates[0].segment_timelines[0].Ss:\n segments_count += s.r if s.r else 1\n\n # Populate segment urls.\n segment_template = mpd.periods[0].adaptation_sets[0].representations[0].segment_templates[0]\n stream_urls: list[str] = []\n\n for index in range(segments_count):\n stream_urls.append(segment_template.media.replace(\"$Number$\", str(index)))\n\n elif mime_type == StreamManifestMimeType.BTS.value:\n # Stream Manifest is base64 encoded.\n manifest_parsed: str = base64.b64decode(manifest).decode(\"utf-8\")\n # JSON string to object.\n stream_manifest = json.loads(manifest_parsed)\n # TODO: Handle more than one download URL\n stream_urls: str = stream_manifest[\"urls\"]\n codecs: str = stream_manifest[\"codecs\"]\n mime_type: str = stream_manifest[\"mimeType\"]\n encryption_type: str = stream_manifest[\"encryptionType\"]\n encryption_key: str | None = (\n stream_manifest[\"encryptionKey\"] if self.is_encrypted(encryption_type) else None\n )\n elif mime_type == StreamManifestMimeType.VIDEO.value:\n # Parse M3U8 video playlist\n m3u8_variant: m3u8.M3U8 = m3u8.load(manifest)\n # Find the desired video resolution or the next best one.\n m3u8_playlist, codecs = self._extract_video_stream(m3u8_variant, self.settings.data.quality_video.value)\n # Populate urls.\n stream_urls: list[str] = m3u8_playlist.files\n\n # TODO: Handle encryption key. 
But I have never seen an encrypted file so far.\n encryption_type: str = \"NONE\"\n encryption_key: str | None = None\n else:\n raise UnknownManifestFormat\n\n file_extension: str = self.get_file_extension(stream_urls[0], codecs)\n\n result: StreamManifest = StreamManifest(\n urls=stream_urls,\n codecs=codecs,\n file_extension=file_extension,\n encryption_type=encryption_type,\n encryption_key=encryption_key,\n mime_type=mime_type,\n )\n\n return result\n\n def _extract_video_stream(self, m3u8_variant: m3u8.M3U8, quality: str) -> (m3u8.M3U8 | bool, str):\n m3u8_playlist: m3u8.M3U8 | bool = False\n resolution_best: int = 0\n mime_type: str = \"\"\n\n if m3u8_variant.is_variant:\n for playlist in m3u8_variant.playlists:\n if resolution_best < playlist.stream_info.resolution[1]:\n resolution_best = playlist.stream_info.resolution[1]\n m3u8_playlist = m3u8.load(playlist.uri)\n mime_type = playlist.stream_info.codecs\n\n if quality == playlist.stream_info.resolution[1]:\n break\n\n return m3u8_playlist, mime_type" }, { "identifier": "XStream", "path": "tidal_dl_ng/logger.py", "snippet": "class XStream(QtCore.QObject):\nclass QtHandler(logging.Handler):\n def flush(self):\n def fileno(self):\n def write(self, msg):\n def stdout():\n def stderr():\n def __init__(self):\n def emit(self, record):" }, { "identifier": "ProgressBars", "path": "tidal_dl_ng/model/gui_data.py", "snippet": "class ProgressBars:\n item: QtCore.Signal\n item_name: QtCore.Signal\n list_item: QtCore.Signal" }, { "identifier": "ResultSearch", "path": "tidal_dl_ng/model/gui_data.py", "snippet": "class ResultSearch:\n position: int\n artist: str\n title: str\n album: str\n duration_sec: int\n obj: object" }, { "identifier": "Ui_MainWindow", "path": "tidal_dl_ng/ui/main.py", "snippet": "class Ui_MainWindow:\n def setupUi(self, MainWindow):\n if not MainWindow.objectName():\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(900, 700)\n self.a_options = QAction(MainWindow)\n self.a_options.setObjectName(\"a_options\")\n self.a_options.setEnabled(False)\n self.a_options.setText(\"Options\")\n self.a_options.setIconText(\"Options\")\n # if QT_CONFIG(tooltip)\n self.a_options.setToolTip(\"Options\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.a_options.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.a_options.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n self.w_central = QWidget(MainWindow)\n self.w_central.setObjectName(\"w_central\")\n self.w_central.setEnabled(True)\n sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(100)\n sizePolicy.setVerticalStretch(100)\n sizePolicy.setHeightForWidth(self.w_central.sizePolicy().hasHeightForWidth())\n self.w_central.setSizePolicy(sizePolicy)\n # if QT_CONFIG(tooltip)\n self.w_central.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.w_central.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.w_central.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.w_central.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.w_central.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.horizontalLayout = QHBoxLayout(self.w_central)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.lh_main = QHBoxLayout()\n self.lh_main.setObjectName(\"lh_main\")\n 
self.lh_main.setSizeConstraint(QLayout.SetNoConstraint)\n self.tr_lists_user = QTreeWidget(self.w_central)\n __qtreewidgetitem = QTreeWidgetItem()\n __qtreewidgetitem.setText(1, \"Info\")\n __qtreewidgetitem.setText(0, \"Playlist\")\n self.tr_lists_user.setHeaderItem(__qtreewidgetitem)\n __qtreewidgetitem1 = QTreeWidgetItem(self.tr_lists_user)\n __qtreewidgetitem1.setFlags(Qt.ItemIsEnabled)\n __qtreewidgetitem2 = QTreeWidgetItem(self.tr_lists_user)\n __qtreewidgetitem2.setFlags(Qt.ItemIsEnabled)\n __qtreewidgetitem3 = QTreeWidgetItem(self.tr_lists_user)\n __qtreewidgetitem3.setFlags(Qt.ItemIsEnabled)\n self.tr_lists_user.setObjectName(\"tr_lists_user\")\n # if QT_CONFIG(tooltip)\n self.tr_lists_user.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.tr_lists_user.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.tr_lists_user.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.tr_lists_user.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.tr_lists_user.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.tr_lists_user.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.tr_lists_user.setProperty(\"showDropIndicator\", False)\n self.tr_lists_user.setIndentation(10)\n self.tr_lists_user.setUniformRowHeights(True)\n self.tr_lists_user.setSortingEnabled(True)\n self.tr_lists_user.header().setCascadingSectionResizes(True)\n self.tr_lists_user.header().setHighlightSections(True)\n self.tr_lists_user.header().setProperty(\"showSortIndicator\", True)\n\n self.lh_main.addWidget(self.tr_lists_user)\n\n self.lv_search_result = QVBoxLayout()\n # ifndef Q_OS_MAC\n self.lv_search_result.setSpacing(-1)\n # endif\n self.lv_search_result.setObjectName(\"lv_search_result\")\n self.lh_search = QHBoxLayout()\n self.lh_search.setObjectName(\"lh_search\")\n self.l_search = QLineEdit(self.w_central)\n self.l_search.setObjectName(\"l_search\")\n self.l_search.setAcceptDrops(False)\n # if QT_CONFIG(tooltip)\n self.l_search.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.l_search.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.l_search.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.l_search.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.l_search.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.l_search.setLocale(QLocale(QLocale.English, QLocale.UnitedStates))\n self.l_search.setText(\"\")\n self.l_search.setPlaceholderText(\"Type and press ENTER to search...\")\n self.l_search.setClearButtonEnabled(True)\n\n self.lh_search.addWidget(self.l_search)\n\n self.cb_search_type = QComboBox(self.w_central)\n self.cb_search_type.setObjectName(\"cb_search_type\")\n # if QT_CONFIG(tooltip)\n self.cb_search_type.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.cb_search_type.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.cb_search_type.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.cb_search_type.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.cb_search_type.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.cb_search_type.setCurrentText(\"\")\n 
self.cb_search_type.setPlaceholderText(\"\")\n\n self.lh_search.addWidget(self.cb_search_type)\n\n self.b_search = QPushButton(self.w_central)\n self.b_search.setObjectName(\"b_search\")\n # if QT_CONFIG(statustip)\n self.b_search.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.b_search.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.b_search.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.b_search.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.b_search.setText(\"Search\")\n # if QT_CONFIG(shortcut)\n self.b_search.setShortcut(\"\")\n # endif // QT_CONFIG(shortcut)\n\n self.lh_search.addWidget(self.b_search)\n\n self.lv_search_result.addLayout(self.lh_search)\n\n self.tr_results = QTreeWidget(self.w_central)\n self.tr_results.setObjectName(\"tr_results\")\n self.tr_results.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.tr_results.setProperty(\"showDropIndicator\", False)\n self.tr_results.setDragDropOverwriteMode(False)\n self.tr_results.setAlternatingRowColors(False)\n self.tr_results.setSelectionMode(QAbstractItemView.ExtendedSelection)\n self.tr_results.setIndentation(10)\n self.tr_results.setSortingEnabled(True)\n self.tr_results.header().setProperty(\"showSortIndicator\", True)\n self.tr_results.header().setStretchLastSection(False)\n\n self.lv_search_result.addWidget(self.tr_results)\n\n self.lh_download = QHBoxLayout()\n self.lh_download.setObjectName(\"lh_download\")\n self.l_quality_audio = QLabel(self.w_central)\n self.l_quality_audio.setObjectName(\"l_quality_audio\")\n # if QT_CONFIG(tooltip)\n self.l_quality_audio.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.l_quality_audio.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.l_quality_audio.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.l_quality_audio.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.l_quality_audio.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.l_quality_audio.setText(\"Audio\")\n self.l_quality_audio.setAlignment(Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter)\n\n self.lh_download.addWidget(self.l_quality_audio)\n\n self.cb_quality_audio = QComboBox(self.w_central)\n self.cb_quality_audio.setObjectName(\"cb_quality_audio\")\n # if QT_CONFIG(tooltip)\n self.cb_quality_audio.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.cb_quality_audio.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.cb_quality_audio.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.cb_quality_audio.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.cb_quality_audio.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.cb_quality_audio.setCurrentText(\"\")\n self.cb_quality_audio.setPlaceholderText(\"\")\n self.cb_quality_audio.setFrame(True)\n\n self.lh_download.addWidget(self.cb_quality_audio)\n\n self.l_quality_video = QLabel(self.w_central)\n self.l_quality_video.setObjectName(\"l_quality_video\")\n # if QT_CONFIG(tooltip)\n self.l_quality_video.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.l_quality_video.setStatusTip(\"\")\n # endif // 
QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.l_quality_video.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.l_quality_video.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.l_quality_video.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.l_quality_video.setText(\"Video\")\n self.l_quality_video.setAlignment(Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter)\n\n self.lh_download.addWidget(self.l_quality_video)\n\n self.cb_quality_video = QComboBox(self.w_central)\n self.cb_quality_video.setObjectName(\"cb_quality_video\")\n # if QT_CONFIG(tooltip)\n self.cb_quality_video.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.cb_quality_video.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.cb_quality_video.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.cb_quality_video.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.cb_quality_video.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.cb_quality_video.setCurrentText(\"\")\n self.cb_quality_video.setPlaceholderText(\"\")\n\n self.lh_download.addWidget(self.cb_quality_video)\n\n self.b_download = QPushButton(self.w_central)\n self.b_download.setObjectName(\"b_download\")\n # if QT_CONFIG(tooltip)\n self.b_download.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.b_download.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.b_download.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.b_download.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.b_download.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.b_download.setText(\"Download\")\n # if QT_CONFIG(shortcut)\n self.b_download.setShortcut(\"\")\n # endif // QT_CONFIG(shortcut)\n\n self.lh_download.addWidget(self.b_download)\n\n self.lh_download.setStretch(0, 5)\n self.lh_download.setStretch(2, 5)\n self.lh_download.setStretch(4, 15)\n\n self.lv_search_result.addLayout(self.lh_download)\n\n self.te_debug = QPlainTextEdit(self.w_central)\n self.te_debug.setObjectName(\"te_debug\")\n self.te_debug.setEnabled(True)\n sizePolicy1 = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Maximum)\n sizePolicy1.setHorizontalStretch(0)\n sizePolicy1.setVerticalStretch(0)\n sizePolicy1.setHeightForWidth(self.te_debug.sizePolicy().hasHeightForWidth())\n self.te_debug.setSizePolicy(sizePolicy1)\n self.te_debug.setMaximumSize(QSize(16777215, 16777215))\n self.te_debug.setAcceptDrops(False)\n # if QT_CONFIG(tooltip)\n self.te_debug.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.te_debug.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.te_debug.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.te_debug.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.te_debug.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.te_debug.setUndoRedoEnabled(False)\n self.te_debug.setReadOnly(True)\n\n self.lv_search_result.addWidget(self.te_debug)\n\n self.lh_main.addLayout(self.lv_search_result)\n\n self.lh_main.setStretch(0, 40)\n 
self.lh_main.setStretch(1, 60)\n\n self.horizontalLayout.addLayout(self.lh_main)\n\n MainWindow.setCentralWidget(self.w_central)\n self.menubar = QMenuBar(MainWindow)\n self.menubar.setObjectName(\"menubar\")\n self.menubar.setGeometry(QRect(0, 0, 900, 24))\n # if QT_CONFIG(tooltip)\n self.menubar.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.menubar.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.menubar.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.menubar.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.menubar.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.m_file = QMenu(self.menubar)\n self.m_file.setObjectName(\"m_file\")\n # if QT_CONFIG(tooltip)\n self.m_file.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.m_file.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.m_file.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.m_file.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.m_file.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n # if QT_CONFIG(tooltip)\n self.statusbar.setToolTip(\"\")\n # endif // QT_CONFIG(tooltip)\n # if QT_CONFIG(statustip)\n self.statusbar.setStatusTip(\"\")\n # endif // QT_CONFIG(statustip)\n # if QT_CONFIG(whatsthis)\n self.statusbar.setWhatsThis(\"\")\n # endif // QT_CONFIG(whatsthis)\n # if QT_CONFIG(accessibility)\n self.statusbar.setAccessibleName(\"\")\n # endif // QT_CONFIG(accessibility)\n # if QT_CONFIG(accessibility)\n self.statusbar.setAccessibleDescription(\"\")\n # endif // QT_CONFIG(accessibility)\n self.statusbar.setLayoutDirection(Qt.LeftToRight)\n MainWindow.setStatusBar(self.statusbar)\n\n self.menubar.addAction(self.m_file.menuAction())\n self.m_file.addAction(self.a_options)\n\n self.retranslateUi(MainWindow)\n\n QMetaObject.connectSlotsByName(MainWindow)\n\n # setupUi\n\n def retranslateUi(self, MainWindow):\n MainWindow.setWindowTitle(QCoreApplication.translate(\"MainWindow\", \"MainWindow\", None))\n ___qtreewidgetitem = self.tr_lists_user.headerItem()\n ___qtreewidgetitem.setText(2, QCoreApplication.translate(\"MainWindow\", \"obj\", None))\n\n __sortingEnabled = self.tr_lists_user.isSortingEnabled()\n self.tr_lists_user.setSortingEnabled(False)\n ___qtreewidgetitem1 = self.tr_lists_user.topLevelItem(0)\n ___qtreewidgetitem1.setText(0, QCoreApplication.translate(\"MainWindow\", \"Playlists\", None))\n ___qtreewidgetitem2 = self.tr_lists_user.topLevelItem(1)\n ___qtreewidgetitem2.setText(0, QCoreApplication.translate(\"MainWindow\", \"Mixes\", None))\n ___qtreewidgetitem3 = self.tr_lists_user.topLevelItem(2)\n ___qtreewidgetitem3.setText(0, QCoreApplication.translate(\"MainWindow\", \"Favorites\", None))\n self.tr_lists_user.setSortingEnabled(__sortingEnabled)\n\n ___qtreewidgetitem4 = self.tr_results.headerItem()\n ___qtreewidgetitem4.setText(5, QCoreApplication.translate(\"MainWindow\", \"obj\", None))\n ___qtreewidgetitem4.setText(4, QCoreApplication.translate(\"MainWindow\", \"Duration\", None))\n ___qtreewidgetitem4.setText(3, QCoreApplication.translate(\"MainWindow\", \"Album\", None))\n ___qtreewidgetitem4.setText(2, 
QCoreApplication.translate(\"MainWindow\", \"Title\", None))\n ___qtreewidgetitem4.setText(1, QCoreApplication.translate(\"MainWindow\", \"Artist\", None))\n ___qtreewidgetitem4.setText(0, QCoreApplication.translate(\"MainWindow\", \"#\", None))\n self.te_debug.setPlaceholderText(QCoreApplication.translate(\"MainWindow\", \"Logs...\", None))\n self.m_file.setTitle(QCoreApplication.translate(\"MainWindow\", \"File\", None))\n\n # retranslateUi" }, { "identifier": "QtWaitingSpinner", "path": "tidal_dl_ng/ui/spinner.py", "snippet": "class QtWaitingSpinner(QWidget):\n def __init__(\n self, parent, centerOnParent=True, disableParentWhenSpinning=False, modality=Qt.WindowModality.NonModal\n ):\n super().__init__(parent)\n\n self._centerOnParent = centerOnParent\n self._disableParentWhenSpinning = disableParentWhenSpinning\n\n # WAS IN initialize()\n self._color = QColor(Qt.GlobalColor.black)\n self._roundness = 100.0\n self._minimumTrailOpacity = 3.14159265358979323846\n self._trailFadePercentage = 80.0\n self._revolutionsPerSecond = 1.57079632679489661923\n self._numberOfLines = 20\n self._lineLength = 10\n self._lineWidth = 2\n self._innerRadius = 10\n self._currentCounter = 0\n self._isSpinning = False\n\n self._timer = QTimer(self)\n self._timer.timeout.connect(self.rotate)\n self.updateSize()\n self.updateTimer()\n self.hide()\n # END initialize()\n\n self.setWindowModality(modality)\n self.setAttribute(Qt.WidgetAttribute.WA_TranslucentBackground)\n\n def paintEvent(self, QPaintEvent):\n self.updatePosition()\n painter = QPainter(self)\n painter.fillRect(self.rect(), Qt.GlobalColor.transparent)\n # Can't found in Qt6\n # painter.setRenderHint(QPainter.Antialiasing, True)\n\n if self._currentCounter >= self._numberOfLines:\n self._currentCounter = 0\n\n painter.setPen(Qt.PenStyle.NoPen)\n for i in range(0, self._numberOfLines):\n painter.save()\n painter.translate(self._innerRadius + self._lineLength, self._innerRadius + self._lineLength)\n rotateAngle = float(360 * i) / float(self._numberOfLines)\n painter.rotate(rotateAngle)\n painter.translate(self._innerRadius, 0)\n distance = self.lineCountDistanceFromPrimary(i, self._currentCounter, self._numberOfLines)\n color = self.currentLineColor(\n distance, self._numberOfLines, self._trailFadePercentage, self._minimumTrailOpacity, self._color\n )\n painter.setBrush(color)\n rect = QRect(0, int(-self._lineWidth / 2), int(self._lineLength), int(self._lineWidth))\n painter.drawRoundedRect(rect, self._roundness, self._roundness, Qt.SizeMode.RelativeSize)\n painter.restore()\n\n def start(self):\n self.updatePosition()\n self._isSpinning = True\n self.show()\n\n if self.parentWidget and self._disableParentWhenSpinning:\n self.parentWidget().setEnabled(False)\n\n if not self._timer.isActive():\n self._timer.start()\n self._currentCounter = 0\n\n def stop(self):\n self._isSpinning = False\n self.hide()\n\n if self.parentWidget() and self._disableParentWhenSpinning:\n self.parentWidget().setEnabled(True)\n\n if self._timer.isActive():\n self._timer.stop()\n self._currentCounter = 0\n\n def setNumberOfLines(self, lines):\n self._numberOfLines = lines\n self._currentCounter = 0\n self.updateTimer()\n\n def setLineLength(self, length):\n self._lineLength = length\n self.updateSize()\n\n def setLineWidth(self, width):\n self._lineWidth = width\n self.updateSize()\n\n def setInnerRadius(self, radius):\n self._innerRadius = radius\n self.updateSize()\n\n def color(self):\n return self._color\n\n def roundness(self):\n return self._roundness\n\n def 
minimumTrailOpacity(self):\n return self._minimumTrailOpacity\n\n def trailFadePercentage(self):\n return self._trailFadePercentage\n\n def revolutionsPersSecond(self):\n return self._revolutionsPerSecond\n\n def numberOfLines(self):\n return self._numberOfLines\n\n def lineLength(self):\n return self._lineLength\n\n def lineWidth(self):\n return self._lineWidth\n\n def innerRadius(self):\n return self._innerRadius\n\n def isSpinning(self):\n return self._isSpinning\n\n def setRoundness(self, roundness):\n self._roundness = max(0.0, min(100.0, roundness))\n\n def setColor(self, color=Qt.GlobalColor.black):\n self._color = QColor(color)\n\n def setRevolutionsPerSecond(self, revolutionsPerSecond):\n self._revolutionsPerSecond = revolutionsPerSecond\n self.updateTimer()\n\n def setTrailFadePercentage(self, trail):\n self._trailFadePercentage = trail\n\n def setMinimumTrailOpacity(self, minimumTrailOpacity):\n self._minimumTrailOpacity = minimumTrailOpacity\n\n def rotate(self):\n self._currentCounter += 1\n if self._currentCounter >= self._numberOfLines:\n self._currentCounter = 0\n self.update()\n\n def updateSize(self):\n size = int((self._innerRadius + self._lineLength) * 2)\n self.setFixedSize(size, size)\n\n def updateTimer(self):\n self._timer.setInterval(int(1000 / (self._numberOfLines * self._revolutionsPerSecond)))\n\n def updatePosition(self):\n if self.parentWidget() and self._centerOnParent:\n self.move(\n int(self.parentWidget().width() / 2 - self.width() / 2),\n int(self.parentWidget().height() / 2 - self.height() / 2),\n )\n\n def lineCountDistanceFromPrimary(self, current, primary, totalNrOfLines):\n distance = primary - current\n if distance < 0:\n distance += totalNrOfLines\n return distance\n\n def currentLineColor(self, countDistance, totalNrOfLines, trailFadePerc, minOpacity, colorinput):\n color = QColor(colorinput)\n if countDistance == 0:\n return color\n minAlphaF = minOpacity / 100.0\n distanceThreshold = int(math.ceil((totalNrOfLines - 1) * trailFadePerc / 100.0))\n if countDistance > distanceThreshold:\n color.setAlphaF(minAlphaF)\n else:\n alphaDiff = color.alphaF() - minAlphaF\n gradient = alphaDiff / float(distanceThreshold + 1)\n resultAlpha = color.alphaF() - gradient * countDistance\n # If alpha is out of bounds, clip it.\n resultAlpha = min(1.0, max(0.0, resultAlpha))\n color.setAlphaF(resultAlpha)\n return color" }, { "identifier": "Worker", "path": "tidal_dl_ng/worker.py", "snippet": "class Worker(QtCore.QRunnable):\n \"\"\"\n Worker thread\n\n Inherits from QRunnable to handler worker thread setup, signals and wrap-up.\n\n :param callback: The function callback to run on this worker thread. Supplied args and\n kwargs will be passed through to the runner.\n :type callback: function\n :param args: Arguments to pass to the callback function\n :param kwargs: Keywords to pass to the callback function\n\n \"\"\"\n\n def __init__(self, fn, *args, **kwargs):\n super().__init__()\n # Store constructor arguments (re-used for processing)\n self.fn = fn\n self.args = args\n self.kwargs = kwargs\n\n @QtCore.Slot() # QtCore.Slot\n def run(self):\n \"\"\"\n Initialise the runner function with passed args, kwargs.\n \"\"\"\n self.fn(*self.args, **self.kwargs)" } ]
import math
import sys
import qdarktheme
import coloredlogs.converter
from collections.abc import Callable
from tidal_dl_ng.helper.path import get_format_template
from PySide6 import QtCore, QtGui, QtWidgets
from rich.progress import Progress
from tidalapi import Album, Mix, Playlist, Quality, Track, UserPlaylist, Video
from tidalapi.session import SearchTypes
from tidal_dl_ng.config import Settings, Tidal
from tidal_dl_ng.constants import QualityVideo, TidalLists
from tidal_dl_ng.download import Download
from tidal_dl_ng.logger import XStream, logger_gui
from tidal_dl_ng.model.gui_data import ProgressBars, ResultSearch
from tidal_dl_ng.ui.main import Ui_MainWindow
from tidal_dl_ng.ui.spinner import QtWaitingSpinner
from tidal_dl_ng.worker import Worker
token_num: 14,197
if isinstance(l_media, list): result = result + self.search_result_to_model(l_media) return result def search_result_to_model(self, items: [*SearchTypes]) -> [ResultSearch]: result = [] for idx, item in enumerate(items): if isinstance(item, Track): result_item: ResultSearch = ResultSearch( position=idx, artist=", ".join(artist.name for artist in item.artists), title=item.name, album=item.album.name, duration_sec=item.duration, obj=item, ) result.append(result_item) elif isinstance(item, Video): result_item: ResultSearch = ResultSearch( position=idx, artist=", ".join(artist.name for artist in item.artists), title=item.name, album=item.album.name if item.album else "", duration_sec=item.duration, obj=item, ) result.append(result_item) elif isinstance(item, Playlist): result_item: ResultSearch = ResultSearch( position=idx, artist=", ".join(artist.name for artist in item.promoted_artists) if item.promoted_artists else "", title=item.name, album="", duration_sec=item.duration, obj=item, ) result.append(result_item) elif isinstance(item, Album): result_item: ResultSearch = ResultSearch( position=idx, artist=", ".join(artist.name for artist in item.artists), title="", album=item.name, duration_sec=item.duration, obj=item, ) result.append(result_item) return result def _init_signals(self): self.b_download.clicked.connect(lambda: self.thread_it(self.on_download_results)) self.l_search.returnPressed.connect( lambda: self.search_populate_results(self.l_search.text(), self.cb_search_type.currentData()) ) self.b_search.clicked.connect( lambda: self.search_populate_results(self.l_search.text(), self.cb_search_type.currentData()) ) self.cb_quality_audio.currentIndexChanged.connect(self.quality_set_audio) self.cb_quality_video.currentIndexChanged.connect(self.quality_set_video) self.tr_lists_user.itemClicked.connect(self.on_list_items_show) self.spinner_start[QtWidgets.QWidget].connect(self.on_spinner_start) self.spinner_stop.connect(self.on_spinner_stop) self.s_item_advance.connect(self.progress_item) self.s_item_name.connect(self.progress_item_name) self.s_list_advance.connect(self.progress_list) self.s_pb_reset.connect(self.progress_reset) self.s_populate_tree_lists.connect(self.on_populate_tree_lists) def progress_list(self, value: float): self.pb_list.setValue(int(math.ceil(value))) def progress_item(self, value: float): self.pb_item.setValue(int(math.ceil(value))) def progress_item_name(self, value: str): self.pb_item.setFormat(f"%p% {value}") def progress_list_name(self, value: str): self.pb_list.setFormat(f"%p% {value}") def quality_set_audio(self, index): self.settings.data.quality_audio = Quality(self.cb_quality_audio.itemData(index).value) self.settings.save() if self.tidal: self.tidal.settings_apply() def quality_set_video(self, index): self.settings.data.quality_video = QualityVideo(self.cb_quality_video.itemData(index).value) self.settings.save() if self.tidal: self.tidal.settings_apply() def on_list_items_show(self, item: QtWidgets.QTreeWidgetItem): media_list: Album | Playlist = item.data(3, QtCore.Qt.ItemDataRole.UserRole) # Only if clicked item is not a top level item. 
if media_list: self.list_items_show(media_list) def list_items_show(self, media_list: Album | Playlist | None = None, point: QtCore.QPoint | None = None): if point: item = self.tr_lists_user.itemAt(point) media_list = item.data(3, QtCore.Qt.ItemDataRole.UserRole) media_items = media_list.items() result = self.search_result_to_model(media_items) self.populate_tree_results(result) def thread_it(self, fn: Callable, *args, **kwargs): # Any other args, kwargs are passed to the run function
try: except ImportError as e: print(e) print("Qt dependencies missing. Cannot start GUI. Please execute: 'pip install pyside6 pyqtdarktheme'") sys.exit(1) # TODO: Make more use of Exceptions # TODO: Add File -> Version class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow): settings: Settings = None tidal: Tidal = None dl: Download = None threadpool: QtCore.QThreadPool = None spinner: QtWaitingSpinner = None spinner_start: QtCore.Signal = QtCore.Signal(QtWidgets.QWidget) spinner_stop: QtCore.Signal = QtCore.Signal() pb_item: QtWidgets.QProgressBar = None s_item_advance: QtCore.Signal = QtCore.Signal(float) s_item_name: QtCore.Signal = QtCore.Signal(str) pb_list: QtWidgets.QProgressBar = None s_list_advance: QtCore.Signal = QtCore.Signal(float) s_pb_reset: QtCore.Signal = QtCore.Signal() s_populate_tree_lists: QtCore.Signal = QtCore.Signal(list) def __init__(self, tidal: Tidal | None = None): super().__init__() self.setupUi(self) # self.setGeometry(50, 50, 500, 300) self.setWindowTitle("TIDAL Downloader Next Gen!") # TODO: Fix icons (make them visible). # my_pixmap = QtGui.QPixmap("tidal_dl_ng/ui/icon.png") my_icon = QtGui.QIcon("tidal_dl_ng/ui/icon.png") self.setWindowIcon(my_icon) tray = QtWidgets.QSystemTrayIcon() tray.setIcon(my_icon) tray.setVisible(True) # Logging redirect. XStream.stdout().messageWritten.connect(self._log_output) # XStream.stderr().messageWritten.connect(self._log_output) self.settings = Settings() self.threadpool = QtCore.QThreadPool() # TODO: Show GUI, create a progress bar showing the TIDAL querying progress. self._init_tree_results(self.tr_results) self._init_tree_lists(self.tr_lists_user) self._init_progressbar() self._populate_quality(self.cb_quality_audio, Quality) self._populate_quality(self.cb_quality_video, QualityVideo) self._populate_search_types(self.cb_search_type, SearchTypes) self.apply_settings(self.settings) self._init_signals() self.init_tidal(tidal) logger_gui.debug("Multithreading with maximum %d threads" % self.threadpool.maxThreadCount()) logger_gui.debug("All setup.") def init_tidal(self, tidal: Tidal = None): result: bool = False if tidal: self.tidal = tidal result = True else: self.tidal = Tidal(self.settings) while True: result = self.tidal.login(logger_gui.info) if result: break if result: self.dl = Download(self.tidal.session, self.tidal.settings.data.skip_existing) self.thread_it(self.tidal_user_lists) def _init_progressbar(self): self.pb_list = QtWidgets.QProgressBar() self.pb_item = QtWidgets.QProgressBar() pbs = [self.pb_list, self.pb_item] for pb in pbs: pb.setRange(0, 100) # self.pb_progress.setVisible() self.statusbar.addPermanentWidget(pb) def progress_reset(self): self.pb_list.setValue(0) self.pb_item.setValue(0) def _log_output(self, text): display_msg = coloredlogs.converter.convert(text) cursor: QtGui.QTextCursor = self.te_debug.textCursor() cursor.movePosition(QtGui.QTextCursor.End) cursor.insertHtml(display_msg) self.te_debug.setTextCursor(cursor) self.te_debug.ensureCursorVisible() def _populate_quality(self, ui_target: QtWidgets.QComboBox, options: type[Quality | QualityVideo]): for item in options: ui_target.addItem(item.name, item) def _populate_search_types(self, ui_target: QtWidgets.QComboBox, options: SearchTypes): for item in options: if item and item.__name__ != "Artist": ui_target.addItem(item.__name__, item) self.cb_search_type.setCurrentIndex(1) def _init_tree_results(self, tree: QtWidgets.QTableWidget): tree.setColumnHidden(5, True) tree.sortByColumn(0, QtCore.Qt.SortOrder.AscendingOrder) # TODO: Refactor to own 
TIDAL file or so. def tidal_user_lists(self): # Start loading spinner self.spinner_start.emit(self.tr_lists_user) user_playlists: [Playlist | UserPlaylist] = self.tidal.session.user.playlist_and_favorite_playlists() user_mixes: [Mix] = self.tidal.session.mixes().categories[0].items user_all: [Playlist | UserPlaylist | Mix] = user_playlists + user_mixes self.s_populate_tree_lists.emit(user_all) def on_populate_tree_lists(self, user_lists: [Playlist | UserPlaylist | Mix]): self.tr_results.clear() twi_playlists: QtWidgets.QTreeWidgetItem = self.tr_lists_user.findItems( TidalLists.PLAYLISTS.value, QtCore.Qt.MatchExactly, 0 )[0] twi_mixes: QtWidgets.QTreeWidgetItem = self.tr_lists_user.findItems( TidalLists.FAVORITES.value, QtCore.Qt.MatchExactly, 0 )[0] twi_favorites: QtWidgets.QTreeWidgetItem = self.tr_lists_user.findItems( TidalLists.MIXES.value, QtCore.Qt.MatchExactly, 0 )[0] for item in user_lists: if isinstance(item, UserPlaylist): twi_child = QtWidgets.QTreeWidgetItem(twi_playlists) name: str = item.name info: str = f"({item.num_tracks + item.num_videos} Tracks)" elif isinstance(item, Playlist): twi_child = QtWidgets.QTreeWidgetItem(twi_mixes) name: str = item.name info: str = f"({item.num_tracks + item.num_videos} Tracks) {item.description}" elif isinstance(item, Mix): twi_child = QtWidgets.QTreeWidgetItem(twi_favorites) name: str = item.title info: str = item.sub_title twi_child.setText(0, name) twi_child.setText(1, info) twi_child.setData(3, QtCore.Qt.ItemDataRole.UserRole, item) # Stop load spinner self.spinner_stop.emit() def _init_tree_lists(self, tree: QtWidgets.QTreeWidget): # Adjust Tree. tree.setColumnWidth(0, 200) tree.setColumnWidth(1, 300) tree.setColumnHidden(2, True) tree.expandAll() # Connect the contextmenu tree.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) tree.customContextMenuRequested.connect(self.menu_context_tree_lists) def apply_settings(self, settings: Settings): l_cb = [ {"element": self.cb_quality_audio, "setting": settings.data.quality_audio.name, "default_id": 1}, {"element": self.cb_quality_video, "setting": settings.data.quality_video.name, "default_id": 0}, ] for item in l_cb: idx = item["element"].findText(item["setting"]) if idx > -1: item["element"].setCurrentIndex(idx) else: item["element"].setCurrentIndex(item["default_id"]) def on_spinner_start(self, parent: QtWidgets.QWidget): self.spinner = QtWaitingSpinner(parent, True, True) self.spinner.setColor(QtGui.QColor(255, 255, 255)) self.spinner.start() def on_spinner_stop(self): self.spinner.stop() self.spinner = None def menu_context_tree_lists(self, point): # Infos about the node selected. index = self.tr_lists_user.indexAt(point) # Do not open menu if something went wrong or a parent node is clicked. if not index.isValid() or not index.parent().data(): return # We build the menu. menu = QtWidgets.QMenu() menu.addAction("Download Playlist", lambda: self.thread_download_list_media(point)) menu.exec(self.tr_lists_user.mapToGlobal(point)) def thread_download_list_media(self, point): self.thread_it(self.on_download_list_media, point) self.thread_it(self.list_items_show, point=point) def on_download_list_media(self, point: QtCore.QPoint): item = self.tr_lists_user.itemAt(point) media = item.data(3, QtCore.Qt.ItemDataRole.UserRole) # TODO: Implement disable download button etc. 
self.download(media, self.dl) def search_populate_results(self, query: str, type_media: SearchTypes): self.tr_results.clear() results: [ResultSearch] = self.search(query, [type_media]) self.populate_tree_results(results) def populate_tree_results(self, results: [ResultSearch]): self.tr_results.clear() for item in results: # Format seconds to mm:ss. m, s = divmod(item.duration_sec, 60) duration: str = f"{m:02d}:{s:02d}" # Since sorting happens only by string, we need to pad the index and add 1 (to avoid start at 0) index: str = f"{item.position + 1:03}" child = QtWidgets.QTreeWidgetItem() child.setText(0, index) child.setText(1, item.artist) child.setText(2, item.title) child.setText(3, item.album) child.setText(4, duration) child.setData(5, QtCore.Qt.ItemDataRole.UserRole, item.obj) self.tr_results.addTopLevelItem(child) def search(self, query: str, types_media: SearchTypes) -> [ResultSearch]: result_search: [dict[str, SearchTypes]] = self.tidal.session.search(query, models=types_media, limit=999) result: [ResultSearch] = [] for _media_type, l_media in result_search.items(): if isinstance(l_media, list): result = result + self.search_result_to_model(l_media) return result def search_result_to_model(self, items: [*SearchTypes]) -> [ResultSearch]: result = [] for idx, item in enumerate(items): if isinstance(item, Track): result_item: ResultSearch = ResultSearch( position=idx, artist=", ".join(artist.name for artist in item.artists), title=item.name, album=item.album.name, duration_sec=item.duration, obj=item, ) result.append(result_item) elif isinstance(item, Video): result_item: ResultSearch = ResultSearch( position=idx, artist=", ".join(artist.name for artist in item.artists), title=item.name, album=item.album.name if item.album else "", duration_sec=item.duration, obj=item, ) result.append(result_item) elif isinstance(item, Playlist): result_item: ResultSearch = ResultSearch( position=idx, artist=", ".join(artist.name for artist in item.promoted_artists) if item.promoted_artists else "", title=item.name, album="", duration_sec=item.duration, obj=item, ) result.append(result_item) elif isinstance(item, Album): result_item: ResultSearch = ResultSearch( position=idx, artist=", ".join(artist.name for artist in item.artists), title="", album=item.name, duration_sec=item.duration, obj=item, ) result.append(result_item) return result def _init_signals(self): self.b_download.clicked.connect(lambda: self.thread_it(self.on_download_results)) self.l_search.returnPressed.connect( lambda: self.search_populate_results(self.l_search.text(), self.cb_search_type.currentData()) ) self.b_search.clicked.connect( lambda: self.search_populate_results(self.l_search.text(), self.cb_search_type.currentData()) ) self.cb_quality_audio.currentIndexChanged.connect(self.quality_set_audio) self.cb_quality_video.currentIndexChanged.connect(self.quality_set_video) self.tr_lists_user.itemClicked.connect(self.on_list_items_show) self.spinner_start[QtWidgets.QWidget].connect(self.on_spinner_start) self.spinner_stop.connect(self.on_spinner_stop) self.s_item_advance.connect(self.progress_item) self.s_item_name.connect(self.progress_item_name) self.s_list_advance.connect(self.progress_list) self.s_pb_reset.connect(self.progress_reset) self.s_populate_tree_lists.connect(self.on_populate_tree_lists) def progress_list(self, value: float): self.pb_list.setValue(int(math.ceil(value))) def progress_item(self, value: float): self.pb_item.setValue(int(math.ceil(value))) def progress_item_name(self, value: str): 
self.pb_item.setFormat(f"%p% {value}") def progress_list_name(self, value: str): self.pb_list.setFormat(f"%p% {value}") def quality_set_audio(self, index): self.settings.data.quality_audio = Quality(self.cb_quality_audio.itemData(index).value) self.settings.save() if self.tidal: self.tidal.settings_apply() def quality_set_video(self, index): self.settings.data.quality_video = QualityVideo(self.cb_quality_video.itemData(index).value) self.settings.save() if self.tidal: self.tidal.settings_apply() def on_list_items_show(self, item: QtWidgets.QTreeWidgetItem): media_list: Album | Playlist = item.data(3, QtCore.Qt.ItemDataRole.UserRole) # Only if clicked item is not a top level item. if media_list: self.list_items_show(media_list) def list_items_show(self, media_list: Album | Playlist | None = None, point: QtCore.QPoint | None = None): if point: item = self.tr_lists_user.itemAt(point) media_list = item.data(3, QtCore.Qt.ItemDataRole.UserRole) media_items = media_list.items() result = self.search_result_to_model(media_items) self.populate_tree_results(result) def thread_it(self, fn: Callable, *args, **kwargs): # Any other args, kwargs are passed to the run function
next_line: worker = Worker(fn, *args, **kwargs)
gold_snippet_index: 11
created_at: 2023-12-19 23:05:47+00:00
level: 16k
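For readers unfamiliar with the Worker/QThreadPool pattern exercised by the record above (whose gold next_line is `worker = Worker(fn, *args, **kwargs)` inside `thread_it`), the following is a minimal, self-contained sketch of that pattern, assuming only that PySide6 is installed. The `Worker` wrapper and the completed `thread_it` step mirror the snippets in the record; the standalone `__main__` driver and the use of the global thread pool are illustrative assumptions, not part of the dataset entry.

# Minimal sketch (assumption: PySide6 installed) of the Worker/QThreadPool
# pattern shown in the record above; the __main__ driver is illustrative only.
import sys

from PySide6 import QtCore, QtWidgets


class Worker(QtCore.QRunnable):
    """Wrap a callable and its arguments so QThreadPool can run it off the GUI thread."""

    def __init__(self, fn, *args, **kwargs):
        super().__init__()
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

    @QtCore.Slot()
    def run(self):
        # Executed on a pool thread, not on the GUI thread.
        self.fn(*self.args, **self.kwargs)


def thread_it(pool: QtCore.QThreadPool, fn, *args, **kwargs) -> None:
    # This is the step the record's gold next_line completes:
    worker = Worker(fn, *args, **kwargs)
    pool.start(worker)


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)  # not strictly required for QThreadPool, but matches GUI usage
    pool = QtCore.QThreadPool.globalInstance()
    thread_it(pool, print, "running on a worker thread")
    pool.waitForDone()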
repo_name: zyrant/SPGroup3D
file_path: tests/test_data/test_datasets/test_scannet_dataset.py
[ { "identifier": "ScanNetDataset", "path": "mmdet3d/datasets/scannet_dataset.py", "snippet": "class ScanNetDataset(Custom3DDataset):\n r\"\"\"ScanNet Dataset for Detection Task.\n\n This class serves as the API for experiments on the ScanNet Dataset.\n\n Please refer to the `github repo <https://github.com/ScanNet/ScanNet>`_\n for data downloading.\n\n Args:\n data_root (str): Path of dataset root.\n ann_file (str): Path of annotation file.\n pipeline (list[dict], optional): Pipeline used for data processing.\n Defaults to None.\n classes (tuple[str], optional): Classes used in the dataset.\n Defaults to None.\n modality (dict, optional): Modality to specify the sensor data used\n as input. Defaults to None.\n box_type_3d (str, optional): Type of 3D box of this dataset.\n Based on the `box_type_3d`, the dataset will encapsulate the box\n to its original format then converted them to `box_type_3d`.\n Defaults to 'Depth' in this dataset. Available options includes\n\n - 'LiDAR': Box in LiDAR coordinates.\n - 'Depth': Box in depth coordinates, usually for indoor dataset.\n - 'Camera': Box in camera coordinates.\n filter_empty_gt (bool, optional): Whether to filter empty GT.\n Defaults to True.\n test_mode (bool, optional): Whether the dataset is in test mode.\n Defaults to False.\n \"\"\"\n CLASSES = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',\n 'bookshelf', 'picture', 'counter', 'desk', 'curtain',\n 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',\n 'garbagebin')\n\n def __init__(self,\n data_root,\n ann_file,\n pipeline=None,\n classes=None,\n modality=dict(use_camera=False, use_depth=True),\n box_type_3d='Depth',\n filter_empty_gt=True,\n test_mode=False,\n **kwargs):\n super().__init__(\n data_root=data_root,\n ann_file=ann_file,\n pipeline=pipeline,\n classes=classes,\n modality=modality,\n box_type_3d=box_type_3d,\n filter_empty_gt=filter_empty_gt,\n test_mode=test_mode,\n **kwargs)\n assert 'use_camera' in self.modality and \\\n 'use_depth' in self.modality\n assert self.modality['use_camera'] or self.modality['use_depth']\n\n def get_data_info(self, index):\n \"\"\"Get data info according to the given index.\n\n Args:\n index (int): Index of the sample data to get.\n\n Returns:\n dict: Data information that will be passed to the data\n preprocessing pipelines. 
It includes the following keys:\n\n - sample_idx (str): Sample index.\n - pts_filename (str): Filename of point clouds.\n - file_name (str): Filename of point clouds.\n - img_prefix (str, optional): Prefix of image files.\n - img_info (dict, optional): Image info.\n - ann_info (dict): Annotation info.\n \"\"\"\n info = self.data_infos[index]\n sample_idx = info['point_cloud']['lidar_idx']\n pts_filename = osp.join(self.data_root, info['pts_path'])\n input_dict = dict(sample_idx=sample_idx)\n\n if self.modality['use_depth']:\n input_dict['pts_filename'] = pts_filename\n input_dict['file_name'] = pts_filename\n\n if self.modality['use_camera']:\n img_info = []\n for img_path in info['img_paths']:\n img_info.append(\n dict(filename=osp.join(self.data_root, img_path)))\n intrinsic = info['intrinsics']\n axis_align_matrix = self._get_axis_align_matrix(info)\n depth2img = []\n for extrinsic in info['extrinsics']:\n depth2img.append(\n intrinsic @ np.linalg.inv(axis_align_matrix @ extrinsic))\n\n input_dict['img_prefix'] = None\n input_dict['img_info'] = img_info\n input_dict['depth2img'] = depth2img\n\n if not self.test_mode:\n annos = self.get_ann_info(index)\n input_dict['ann_info'] = annos\n if self.filter_empty_gt and ~(annos['gt_labels_3d'] != -1).any():\n return None\n return input_dict\n\n def get_ann_info(self, index):\n \"\"\"Get annotation info according to the given index.\n\n Args:\n index (int): Index of the annotation data to get.\n\n Returns:\n dict: annotation information consists of the following keys:\n\n - gt_bboxes_3d (:obj:`DepthInstance3DBoxes`):\n 3D ground truth bboxes\n - gt_labels_3d (np.ndarray): Labels of ground truths.\n - pts_instance_mask_path (str): Path of instance masks.\n - pts_semantic_mask_path (str): Path of semantic masks.\n - axis_align_matrix (np.ndarray): Transformation matrix for\n global scene alignment.\n \"\"\"\n # Use index to get the annos, thus the evalhook could also use this api\n info = self.data_infos[index]\n if info['annos']['gt_num'] != 0:\n gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(\n np.float32) # k, 6\n gt_labels_3d = info['annos']['class'].astype(np.int64)\n else:\n gt_bboxes_3d = np.zeros((0, 6), dtype=np.float32)\n gt_labels_3d = np.zeros((0, ), dtype=np.int64)\n\n # to target box structure\n gt_bboxes_3d = DepthInstance3DBoxes(\n gt_bboxes_3d,\n box_dim=gt_bboxes_3d.shape[-1],\n with_yaw=False,\n origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)\n\n pts_instance_mask_path = osp.join(self.data_root,\n info['pts_instance_mask_path'])\n pts_semantic_mask_path = osp.join(self.data_root,\n info['pts_semantic_mask_path'])\n\n axis_align_matrix = self._get_axis_align_matrix(info)\n\n anns_results = dict(\n gt_bboxes_3d=gt_bboxes_3d,\n gt_labels_3d=gt_labels_3d,\n pts_instance_mask_path=pts_instance_mask_path,\n pts_semantic_mask_path=pts_semantic_mask_path,\n axis_align_matrix=axis_align_matrix)\n return anns_results\n\n def prepare_test_data(self, index):\n \"\"\"Prepare data for testing.\n\n We should take axis_align_matrix from self.data_infos since we need\n to align point clouds.\n\n Args:\n index (int): Index for accessing the target data.\n\n Returns:\n dict: Testing data dict of the corresponding index.\n \"\"\"\n input_dict = self.get_data_info(index)\n # take the axis_align_matrix from data_infos\n input_dict['ann_info'] = dict(\n axis_align_matrix=self._get_axis_align_matrix(\n self.data_infos[index]))\n self.pre_pipeline(input_dict)\n example = self.pipeline(input_dict)\n return example\n\n @staticmethod\n 
def _get_axis_align_matrix(info):\n \"\"\"Get axis_align_matrix from info. If not exist, return identity mat.\n\n Args:\n info (dict): one data info term.\n\n Returns:\n np.ndarray: 4x4 transformation matrix.\n \"\"\"\n if 'axis_align_matrix' in info['annos'].keys():\n return info['annos']['axis_align_matrix'].astype(np.float32)\n else:\n warnings.warn(\n 'axis_align_matrix is not found in ScanNet data info, please '\n 'use new pre-process scripts to re-generate ScanNet data')\n return np.eye(4).astype(np.float32)\n\n def _build_default_pipeline(self):\n \"\"\"Build the default pipeline for this dataset.\"\"\"\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=False,\n load_dim=6,\n use_dim=[0, 1, 2, 3, 4, 5]),\n dict(type='GlobalAlignment', rotation_axis=2),\n dict(\n type='DefaultFormatBundle3D',\n class_names=self.CLASSES,\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n ]\n return Compose(pipeline)\n\n def show(self, results, out_dir, show=True, pipeline=None):\n \"\"\"Results visualization.\n\n Args:\n results (list[dict]): List of bounding boxes results.\n out_dir (str): Output directory of visualization result.\n show (bool): Visualize the results online.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n \"\"\"\n assert out_dir is not None, 'Expect out_dir, got none.'\n pipeline = self._build_default_pipeline()\n for i, result in enumerate(results):\n data_info = self.data_infos[i]\n pts_path = data_info['pts_path']\n file_name = osp.split(pts_path)[-1].split('.')[0]\n points = self._extract_data(i, pipeline, 'points', load_annos=True).numpy()\n gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d']\n gt_bboxes = gt_bboxes.corners.numpy() if len(gt_bboxes) else None\n gt_labels = self.get_ann_info(i)['gt_labels_3d']\n pred_bboxes = result['boxes_3d']\n pred_bboxes = pred_bboxes.corners.numpy() if len(pred_bboxes) else None\n pred_labels = result['labels_3d']\n show_result_v2(points, gt_bboxes, gt_labels,\n pred_bboxes, pred_labels, out_dir, file_name)" }, { "identifier": "ScanNetInstanceSegDataset", "path": "mmdet3d/datasets/scannet_dataset.py", "snippet": "class ScanNetInstanceSegDataset(Custom3DSegDataset):\n CLASSES = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',\n 'bookshelf', 'picture', 'counter', 'desk', 'curtain',\n 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',\n 'garbagebin')\n\n VALID_CLASS_IDS = (3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34,\n 36, 39)\n\n ALL_CLASS_IDS = tuple(range(41))\n\n def get_ann_info(self, index):\n \"\"\"Get annotation info according to the given index.\n\n Args:\n index (int): Index of the annotation data to get.\n\n Returns:\n dict: annotation information consists of the following keys:\n - pts_semantic_mask_path (str): Path of semantic masks.\n - pts_instance_mask_path (str): Path of instance masks.\n \"\"\"\n # Use index to get the annos, thus the evalhook could also use this api\n info = self.data_infos[index]\n\n pts_instance_mask_path = osp.join(self.data_root,\n info['pts_instance_mask_path'])\n pts_semantic_mask_path = osp.join(self.data_root,\n info['pts_semantic_mask_path'])\n\n anns_results = dict(\n pts_instance_mask_path=pts_instance_mask_path,\n pts_semantic_mask_path=pts_semantic_mask_path)\n return anns_results\n\n def get_classes_and_palette(self, classes=None, palette=None):\n \"\"\"Get class names of current dataset. 
Palette is simply ignored for\n instance segmentation.\n\n Args:\n classes (Sequence[str] | str | None): If classes is None, use\n default CLASSES defined by builtin dataset. If classes is a\n string, take it as a file name. The file contains the name of\n classes where each line contains one class name. If classes is\n a tuple or list, override the CLASSES defined by the dataset.\n Defaults to None.\n palette (Sequence[Sequence[int]]] | np.ndarray | None):\n The palette of segmentation map. If None is given, random\n palette will be generated. Defaults to None.\n \"\"\"\n if classes is not None:\n return classes, None\n return self.CLASSES, None\n\n def _build_default_pipeline(self):\n \"\"\"Build the default pipeline for this dataset.\"\"\"\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=False,\n use_color=True,\n load_dim=6,\n use_dim=[0, 1, 2, 3, 4, 5]),\n dict(\n type='LoadAnnotations3D',\n with_bbox_3d=False,\n with_label_3d=False,\n with_mask_3d=True,\n with_seg_3d=True),\n dict(\n type='PointSegClassMapping',\n valid_cat_ids=self.VALID_CLASS_IDS,\n max_cat_id=40),\n dict(\n type='DefaultFormatBundle3D',\n with_label=False,\n class_names=self.CLASSES),\n dict(\n type='Collect3D',\n keys=['points', 'pts_semantic_mask', 'pts_instance_mask'])\n ]\n return Compose(pipeline)\n\n def evaluate(self,\n results,\n metric=None,\n options=None,\n logger=None,\n show=False,\n out_dir=None,\n pipeline=None):\n \"\"\"Evaluation in instance segmentation protocol.\n\n Args:\n results (list[dict]): List of results.\n metric (str | list[str]): Metrics to be evaluated.\n options (dict, optional): options for instance_seg_eval.\n logger (logging.Logger | None | str): Logger used for printing\n related information during evaluation. 
Defaults to None.\n show (bool, optional): Whether to visualize.\n Defaults to False.\n out_dir (str, optional): Path to save the visualization results.\n Defaults to None.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n\n Returns:\n dict: Evaluation results.\n \"\"\"\n assert isinstance(\n results, list), f'Expect results to be list, got {type(results)}.'\n assert len(results) > 0, 'Expect length of results > 0.'\n assert len(results) == len(self.data_infos)\n assert isinstance(\n results[0], dict\n ), f'Expect elements in results to be dict, got {type(results[0])}.'\n\n load_pipeline = self._get_pipeline(pipeline)\n pred_instance_masks = [result['instance_mask'] for result in results]\n pred_instance_labels = [result['instance_label'] for result in results]\n pred_instance_scores = [result['instance_score'] for result in results]\n gt_semantic_masks, gt_instance_masks = zip(*[\n self._extract_data(\n index=i,\n pipeline=load_pipeline,\n key=['pts_semantic_mask', 'pts_instance_mask'],\n load_annos=True) for i in range(len(self.data_infos))\n ])\n ret_dict = instance_seg_eval(\n gt_semantic_masks,\n gt_instance_masks,\n pred_instance_masks,\n pred_instance_labels,\n pred_instance_scores,\n valid_class_ids=self.VALID_CLASS_IDS,\n class_labels=self.CLASSES,\n options=options,\n logger=logger)\n\n if show:\n raise NotImplementedError('show is not implemented for now')\n\n return ret_dict" }, { "identifier": "ScanNetSegDataset", "path": "mmdet3d/datasets/scannet_dataset.py", "snippet": "class ScanNetSegDataset(Custom3DSegDataset):\n r\"\"\"ScanNet Dataset for Semantic Segmentation Task.\n\n This class serves as the API for experiments on the ScanNet Dataset.\n\n Please refer to the `github repo <https://github.com/ScanNet/ScanNet>`_\n for data downloading.\n\n Args:\n data_root (str): Path of dataset root.\n ann_file (str): Path of annotation file.\n pipeline (list[dict], optional): Pipeline used for data processing.\n Defaults to None.\n classes (tuple[str], optional): Classes used in the dataset.\n Defaults to None.\n palette (list[list[int]], optional): The palette of segmentation map.\n Defaults to None.\n modality (dict, optional): Modality to specify the sensor data used\n as input. Defaults to None.\n test_mode (bool, optional): Whether the dataset is in test mode.\n Defaults to False.\n ignore_index (int, optional): The label index to be ignored, e.g.\n unannotated points. If None is given, set to len(self.CLASSES).\n Defaults to None.\n scene_idxs (np.ndarray | str, optional): Precomputed index to load\n data. 
For scenes with many points, we may sample it several times.\n Defaults to None.\n \"\"\"\n CLASSES = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',\n 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',\n 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink',\n 'bathtub', 'otherfurniture')\n\n VALID_CLASS_IDS = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28,\n 33, 34, 36, 39)\n\n ALL_CLASS_IDS = tuple(range(41))\n\n PALETTE = [\n [174, 199, 232],\n [152, 223, 138],\n [31, 119, 180],\n [255, 187, 120],\n [188, 189, 34],\n [140, 86, 75],\n [255, 152, 150],\n [214, 39, 40],\n [197, 176, 213],\n [148, 103, 189],\n [196, 156, 148],\n [23, 190, 207],\n [247, 182, 210],\n [219, 219, 141],\n [255, 127, 14],\n [158, 218, 229],\n [44, 160, 44],\n [112, 128, 144],\n [227, 119, 194],\n [82, 84, 163],\n ]\n\n def __init__(self,\n data_root,\n ann_file,\n pipeline=None,\n classes=None,\n palette=None,\n modality=None,\n test_mode=False,\n ignore_index=None,\n scene_idxs=None,\n **kwargs):\n\n super().__init__(\n data_root=data_root,\n ann_file=ann_file,\n pipeline=pipeline,\n classes=classes,\n palette=palette,\n modality=modality,\n test_mode=test_mode,\n ignore_index=ignore_index,\n scene_idxs=scene_idxs,\n **kwargs)\n\n def get_ann_info(self, index):\n \"\"\"Get annotation info according to the given index.\n\n Args:\n index (int): Index of the annotation data to get.\n\n Returns:\n dict: annotation information consists of the following keys:\n\n - pts_semantic_mask_path (str): Path of semantic masks.\n \"\"\"\n # Use index to get the annos, thus the evalhook could also use this api\n info = self.data_infos[index]\n\n pts_semantic_mask_path = osp.join(self.data_root,\n info['pts_semantic_mask_path'])\n\n anns_results = dict(pts_semantic_mask_path=pts_semantic_mask_path)\n return anns_results\n\n def _build_default_pipeline(self):\n \"\"\"Build the default pipeline for this dataset.\"\"\"\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=False,\n use_color=True,\n load_dim=6,\n use_dim=[0, 1, 2, 3, 4, 5]),\n dict(\n type='LoadAnnotations3D',\n with_bbox_3d=False,\n with_label_3d=False,\n with_mask_3d=False,\n with_seg_3d=True),\n dict(\n type='PointSegClassMapping',\n valid_cat_ids=self.VALID_CLASS_IDS,\n max_cat_id=np.max(self.ALL_CLASS_IDS)),\n dict(\n type='DefaultFormatBundle3D',\n with_label=False,\n class_names=self.CLASSES),\n dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])\n ]\n return Compose(pipeline)\n\n def show(self, results, out_dir, show=True, pipeline=None):\n \"\"\"Results visualization.\n\n Args:\n results (list[dict]): List of bounding boxes results.\n out_dir (str): Output directory of visualization result.\n show (bool): Visualize the results online.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n \"\"\"\n assert out_dir is not None, 'Expect out_dir, got none.'\n pipeline = self._get_pipeline(pipeline)\n for i, result in enumerate(results):\n data_info = self.data_infos[i]\n pts_path = data_info['pts_path']\n file_name = osp.split(pts_path)[-1].split('.')[0]\n points, gt_sem_mask = self._extract_data(\n i, pipeline, ['points', 'pts_semantic_mask'], load_annos=True)\n points = points.numpy()\n pred_sem_mask = result['semantic_mask'].numpy()\n show_seg_result(points, gt_sem_mask,\n pred_sem_mask, out_dir, file_name,\n np.array(self.PALETTE), self.ignore_index, show)\n\n def get_scene_idxs(self, scene_idxs):\n \"\"\"Compute scene_idxs for data sampling.\n\n 
We sample more times for scenes with more points.\n \"\"\"\n # when testing, we load one whole scene every time\n if not self.test_mode and scene_idxs is None:\n raise NotImplementedError(\n 'please provide re-sampled scene indexes for training')\n\n return super().get_scene_idxs(scene_idxs)\n\n def format_results(self, results, txtfile_prefix=None):\n r\"\"\"Format the results to txt file. Refer to `ScanNet documentation\n <http://kaldir.vc.in.tum.de/scannet_benchmark/documentation>`_.\n\n Args:\n outputs (list[dict]): Testing results of the dataset.\n txtfile_prefix (str): The prefix of saved files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n\n Returns:\n tuple: (outputs, tmp_dir), outputs is the detection results,\n tmp_dir is the temporal directory created for saving submission\n files when ``submission_prefix`` is not specified.\n \"\"\"\n import mmcv\n\n if txtfile_prefix is None:\n tmp_dir = tempfile.TemporaryDirectory()\n txtfile_prefix = osp.join(tmp_dir.name, 'results')\n else:\n tmp_dir = None\n mmcv.mkdir_or_exist(txtfile_prefix)\n\n # need to map network output to original label idx\n pred2label = np.zeros(len(self.VALID_CLASS_IDS)).astype(np.int)\n for original_label, output_idx in self.label_map.items():\n if output_idx != self.ignore_index:\n pred2label[output_idx] = original_label\n\n outputs = []\n for i, result in enumerate(results):\n info = self.data_infos[i]\n sample_idx = info['point_cloud']['lidar_idx']\n pred_sem_mask = result['semantic_mask'].numpy().astype(np.int)\n pred_label = pred2label[pred_sem_mask]\n curr_file = f'{txtfile_prefix}/{sample_idx}.txt'\n np.savetxt(curr_file, pred_label, fmt='%d')\n outputs.append(dict(seg_mask=pred_label))\n\n return outputs, tmp_dir" }, { "identifier": "ScanNetInstanceSegV2Dataset", "path": "mmdet3d/datasets/scannet_dataset.py", "snippet": "class ScanNetInstanceSegV2Dataset(ScanNetDataset):\n VALID_CLASS_IDS = (3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28,\n 33, 34, 36, 39)\n\n def _build_default_pipeline(self):\n \"\"\"Build the default pipeline for this dataset.\"\"\"\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=False,\n use_color=True,\n load_dim=6,\n use_dim=[0, 1, 2, 3, 4, 5]),\n dict(\n type='LoadAnnotations3D',\n with_bbox_3d=False,\n with_label_3d=False,\n with_mask_3d=True,\n with_seg_3d=True),\n dict(\n type='DefaultFormatBundle3D',\n with_label=False,\n class_names=self.CLASSES),\n dict(\n type='Collect3D',\n keys=['points', 'pts_semantic_mask', 'pts_instance_mask'])\n ]\n return Compose(pipeline)\n\n def evaluate(self,\n results,\n metric=None,\n options=None,\n logger=None,\n show=False,\n out_dir=None,\n pipeline=None):\n \"\"\"Evaluation in instance segmentation protocol.\n\n Args:\n results (list[dict]): List of results.\n metric (str | list[str]): Metrics to be evaluated.\n options (dict, optional): options for instance_seg_eval.\n logger (logging.Logger | None | str): Logger used for printing\n related information during evaluation. 
Defaults to None.\n show (bool, optional): Whether to visualize.\n Defaults to False.\n out_dir (str, optional): Path to save the visualization results.\n Defaults to None.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n\n Returns:\n dict: Evaluation results.\n \"\"\"\n assert isinstance(\n results, list), f'Expect results to be list, got {type(results)}.'\n assert len(results) > 0, 'Expect length of results > 0.'\n assert len(results) == len(self.data_infos)\n assert isinstance(\n results[0], dict\n ), f'Expect elements in results to be dict, got {type(results[0])}.'\n\n load_pipeline = self._build_default_pipeline()\n pred_instance_masks = [result['instance_mask'] for result in results]\n pred_instance_labels = [result['instance_label'] for result in results]\n pred_instance_scores = [result['instance_score'] for result in results]\n gt_semantic_masks, gt_instance_masks = zip(*[\n self._extract_data(\n index=i,\n pipeline=load_pipeline,\n key=['pts_semantic_mask', 'pts_instance_mask'],\n load_annos=True) for i in range(len(self.data_infos))\n ])\n ret_dict = instance_seg_eval_v2(\n gt_semantic_masks,\n gt_instance_masks,\n pred_instance_masks,\n pred_instance_labels,\n pred_instance_scores,\n valid_class_ids=self.VALID_CLASS_IDS,\n class_labels=self.CLASSES,\n options=options,\n logger=logger)\n\n if show:\n self.show(results, out_dir)\n\n return ret_dict\n\n def show(self, results, out_dir, show=True, pipeline=None):\n assert out_dir is not None, 'Expect out_dir, got none.'\n load_pipeline = self._build_default_pipeline()\n for i, result in enumerate(results):\n data_info = self.data_infos[i]\n pts_path = data_info['pts_path']\n file_name = osp.split(pts_path)[-1].split('.')[0]\n points, gt_instance_mask, gt_sem_mask = self._extract_data(\n i, load_pipeline, ['points', 'pts_instance_mask', 'pts_semantic_mask'], load_annos=True)\n points = points.numpy()\n gt_inst_mask_final = np.zeros_like(gt_instance_mask)\n for cls_idx in self.VALID_CLASS_IDS:\n mask = gt_sem_mask == cls_idx\n gt_inst_mask_final += mask.numpy()\n gt_instance_mask[gt_inst_mask_final == 0] = -1\n\n pred_instance_masks = result['instance_mask']\n pred_instance_scores = result['instance_score']\n\n pred_instance_masks_sort = pred_instance_masks[pred_instance_scores.argsort()]\n pred_instance_masks_label = pred_instance_masks_sort[0].long() - 1\n for i in range(1, pred_instance_masks_sort.shape[0]):\n pred_instance_masks_label[pred_instance_masks_sort[i]] = i\n\n palette = np.random.random((max(max(pred_instance_masks_label) + 2, max(gt_instance_mask) + 2), 3)) * 255\n palette[-1] = 255\n\n show_seg_result(points, gt_instance_mask,\n pred_instance_masks_label, out_dir, file_name,\n palette)" } ]
import copy
import numpy as np
import pytest
import torch
import tempfile
import tempfile
import mmcv
import tempfile
import tempfile
import mmcv
import mmcv
from mmdet3d.datasets import (ScanNetDataset, ScanNetInstanceSegDataset, ScanNetSegDataset, ScanNetInstanceSegV2Dataset)
from mmdet3d.core.bbox.structures import DepthInstance3DBoxes
from os import path as osp
from mmdet3d.core.bbox import DepthInstance3DBoxes
from os import path as osp
from os import path as osp
token_num: 11,054
3, 19, 12, 8, 0, 11, 0, 0, 1, 2, 13, 17, 1, 1, 1, 6, 2, 13, 19, 4, 17, 0, 14, 1, 7, 2, 1, 7, 2, 0, 5, 17, 5, 0, 0, 3, 6, 5, 11, 1, 13, 13, 2, 3, 1, 0, 13, 19, 1, 14, 5, 3, 1, 13, 1, 2, 3, 2, 1 ]).long()) results = [result] scannet_dataset.show(results, temp_dir, show=False) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() # test show with pipeline tmp_dir = tempfile.TemporaryDirectory() temp_dir = tmp_dir.name class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'otherfurniture') eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=False, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict(type='DefaultFormatBundle3D', class_names=class_names), dict(type='Collect3D', keys=['points', 'pts_semantic_mask']) ] scannet_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() def test_seg_format_results(): root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, test_mode=True) results = [] pred_sem_mask = dict( semantic_mask=torch.tensor([ 13, 5, 1, 2, 6, 2, 13, 1, 14, 2, 0, 0, 5, 5, 3, 0, 1, 14, 0, 0, 0, 18, 6, 15, 13, 0, 2, 4, 0, 3, 16, 6, 13, 5, 13, 0, 0, 0, 0, 1, 7, 3, 19, 12, 8, 0, 11, 0, 0, 1, 2, 13, 17, 1, 1, 1, 6, 2, 13, 19, 4, 17, 0, 14, 1, 7, 2, 1, 7, 2, 0, 5, 17, 5, 0, 0, 3, 6, 5, 11, 1, 13, 13, 2, 3, 1, 0, 13, 19, 1, 14, 5, 3, 1, 13, 1, 2, 3, 2, 1 ]).long()) results.append(pred_sem_mask) result_files, tmp_dir = scannet_dataset.format_results(results) expected_label = np.array([ 16, 6, 2, 3, 7, 3, 16, 2, 24, 3, 1, 1, 6, 6, 4, 1, 2, 24, 1, 1, 1, 36, 7, 28, 16, 1, 3, 5, 1, 4, 33, 7, 16, 6, 16, 1, 1, 1, 1, 2, 8, 4, 39, 14, 9, 1, 12, 1, 1, 2, 3, 16, 34, 2, 2, 2, 7, 3, 16, 39, 5, 34, 1, 24, 2, 8, 3, 2, 8, 3, 1, 6, 34, 6, 1, 1, 4, 7, 6, 12, 2, 16, 16, 3, 4, 2, 1, 16, 39, 2, 24, 6, 4, 2, 16, 2, 3, 4, 3, 2 ]) expected_txt_path = osp.join(tmp_dir.name, 'results', 'scene0000_00.txt') assert np.all(result_files[0]['seg_mask'] == expected_label) mmcv.check_file_exist(expected_txt_path) def test_instance_seg_getitem(): np.random.seed(0) root_path = './tests/data/scannet/' ann_file = './tests/data/scannet/scannet_infos.pkl' class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') train_pipeline = [ dict( type='LoadPointsFromFile', 
coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=True, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict(type='NormalizePointsColor', color_mean=None), dict(type='DefaultFormatBundle3D', class_names=class_names), dict( type='Collect3D', keys=['points', 'pts_semantic_mask', 'pts_instance_mask']) ]
# Copyright (c) OpenMMLab. All rights reserved. def test_getitem(): np.random.seed(0) root_path = './tests/data/scannet/' ann_file = './tests/data/scannet/scannet_infos.pkl' class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') pipelines = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=True, load_dim=6, use_dim=[0, 1, 2]), dict( type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_mask_3d=True, with_seg_3d=True), dict(type='GlobalAlignment', rotation_axis=2), dict( type='PointSegClassMapping', valid_cat_ids=(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39)), dict(type='PointSample', num_points=5), dict( type='RandomFlip3D', sync_2d=False, flip_ratio_bev_horizontal=1.0, flip_ratio_bev_vertical=1.0), dict( type='GlobalRotScaleTrans', rot_range=[-0.087266, 0.087266], scale_ratio_range=[1.0, 1.0], shift_height=True), dict(type='DefaultFormatBundle3D', class_names=class_names), dict( type='Collect3D', keys=[ 'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask', 'pts_instance_mask' ], meta_keys=['file_name', 'sample_idx', 'pcd_rotation']), ] scannet_dataset = ScanNetDataset(root_path, ann_file, pipelines) data = scannet_dataset[0] points = data['points']._data gt_bboxes_3d = data['gt_bboxes_3d']._data gt_labels = data['gt_labels_3d']._data pts_semantic_mask = data['pts_semantic_mask']._data pts_instance_mask = data['pts_instance_mask']._data file_name = data['img_metas']._data['file_name'] pcd_rotation = data['img_metas']._data['pcd_rotation'] sample_idx = data['img_metas']._data['sample_idx'] expected_rotation = np.array([[0.99654, 0.08311407, 0.], [-0.08311407, 0.99654, 0.], [0., 0., 1.]]) assert file_name == './tests/data/scannet/points/scene0000_00.bin' assert np.allclose(pcd_rotation, expected_rotation, 1e-3) assert sample_idx == 'scene0000_00' expected_points = torch.tensor( [[1.8339e+00, 2.1093e+00, 2.2900e+00, 2.3895e+00], [3.6079e+00, 1.4592e-01, 2.0687e+00, 2.1682e+00], [4.1886e+00, 5.0614e+00, -1.0841e-01, -8.8736e-03], [6.8790e+00, 1.5086e+00, -9.3154e-02, 6.3816e-03], [4.8253e+00, 2.6668e-01, 1.4917e+00, 1.5912e+00]]) expected_gt_bboxes_3d = torch.tensor( [[-1.1835, -3.6317, 1.5704, 1.7577, 0.3761, 0.5724, 0.0000], [-3.1832, 3.2269, 1.1911, 0.6727, 0.2251, 0.6715, 0.0000], [-0.9598, -2.2864, 0.0093, 0.7506, 2.5709, 1.2145, 0.0000], [-2.6988, -2.7354, 0.8288, 0.7680, 1.8877, 0.2870, 0.0000], [3.2989, 0.2885, -0.0090, 0.7600, 3.8814, 2.1603, 0.0000]]) expected_gt_labels = np.array([ 6, 6, 4, 9, 11, 11, 10, 0, 15, 17, 17, 17, 3, 12, 4, 4, 14, 1, 0, 0, 0, 0, 0, 0, 5, 5, 5 ]) expected_pts_semantic_mask = np.array([0, 18, 18, 18, 18]) expected_pts_instance_mask = np.array([44, 22, 10, 10, 57]) original_classes = scannet_dataset.CLASSES assert scannet_dataset.CLASSES == class_names assert torch.allclose(points, expected_points, 1e-2) assert gt_bboxes_3d.tensor[:5].shape == (5, 7) assert torch.allclose(gt_bboxes_3d.tensor[:5], expected_gt_bboxes_3d, 1e-2) assert np.all(gt_labels.numpy() == expected_gt_labels) assert np.all(pts_semantic_mask.numpy() == expected_pts_semantic_mask) assert np.all(pts_instance_mask.numpy() == expected_pts_instance_mask) assert original_classes == class_names scannet_dataset = ScanNetDataset( root_path, ann_file, pipeline=None, classes=['cabinet', 'bed']) assert scannet_dataset.CLASSES != original_classes assert scannet_dataset.CLASSES == ['cabinet', 
'bed'] scannet_dataset = ScanNetDataset( root_path, ann_file, pipeline=None, classes=('cabinet', 'bed')) assert scannet_dataset.CLASSES != original_classes assert scannet_dataset.CLASSES == ('cabinet', 'bed') # Test load classes from file with tempfile.TemporaryDirectory() as tmpdir: path = tmpdir + 'classes.txt' with open(path, 'w') as f: f.write('cabinet\nbed\n') scannet_dataset = ScanNetDataset( root_path, ann_file, pipeline=None, classes=path) assert scannet_dataset.CLASSES != original_classes assert scannet_dataset.CLASSES == ['cabinet', 'bed'] def test_evaluate(): if not torch.cuda.is_available(): pytest.skip() root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetDataset(root_path, ann_file) results = [] pred_boxes = dict() pred_boxes['boxes_3d'] = DepthInstance3DBoxes( torch.tensor([[ 1.4813e+00, 3.5207e+00, 1.5704e+00, 1.7445e+00, 2.3196e-01, 5.7235e-01, 0.0000e+00 ], [ 2.9040e+00, -3.4803e+00, 1.1911e+00, 6.6078e-01, 1.7072e-01, 6.7154e-01, 0.0000e+00 ], [ 1.1466e+00, 2.1987e+00, 9.2576e-03, 5.4184e-01, 2.5346e+00, 1.2145e+00, 0.0000e+00 ], [ 2.9168e+00, 2.5016e+00, 8.2875e-01, 6.1697e-01, 1.8428e+00, 2.8697e-01, 0.0000e+00 ], [ -3.3114e+00, -1.3351e-02, -8.9524e-03, 4.4082e-01, 3.8582e+00, 2.1603e+00, 0.0000e+00 ], [ -2.0135e+00, -3.4857e+00, 9.3848e-01, 1.9911e+00, 2.1603e-01, 1.2767e+00, 0.0000e+00 ], [ -2.1945e+00, -3.1402e+00, -3.8165e-02, 1.4801e+00, 6.8676e-01, 1.0586e+00, 0.0000e+00 ], [ -2.7553e+00, 2.4055e+00, -2.9972e-02, 1.4764e+00, 1.4927e+00, 2.3380e+00, 0.0000e+00 ]])) pred_boxes['labels_3d'] = torch.tensor([6, 6, 4, 9, 11, 11]) pred_boxes['scores_3d'] = torch.tensor([0.5, 1.0, 1.0, 1.0, 1.0, 0.5]) results.append(pred_boxes) metric = [0.25, 0.5] ret_dict = scannet_dataset.evaluate(results, metric) assert abs(ret_dict['table_AP_0.25'] - 0.3333) < 0.01 assert abs(ret_dict['window_AP_0.25'] - 1.0) < 0.01 assert abs(ret_dict['counter_AP_0.25'] - 1.0) < 0.01 assert abs(ret_dict['curtain_AP_0.25'] - 1.0) < 0.01 # test evaluate with pipeline class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, load_dim=6, use_dim=[0, 1, 2]), dict(type='GlobalAlignment', rotation_axis=2), dict( type='DefaultFormatBundle3D', class_names=class_names, with_label=False), dict(type='Collect3D', keys=['points']) ] ret_dict = scannet_dataset.evaluate( results, metric, pipeline=eval_pipeline) assert abs(ret_dict['table_AP_0.25'] - 0.3333) < 0.01 assert abs(ret_dict['window_AP_0.25'] - 1.0) < 0.01 assert abs(ret_dict['counter_AP_0.25'] - 1.0) < 0.01 assert abs(ret_dict['curtain_AP_0.25'] - 1.0) < 0.01 def test_show(): tmp_dir = tempfile.TemporaryDirectory() temp_dir = tmp_dir.name root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetDataset(root_path, ann_file) boxes_3d = DepthInstance3DBoxes( torch.tensor([[ -2.4053e+00, 9.2295e-01, 8.0661e-02, 2.4054e+00, 2.1468e+00, 8.5990e-01, 0.0000e+00 ], [ -1.9341e+00, -2.0741e+00, 3.0698e-03, 3.2206e-01, 2.5322e-01, 3.5144e-01, 0.0000e+00 ], [ -3.6908e+00, 8.0684e-03, 2.6201e-01, 4.1515e-01, 7.6489e-01, 5.3585e-01, 0.0000e+00 ], [ 2.6332e+00, 8.5143e-01, -4.9964e-03, 3.0367e-01, 1.3448e+00, 1.8329e+00, 0.0000e+00 ], [ 2.0221e-02, 2.6153e+00, 1.5109e-02, 7.3335e-01, 1.0429e+00, 1.0251e+00, 0.0000e+00 
]])) scores_3d = torch.tensor( [1.2058e-04, 2.3012e-03, 6.2324e-06, 6.6139e-06, 6.7965e-05]) labels_3d = torch.tensor([0, 0, 0, 0, 0]) result = dict(boxes_3d=boxes_3d, scores_3d=scores_3d, labels_3d=labels_3d) results = [result] scannet_dataset.show(results, temp_dir, show=False) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() # show function with pipeline class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, load_dim=6, use_dim=[0, 1, 2]), dict(type='GlobalAlignment', rotation_axis=2), dict( type='DefaultFormatBundle3D', class_names=class_names, with_label=False), dict(type='Collect3D', keys=['points']) ] tmp_dir = tempfile.TemporaryDirectory() temp_dir = tmp_dir.name scannet_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() def test_seg_getitem(): np.random.seed(0) root_path = './tests/data/scannet/' ann_file = './tests/data/scannet/scannet_infos.pkl' class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'otherfurniture') palette = [ [174, 199, 232], [152, 223, 138], [31, 119, 180], [255, 187, 120], [188, 189, 34], [140, 86, 75], [255, 152, 150], [214, 39, 40], [197, 176, 213], [148, 103, 189], [196, 156, 148], [23, 190, 207], [247, 182, 210], [219, 219, 141], [255, 127, 14], [158, 218, 229], [44, 160, 44], [112, 128, 144], [227, 119, 194], [82, 84, 163], ] scene_idxs = [0 for _ in range(20)] # test network inputs are (xyz, rgb, normalized_xyz) pipelines = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=False, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict( type='IndoorPatchPointSample', num_points=5, block_size=1.5, ignore_index=len(class_names), use_normalized_coord=True, enlarge_size=0.2, min_unique_num=None), dict(type='NormalizePointsColor', color_mean=None), dict(type='DefaultFormatBundle3D', class_names=class_names), dict( type='Collect3D', keys=['points', 'pts_semantic_mask'], meta_keys=['file_name', 'sample_idx']) ] scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=pipelines, classes=None, palette=None, modality=None, test_mode=False, ignore_index=None, scene_idxs=scene_idxs) data = scannet_dataset[0] points = data['points']._data pts_semantic_mask = data['pts_semantic_mask']._data file_name = 
data['img_metas']._data['file_name'] sample_idx = data['img_metas']._data['sample_idx'] assert file_name == './tests/data/scannet/points/scene0000_00.bin' assert sample_idx == 'scene0000_00' expected_points = torch.tensor([[ 0.0000, 0.0000, 1.2427, 0.6118, 0.5529, 0.4471, -0.6462, -1.0046, 0.4280 ], [ 0.1553, -0.0074, 1.6077, 0.5882, 0.6157, 0.5569, -0.6001, -1.0068, 0.5537 ], [ 0.1518, 0.6016, 0.6548, 0.1490, 0.1059, 0.0431, -0.6012, -0.8309, 0.2255 ], [ -0.7494, 0.1033, 0.6756, 0.5216, 0.4353, 0.3333, -0.8687, -0.9748, 0.2327 ], [ -0.6836, -0.0203, 0.5884, 0.5765, 0.5020, 0.4510, -0.8491, -1.0105, 0.2027 ]]) expected_pts_semantic_mask = np.array([13, 13, 12, 2, 0]) original_classes = scannet_dataset.CLASSES original_palette = scannet_dataset.PALETTE assert scannet_dataset.CLASSES == class_names assert scannet_dataset.ignore_index == 20 assert torch.allclose(points, expected_points, 1e-2) assert np.all(pts_semantic_mask.numpy() == expected_pts_semantic_mask) assert original_classes == class_names assert original_palette == palette assert scannet_dataset.scene_idxs.dtype == np.int32 assert np.all(scannet_dataset.scene_idxs == np.array(scene_idxs)) # test network inputs are (xyz, rgb) np.random.seed(0) new_pipelines = copy.deepcopy(pipelines) new_pipelines[3] = dict( type='IndoorPatchPointSample', num_points=5, block_size=1.5, ignore_index=len(class_names), use_normalized_coord=False, enlarge_size=0.2, min_unique_num=None) scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=new_pipelines, scene_idxs=scene_idxs) data = scannet_dataset[0] points = data['points']._data assert torch.allclose(points, expected_points[:, :6], 1e-2) # test network inputs are (xyz, normalized_xyz) np.random.seed(0) new_pipelines = copy.deepcopy(pipelines) new_pipelines[0] = dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=False, load_dim=6, use_dim=[0, 1, 2]) new_pipelines.remove(new_pipelines[4]) scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=new_pipelines, scene_idxs=scene_idxs) data = scannet_dataset[0] points = data['points']._data assert torch.allclose(points, expected_points[:, [0, 1, 2, 6, 7, 8]], 1e-2) # test network inputs are (xyz,) np.random.seed(0) new_pipelines = copy.deepcopy(pipelines) new_pipelines[0] = dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=False, load_dim=6, use_dim=[0, 1, 2]) new_pipelines[3] = dict( type='IndoorPatchPointSample', num_points=5, block_size=1.5, ignore_index=len(class_names), use_normalized_coord=False, enlarge_size=0.2, min_unique_num=None) new_pipelines.remove(new_pipelines[4]) scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=new_pipelines, scene_idxs=scene_idxs) data = scannet_dataset[0] points = data['points']._data assert torch.allclose(points, expected_points[:, :3], 1e-2) # test dataset with selected classes scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=None, classes=['cabinet', 'chair'], scene_idxs=scene_idxs) label_map = {i: 20 for i in range(41)} label_map.update({3: 0, 5: 1}) assert scannet_dataset.CLASSES != original_classes assert scannet_dataset.CLASSES == ['cabinet', 'chair'] assert scannet_dataset.PALETTE == [palette[2], palette[4]] assert scannet_dataset.VALID_CLASS_IDS == [3, 5] assert scannet_dataset.label_map == label_map assert scannet_dataset.label2cat == {0: 'cabinet', 1: 'chair'} # test load classes from file with 
tempfile.TemporaryDirectory() as tmpdir: path = tmpdir + 'classes.txt' with open(path, 'w') as f: f.write('cabinet\nchair\n') scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=None, classes=path, scene_idxs=scene_idxs) assert scannet_dataset.CLASSES != original_classes assert scannet_dataset.CLASSES == ['cabinet', 'chair'] assert scannet_dataset.PALETTE == [palette[2], palette[4]] assert scannet_dataset.VALID_CLASS_IDS == [3, 5] assert scannet_dataset.label_map == label_map assert scannet_dataset.label2cat == {0: 'cabinet', 1: 'chair'} # test scene_idxs in dataset # we should input scene_idxs in train mode with pytest.raises(NotImplementedError): scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=None, scene_idxs=None) # test mode scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, pipeline=None, test_mode=True, scene_idxs=scene_idxs) assert np.all(scannet_dataset.scene_idxs == np.array([0])) def test_seg_evaluate(): if not torch.cuda.is_available(): pytest.skip() root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, test_mode=True) results = [] pred_sem_mask = dict( semantic_mask=torch.tensor([ 13, 5, 1, 2, 6, 2, 13, 1, 14, 2, 0, 0, 5, 5, 3, 0, 1, 14, 0, 0, 0, 18, 6, 15, 13, 0, 2, 4, 0, 3, 16, 6, 13, 5, 13, 0, 0, 0, 0, 1, 7, 3, 19, 12, 8, 0, 11, 0, 0, 1, 2, 13, 17, 1, 1, 1, 6, 2, 13, 19, 4, 17, 0, 14, 1, 7, 2, 1, 7, 2, 0, 5, 17, 5, 0, 0, 3, 6, 5, 11, 1, 13, 13, 2, 3, 1, 0, 13, 19, 1, 14, 5, 3, 1, 13, 1, 2, 3, 2, 1 ]).long()) results.append(pred_sem_mask) class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'otherfurniture') eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=False, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict(type='DefaultFormatBundle3D', class_names=class_names), dict(type='Collect3D', keys=['points', 'pts_semantic_mask']) ] ret_dict = scannet_dataset.evaluate(results, pipeline=eval_pipeline) assert abs(ret_dict['miou'] - 0.5308) < 0.01 assert abs(ret_dict['acc'] - 0.8219) < 0.01 assert abs(ret_dict['acc_cls'] - 0.7649) < 0.01 def test_seg_show(): tmp_dir = tempfile.TemporaryDirectory() temp_dir = tmp_dir.name root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, scene_idxs=[0]) result = dict( semantic_mask=torch.tensor([ 13, 5, 1, 2, 6, 2, 13, 1, 14, 2, 0, 0, 5, 5, 3, 0, 1, 14, 0, 0, 0, 18, 6, 15, 13, 0, 2, 4, 0, 3, 16, 6, 13, 5, 13, 0, 0, 0, 0, 1, 7, 3, 19, 12, 8, 0, 11, 0, 0, 1, 2, 13, 17, 1, 1, 1, 6, 2, 13, 19, 4, 17, 0, 14, 1, 7, 2, 1, 7, 2, 0, 5, 17, 5, 0, 0, 3, 6, 5, 11, 1, 13, 13, 2, 3, 1, 0, 13, 19, 1, 14, 5, 3, 1, 13, 1, 2, 3, 2, 1 ]).long()) results = [result] scannet_dataset.show(results, temp_dir, show=False) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 
'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() # test show with pipeline tmp_dir = tempfile.TemporaryDirectory() temp_dir = tmp_dir.name class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'otherfurniture') eval_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=False, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict(type='DefaultFormatBundle3D', class_names=class_names), dict(type='Collect3D', keys=['points', 'pts_semantic_mask']) ] scannet_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline) pts_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_points.obj') gt_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_gt.obj') pred_file_path = osp.join(temp_dir, 'scene0000_00', 'scene0000_00_pred.obj') mmcv.check_file_exist(pts_file_path) mmcv.check_file_exist(gt_file_path) mmcv.check_file_exist(pred_file_path) tmp_dir.cleanup() def test_seg_format_results(): root_path = './tests/data/scannet' ann_file = './tests/data/scannet/scannet_infos.pkl' scannet_dataset = ScanNetSegDataset( data_root=root_path, ann_file=ann_file, test_mode=True) results = [] pred_sem_mask = dict( semantic_mask=torch.tensor([ 13, 5, 1, 2, 6, 2, 13, 1, 14, 2, 0, 0, 5, 5, 3, 0, 1, 14, 0, 0, 0, 18, 6, 15, 13, 0, 2, 4, 0, 3, 16, 6, 13, 5, 13, 0, 0, 0, 0, 1, 7, 3, 19, 12, 8, 0, 11, 0, 0, 1, 2, 13, 17, 1, 1, 1, 6, 2, 13, 19, 4, 17, 0, 14, 1, 7, 2, 1, 7, 2, 0, 5, 17, 5, 0, 0, 3, 6, 5, 11, 1, 13, 13, 2, 3, 1, 0, 13, 19, 1, 14, 5, 3, 1, 13, 1, 2, 3, 2, 1 ]).long()) results.append(pred_sem_mask) result_files, tmp_dir = scannet_dataset.format_results(results) expected_label = np.array([ 16, 6, 2, 3, 7, 3, 16, 2, 24, 3, 1, 1, 6, 6, 4, 1, 2, 24, 1, 1, 1, 36, 7, 28, 16, 1, 3, 5, 1, 4, 33, 7, 16, 6, 16, 1, 1, 1, 1, 2, 8, 4, 39, 14, 9, 1, 12, 1, 1, 2, 3, 16, 34, 2, 2, 2, 7, 3, 16, 39, 5, 34, 1, 24, 2, 8, 3, 2, 8, 3, 1, 6, 34, 6, 1, 1, 4, 7, 6, 12, 2, 16, 16, 3, 4, 2, 1, 16, 39, 2, 24, 6, 4, 2, 16, 2, 3, 4, 3, 2 ]) expected_txt_path = osp.join(tmp_dir.name, 'results', 'scene0000_00.txt') assert np.all(result_files[0]['seg_mask'] == expected_label) mmcv.check_file_exist(expected_txt_path) def test_instance_seg_getitem(): np.random.seed(0) root_path = './tests/data/scannet/' ann_file = './tests/data/scannet/scannet_infos.pkl' class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', 'garbagebin') train_pipeline = [ dict( type='LoadPointsFromFile', coord_type='DEPTH', shift_height=False, use_color=True, load_dim=6, use_dim=[0, 1, 2, 3, 4, 5]), dict( type='LoadAnnotations3D', with_bbox_3d=False, with_label_3d=False, with_mask_3d=True, with_seg_3d=True), dict( type='PointSegClassMapping', valid_cat_ids=(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39), max_cat_id=40), dict(type='NormalizePointsColor', color_mean=None), dict(type='DefaultFormatBundle3D', class_names=class_names), dict( 
type='Collect3D', keys=['points', 'pts_semantic_mask', 'pts_instance_mask']) ]
scannet_dataset = ScanNetInstanceSegDataset(
1
2023-12-21 12:50:35+00:00
16k
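The ScanNet test row above repeatedly uses one pattern for loading class names from a file: write one name per line into a temporary classes.txt inside a TemporaryDirectory, then pass that path as classes=. Below is a minimal, self-contained Python sketch of that pattern (standard library only); load_class_names is a hypothetical stand-in for the parsing that ScanNetDataset / ScanNetSegDataset perform internally, not part of their API.

import os
import tempfile

def load_class_names(path):
    # Hypothetical helper: read one class name per non-empty line.
    with open(path, "r", encoding="utf-8") as f:
        return [line.strip() for line in f if line.strip()]

with tempfile.TemporaryDirectory() as tmpdir:
    # The test concatenates tmpdir + 'classes.txt'; os.path.join is the
    # safer equivalent and is used here.
    path = os.path.join(tmpdir, "classes.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write("cabinet\nbed\n")
    assert load_class_names(path) == ["cabinet", "bed"]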
v3ucn/Bert-vits2-V2.2
train_ms.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n transcription_path: str,\n cleaned_path: str,\n train_path: str,\n val_path: str,\n config_path: str,\n val_per_lang: int = 5,\n max_val_total: int = 10000,\n clean: bool = True,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n env: Dict[str, any],\n base: Dict[str, any],\n model: str,\n num_workers: int,\n spec_cache: bool,\n keep_ckpts: int,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n device: str,\n model: str,\n config_path: str,\n language_identification_library: str,\n port: int = 7860,\n share: bool = False,\n debug: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self, models: List[Dict[str, any]], port: int = 5000, device: str = \"cuda\"\n ):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, app_key: str, secret_key: str):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, config_path: str):" }, { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 384)\n\n self.empty_emo = torch.squeeze(\n torch.load(\"empty_emo.npy\", map_location=\"cpu\"), dim=1\n )\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n logger.info(\"Init dataset...\")\n for _id, spk, 
language, text, phones, tone, word2ph in tqdm(\n self.audiopaths_sid_text\n ):\n audiopath = f\"{_id}\"\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append(\n [audiopath, spk, language, text, phones, tone, word2ph]\n )\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n logger.info(\n \"skipped: \"\n + str(skipped)\n + \", total: \"\n + str(len(self.audiopaths_sid_text))\n )\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, ja_bert, en_bert, phones, tone, language = self.get_text(\n text, word2ph, phones, tone, language, audiopath\n )\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n\n if np.random.rand() > 0.1:\n emo = torch.squeeze(\n torch.load(audiopath.replace(\".wav\", \".emo.npy\"), map_location=\"cpu\"),\n dim=1,\n )\n else:\n emo = self.empty_emo\n return (phones, spec, wav, sid, tone, language, bert, ja_bert, en_bert, emo)\n\n def get_audio(self, filename):\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n filename, sampling_rate, self.sampling_rate\n )\n )\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n try:\n spec = torch.load(spec_filename)\n except:\n if self.use_mel_spec_posterior:\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n if config.train_ms_config.spec_cache:\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n if self.add_blank:\n phone = commons.intersperse(phone, 0)\n tone = commons.intersperse(tone, 0)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert_ori = torch.load(bert_path)\n assert bert_ori.shape[-1] == len(phone)\n except Exception as e:\n logger.warning(\"Bert load Failed\")\n logger.warning(e)\n\n if language_str == \"ZH\":\n bert = bert_ori\n ja_bert = torch.rand(1024, len(phone))\n en_bert = torch.rand(1024, len(phone))\n elif language_str == \"JP\":\n bert = torch.rand(1024, len(phone))\n ja_bert = bert_ori\n en_bert = torch.rand(1024, len(phone))\n elif language_str == \"EN\":\n bert = torch.rand(1024, len(phone))\n ja_bert = torch.rand(1024, len(phone))\n en_bert = bert_ori\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, ja_bert, 
en_bert, phone, tone, language\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate:\n \"\"\"Zero-pads model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n ja_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n en_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n emo = torch.FloatTensor(len(batch), 512)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n ja_bert_padded.zero_()\n en_bert_padded.zero_()\n emo.zero_()\n\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, : tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, : language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, : bert.size(1)] = bert\n\n ja_bert = row[7]\n ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert\n\n en_bert = row[8]\n en_bert_padded[i, :, : en_bert.size(1)] = en_bert\n\n emo[i, :] = row[9]\n\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n sid,\n tone_padded,\n language_padded,\n bert_padded,\n ja_bert_padded,\n en_bert_padded,\n emo,\n )" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. 
length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n try:\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n assert all(len(bucket) > 0 for bucket in buckets)\n # When one bucket is not traversed\n except Exception as e:\n print(\"Bucket warning \", e)\n for i in range(len(buckets) - 1, -1, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if len_bucket == 0:\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n 
resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer=4,\n n_layers_trans_flow=4,\n flow_share_parameter=False,\n use_transformer_flow=True,\n **kwargs\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", True\n )\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n self.n_speakers,\n gin_channels=self.enc_gin_channels,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers_trans_flow,\n 5,\n p_dropout,\n n_flow_layer,\n gin_channels=gin_channels,\n share_parameter=flow_share_parameter,\n )\n else:\n self.flow = ResidualCouplingBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n n_flow_layer,\n gin_channels=gin_channels,\n )\n self.sdp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers >= 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(\n self,\n x,\n x_lengths,\n y,\n y_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n emo=None,\n ):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask, loss_commit = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, emo, sid, g=g\n )\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n 
neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_),\n g,\n loss_commit,\n )\n\n def infer(\n self,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n emo=None,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None,\n ):\n # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask, _ = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, emo, sid, g=g\n )\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (\n sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n 
fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): # vits2\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 
2" }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
import platform import os import torch import torch.distributed as dist import logging import argparse import datetime import gc import commons import utils from torch.nn import functional as F from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from torch.nn.parallel import DistributedDataParallel as DDP from torch.cuda.amp import autocast, GradScaler from tqdm import tqdm from config import config from data_utils import ( TextAudioSpeakerLoader, TextAudioSpeakerCollate, DistributedBucketSampler, ) from models import ( SynthesizerTrn, MultiPeriodDiscriminator, DurationDiscriminator, ) from losses import generator_loss, discriminator_loss, feature_loss, kl_loss from mel_processing import mel_spectrogram_torch, spec_to_mel_torch from text.symbols import symbols
11,642
def train_and_evaluate( rank, local_rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, emo, ) in enumerate(tqdm(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) emo = emo.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), g, loss_commit, ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, emo, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length ) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax, ) y = commons.slice_segments( y, ids_slice * hps.data.hop_length, hps.train.segment_size ) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False):
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If encontered training problem,please try to disable TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 torch.backends.cuda.enable_math_sdp(True) global_step = 0 def run(): # 环境变量解析 envs = config.train_ms_config.env for env_name, env_value in envs.items(): if env_name not in os.environ.keys(): print("加载config中的配置{}".format(str(env_value))) os.environ[env_name] = str(env_value) print( "加载环境变量 \nMASTER_ADDR: {},\nMASTER_PORT: {},\nWORLD_SIZE: {},\nRANK: {},\nLOCAL_RANK: {}".format( os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"], os.environ["WORLD_SIZE"], os.environ["RANK"], os.environ["LOCAL_RANK"], ) ) backend = "nccl" if platform.system() == "Windows": backend = "gloo" # If Windows,switch to gloo backend. dist.init_process_group( backend=backend, init_method="env://", timeout=datetime.timedelta(seconds=300), ) # Use torchrun instead of mp.spawn rank = dist.get_rank() local_rank = int(os.environ["LOCAL_RANK"]) n_gpus = dist.get_world_size() # 命令行/config.yml配置解析 # hps = utils.get_hparams() parser = argparse.ArgumentParser() # 非必要不建议使用命令行配置,请使用config.yml文件 parser.add_argument( "-c", "--config", type=str, default=config.train_ms_config.config_path, help="JSON file for configuration", ) parser.add_argument( "-m", "--model", type=str, help="数据集文件夹路径,请注意,数据不再默认放在/logs文件夹下。如果需要用命令行配置,请声明相对于根目录的路径", default=config.dataset_path, ) args = parser.parse_args() model_dir = os.path.join(args.model, config.train_ms_config.model) if not os.path.exists(model_dir): os.makedirs(model_dir) hps = utils.get_hparams_from_file(args.config) hps.model_dir = model_dir # 比较路径是否相同 if os.path.realpath(args.config) != os.path.realpath( config.train_ms_config.config_path ): with open(args.config, "r", encoding="utf-8") as f: data = f.read() with open(config.train_ms_config.config_path, "w", encoding="utf-8") as f: f.write(data) torch.manual_seed(hps.train.seed) torch.cuda.set_device(local_rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=min(config.train_ms_config.num_workers, os.cpu_count() - 1), shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. 
if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(local_rank) if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(local_rank) if getattr(hps.train, "freeze_ZH_bert", False): print("Freezing ZH bert encoder !!!") for param in net_g.enc_p.bert_proj.parameters(): param.requires_grad = False if getattr(hps.train, "freeze_EN_bert", False): print("Freezing EN bert encoder !!!") for param in net_g.enc_p.en_bert_proj.parameters(): param.requires_grad = False if getattr(hps.train, "freeze_JP_bert", False): print("Freezing JP bert encoder !!!") for param in net_g.enc_p.ja_bert_proj.parameters(): param.requires_grad = False net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(local_rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) else: optim_dur_disc = None net_g = DDP(net_g, device_ids=[local_rank], bucket_cap_mb=512) net_d = DDP(net_d, device_ids=[local_rank], bucket_cap_mb=512) dur_resume_lr = None if net_dur_disc is not None: net_dur_disc = DDP( net_dur_disc, device_ids=[local_rank], find_unused_parameters=True, bucket_cap_mb=512, ) # 下载底模 if config.train_ms_config.base["use_base_model"]: utils.download_checkpoint( hps.model_dir, config.train_ms_config.base, token=config.openi_token, mirror=config.mirror, ) try: if net_dur_disc is not None: _, _, dur_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_g, g_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_d, d_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, 
skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_g.param_groups[0].get("initial_lr"): optim_g.param_groups[0]["initial_lr"] = g_resume_lr if not optim_d.param_groups[0].get("initial_lr"): optim_d.param_groups[0]["initial_lr"] = d_resume_lr if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr epoch_str = max(epoch_str, 1) # global_step = (epoch_str - 1) * len(train_loader) global_step = int( utils.get_steps(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth")) ) print( f"******************检测到模型存在,epoch为 {epoch_str},gloabl step为 {global_step}*********************" ) except Exception as e: print(e) epoch_str = 1 global_step = 0 scheduler_g = torch.optim.lr_scheduler.ExponentialLR( optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_d = torch.optim.lr_scheduler.ExponentialLR( optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) if net_dur_disc is not None: if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR( optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, local_rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, emo, ) in enumerate(tqdm(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) emo = emo.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, 
attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), g, loss_commit, ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, emo, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length ) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax, ) y = commons.slice_segments( y, ids_slice * hps.data.hop_length, hps.train.segment_size ) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False):
loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(
8
2023-12-18 04:54:46+00:00
16k
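The training loop in the row above slices the mel target with the frame index ids_slice but slices the ground-truth waveform with ids_slice * hps.data.hop_length, so that both slices cover the same audio. Below is a minimal PyTorch sketch of that frame-to-sample alignment; the hop_length and segment_size values are illustrative assumptions, and the plain indexing only stands in for what commons.slice_segments does rather than reproducing it.

import torch

hop_length = 256                    # samples per spectrogram frame (assumed)
segment_size = 8192                 # training slice length in samples (assumed)
frames_per_segment = segment_size // hop_length

wav = torch.randn(1, 1, 100 * hop_length)        # [batch, 1, samples]
n_frames = wav.shape[-1] // hop_length

# Pick a slice start in frames, as the generator's ids_slice does.
ids_slice = int(torch.randint(0, n_frames - frames_per_segment + 1, (1,)))

wav_start = ids_slice * hop_length               # matching sample index
wav_slice = wav[..., wav_start:wav_start + segment_size]

# The waveform slice covers exactly the samples behind those mel frames.
assert wav_slice.shape[-1] == frames_per_segment * hop_length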
m-abr/FCPCodebase
world/Robot.py
[ { "identifier": "Math_Ops", "path": "math_ops/Math_Ops.py", "snippet": "class Math_Ops():\n '''\n This class provides general mathematical operations that are not directly available through numpy \n '''\n \n @staticmethod\n def deg_sph2cart(spherical_vec):\n ''' Converts SimSpark's spherical coordinates in degrees to cartesian coordinates '''\n r = spherical_vec[0]\n h = spherical_vec[1] * pi / 180\n v = spherical_vec[2] * pi / 180\n return np.array([r * cos(v) * cos(h), r * cos(v) * sin(h), r * sin(v)])\n\n @staticmethod\n def deg_sin(deg_angle):\n ''' Returns sin of degrees '''\n return sin(deg_angle * pi / 180)\n\n @staticmethod\n def deg_cos(deg_angle):\n ''' Returns cos of degrees '''\n return cos(deg_angle * pi / 180)\n\n @staticmethod\n def to_3d(vec_2d, value=0) -> np.ndarray:\n ''' Returns new 3d vector from 2d vector '''\n return np.append(vec_2d,value)\n\n @staticmethod\n def to_2d_as_3d(vec_3d) -> np.ndarray:\n ''' Returns new 3d vector where the 3rd dimension is zero '''\n vec_2d_as_3d = np.copy(vec_3d)\n vec_2d_as_3d[2] = 0\n return vec_2d_as_3d\n\n @staticmethod\n def normalize_vec(vec) -> np.ndarray:\n ''' Divides vector by its length '''\n size = np.linalg.norm(vec)\n if size == 0: return vec\n return vec / size\n\n @staticmethod\n def get_active_directory(dir:str) -> str:\n global GLOBAL_DIR\n return GLOBAL_DIR + dir\n\n @staticmethod\n def acos(val):\n ''' arccosine function that limits input '''\n return acos( np.clip(val,-1,1) )\n \n @staticmethod\n def asin(val):\n ''' arcsine function that limits input '''\n return asin( np.clip(val,-1,1) )\n\n @staticmethod\n def normalize_deg(val):\n ''' normalize val in range [-180,180[ '''\n return (val + 180.0) % 360 - 180\n\n @staticmethod\n def normalize_rad(val):\n ''' normalize val in range [-pi,pi[ '''\n return (val + pi) % (2*pi) - pi\n\n @staticmethod\n def deg_to_rad(val):\n ''' convert degrees to radians '''\n return val * 0.01745329251994330\n\n @staticmethod\n def rad_to_deg(val):\n ''' convert radians to degrees '''\n return val * 57.29577951308232\n\n @staticmethod\n def vector_angle(vector, is_rad=False):\n ''' angle (degrees or radians) of 2D vector '''\n if is_rad:\n return atan2(vector[1], vector[0])\n else:\n return atan2(vector[1], vector[0]) * 180 / pi\n\n @staticmethod\n def vectors_angle(vec1, vec2, is_rad=False):\n ''' get angle between vectors (degrees or radians) '''\n ang_rad = acos(np.dot(Math_Ops.normalize_vec(vec1),Math_Ops.normalize_vec(vec2)))\n return ang_rad if is_rad else ang_rad * 180 / pi\n\n @staticmethod\n def vector_from_angle(angle, is_rad=False):\n ''' unit vector with direction given by `angle` '''\n if is_rad:\n return np.array([cos(angle), sin(angle)], float)\n else:\n return np.array([Math_Ops.deg_cos(angle), Math_Ops.deg_sin(angle)], float)\n\n @staticmethod\n def target_abs_angle(pos2d, target, is_rad=False):\n ''' angle (degrees or radians) of vector (target-pos2d) '''\n if is_rad:\n return atan2(target[1]-pos2d[1], target[0]-pos2d[0])\n else:\n return atan2(target[1]-pos2d[1], target[0]-pos2d[0]) * 180 / pi\n\n @staticmethod\n def target_rel_angle(pos2d, ori, target, is_rad=False):\n ''' relative angle (degrees or radians) of target if we're located at 'pos2d' with orientation 'ori' (degrees or radians) '''\n if is_rad:\n return Math_Ops.normalize_rad( atan2(target[1]-pos2d[1], target[0]-pos2d[0]) - ori )\n else:\n return Math_Ops.normalize_deg( atan2(target[1]-pos2d[1], target[0]-pos2d[0]) * 180 / pi - ori )\n\n @staticmethod\n def rotate_2d_vec(vec, angle, is_rad=False):\n ''' 
rotate 2D vector anticlockwise around the origin by `angle` '''\n cos_ang = cos(angle) if is_rad else cos(angle * pi / 180)\n sin_ang = sin(angle) if is_rad else sin(angle * pi / 180)\n return np.array([cos_ang*vec[0]-sin_ang*vec[1], sin_ang*vec[0]+cos_ang*vec[1]])\n\n @staticmethod\n def distance_point_to_line(p:np.ndarray, a:np.ndarray, b:np.ndarray):\n ''' \n Distance between point p and 2d line 'ab' (and side where p is)\n\n Parameters\n ----------\n a : ndarray\n 2D point that defines line\n b : ndarray\n 2D point that defines line\n p : ndarray\n 2D point\n\n Returns\n -------\n distance : float\n distance between line and point\n side : str\n if we are at a, looking at b, p may be at our \"left\" or \"right\"\n '''\n line_len = np.linalg.norm(b-a)\n\n if line_len == 0: # assumes vertical line\n dist = sdist = np.linalg.norm(p-a)\n else:\n sdist = np.cross(b-a,p-a)/line_len\n dist = abs(sdist)\n\n return dist, \"left\" if sdist>0 else \"right\"\n\n @staticmethod\n def distance_point_to_segment(p:np.ndarray, a:np.ndarray, b:np.ndarray):\n ''' Distance from point p to 2d line segment 'ab' '''\n \n ap = p-a\n ab = b-a\n\n ad = Math_Ops.vector_projection(ap,ab)\n\n # Is d in ab? We can find k in (ad = k * ab) without computing any norm\n # we use the largest dimension of ab to avoid division by 0\n k = ad[0]/ab[0] if abs(ab[0])>abs(ab[1]) else ad[1]/ab[1]\n\n if k <= 0: return np.linalg.norm(ap)\n elif k >= 1: return np.linalg.norm(p-b)\n else: return np.linalg.norm(p-(ad + a)) # p-d\n\n @staticmethod\n def distance_point_to_ray(p:np.ndarray, ray_start:np.ndarray, ray_direction:np.ndarray):\n ''' Distance from point p to 2d ray '''\n \n rp = p-ray_start\n rd = Math_Ops.vector_projection(rp,ray_direction)\n\n # Is d in ray? We can find k in (rd = k * ray_direction) without computing any norm\n # we use the largest dimension of ray_direction to avoid division by 0\n k = rd[0]/ray_direction[0] if abs(ray_direction[0])>abs(ray_direction[1]) else rd[1]/ray_direction[1]\n\n if k <= 0: return np.linalg.norm(rp)\n else: return np.linalg.norm(p-(rd + ray_start)) # p-d\n\n @staticmethod\n def closest_point_on_ray_to_point(p:np.ndarray, ray_start:np.ndarray, ray_direction:np.ndarray):\n ''' Point on ray closest to point p '''\n \n rp = p-ray_start\n rd = Math_Ops.vector_projection(rp,ray_direction)\n\n # Is d in ray? We can find k in (rd = k * ray_direction) without computing any norm\n # we use the largest dimension of ray_direction to avoid division by 0\n k = rd[0]/ray_direction[0] if abs(ray_direction[0])>abs(ray_direction[1]) else rd[1]/ray_direction[1]\n\n if k <= 0: return ray_start\n else: return rd + ray_start\n\n @staticmethod\n def does_circle_intersect_segment(p:np.ndarray, r, a:np.ndarray, b:np.ndarray):\n ''' Returns true if circle (center p, radius r) intersect 2d line segment '''\n\n ap = p-a\n ab = b-a\n\n ad = Math_Ops.vector_projection(ap,ab)\n\n # Is d in ab? 
We can find k in (ad = k * ab) without computing any norm\n # we use the largest dimension of ab to avoid division by 0\n k = ad[0]/ab[0] if abs(ab[0])>abs(ab[1]) else ad[1]/ab[1]\n\n if k <= 0: return np.dot(ap,ap) <= r*r\n elif k >= 1: return np.dot(p-b,p-b) <= r*r\n \n dp = p-(ad + a)\n return np.dot(dp,dp) <= r*r\n\n @staticmethod\n def vector_projection(a:np.ndarray, b:np.ndarray):\n ''' Vector projection of a onto b '''\n b_dot = np.dot(b,b)\n return b * np.dot(a,b) / b_dot if b_dot != 0 else b\n\n @staticmethod\n def do_noncollinear_segments_intersect(a,b,c,d):\n ''' \n Check if 2d line segment 'ab' intersects with noncollinear 2d line segment 'cd' \n Explanation: https://www.geeksforgeeks.org/check-if-two-given-line-segments-intersect/ \n '''\n\n ccw = lambda a,b,c: (c[1]-a[1]) * (b[0]-a[0]) > (b[1]-a[1]) * (c[0]-a[0])\n return ccw(a,c,d) != ccw(b,c,d) and ccw(a,b,c) != ccw(a,b,d)\n\n @staticmethod\n def intersection_segment_opp_goal(a:np.ndarray, b:np.ndarray):\n ''' Computes the intersection point of 2d segment 'ab' and the opponents' goal (front line) '''\n vec_x = b[0]-a[0]\n\n # Collinear intersections are not accepted\n if vec_x == 0: return None\n \n k = (15.01-a[0])/vec_x\n\n # No collision\n if k < 0 or k > 1: return None\n\n intersection_pt = a + (b-a) * k\n\n if -1.01 <= intersection_pt[1] <= 1.01:\n return intersection_pt\n else:\n return None\n\n @staticmethod\n def intersection_circle_opp_goal(p:np.ndarray, r):\n ''' \n Computes the intersection segment of circle (center p, radius r) and the opponents' goal (front line)\n Only the y coordinates are returned since the x coordinates are always equal to 15\n '''\n\n x_dev = abs(15-p[0])\n\n if x_dev > r:\n return None # no intersection with x=15\n\n y_dev = sqrt(r*r - x_dev*x_dev)\n\n p1 = max(p[1] - y_dev, -1.01)\n p2 = min(p[1] + y_dev, 1.01)\n\n if p1 == p2:\n return p1 # return the y coordinate of a single intersection point\n elif p2 < p1:\n return None # no intersection\n else:\n return p1, p2 # return the y coordinates of the intersection segment\n\n\n @staticmethod\n def distance_point_to_opp_goal(p:np.ndarray):\n ''' Distance between point 'p' and the opponents' goal (front line) '''\n\n if p[1] < -1.01:\n return np.linalg.norm( p-(15,-1.01) )\n elif p[1] > 1.01:\n return np.linalg.norm( p-(15, 1.01) )\n else:\n return abs(15-p[0])\n\n\n @staticmethod\n def circle_line_segment_intersection(circle_center, circle_radius, pt1, pt2, full_line=True, tangent_tol=1e-9):\n \"\"\" Find the points at which a circle intersects a line-segment. This can happen at 0, 1, or 2 points.\n\n :param circle_center: The (x, y) location of the circle center\n :param circle_radius: The radius of the circle\n :param pt1: The (x, y) location of the first point of the segment\n :param pt2: The (x, y) location of the second point of the segment\n :param full_line: True to find intersections along full line - not just in the segment. 
False will just return intersections within the segment.\n :param tangent_tol: Numerical tolerance at which we decide the intersections are close enough to consider it a tangent\n :return Sequence[Tuple[float, float]]: A list of length 0, 1, or 2, where each element is a point at which the circle intercepts a line segment.\n\n Note: We follow: http://mathworld.wolfram.com/Circle-LineIntersection.html\n \"\"\"\n\n (p1x, p1y), (p2x, p2y), (cx, cy) = pt1, pt2, circle_center\n (x1, y1), (x2, y2) = (p1x - cx, p1y - cy), (p2x - cx, p2y - cy)\n dx, dy = (x2 - x1), (y2 - y1)\n dr = (dx ** 2 + dy ** 2)**.5\n big_d = x1 * y2 - x2 * y1\n discriminant = circle_radius ** 2 * dr ** 2 - big_d ** 2\n\n if discriminant < 0: # No intersection between circle and line\n return []\n else: # There may be 0, 1, or 2 intersections with the segment\n intersections = [\n (cx + (big_d * dy + sign * (-1 if dy < 0 else 1) * dx * discriminant**.5) / dr ** 2,\n cy + (-big_d * dx + sign * abs(dy) * discriminant**.5) / dr ** 2)\n for sign in ((1, -1) if dy < 0 else (-1, 1))] # This makes sure the order along the segment is correct\n if not full_line: # If only considering the segment, filter out intersections that do not fall within the segment\n fraction_along_segment = [\n (xi - p1x) / dx if abs(dx) > abs(dy) else (yi - p1y) / dy for xi, yi in intersections]\n intersections = [pt for pt, frac in zip(\n intersections, fraction_along_segment) if 0 <= frac <= 1]\n # If line is tangent to circle, return just one point (as both intersections have same location)\n if len(intersections) == 2 and abs(discriminant) <= tangent_tol:\n return [intersections[0]]\n else:\n return intersections\n\n\n\n\n # adapted from https://stackoverflow.com/questions/3252194/numpy-and-line-intersections\n @staticmethod\n def get_line_intersection(a1, a2, b1, b2):\n \"\"\" \n Returns the point of intersection of the lines passing through a2,a1 and b2,b1.\n a1: [x, y] a point on the first line\n a2: [x, y] another point on the first line\n b1: [x, y] a point on the second line\n b2: [x, y] another point on the second line\n \"\"\"\n s = np.vstack([a1,a2,b1,b2]) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return np.array([float('inf'), float('inf')])\n return np.array([x/z, y/z],float)" }, { "identifier": "Matrix_3x3", "path": "math_ops/Matrix_3x3.py", "snippet": "class Matrix_3x3():\n\n def __init__(self, matrix = None) -> None:\n '''\n Constructor examples:\n a = Matrix_3x3( ) # create identity matrix\n b = Matrix_3x3( [[1,1,1],[2,2,2],[3,3,3]] ) # manually initialize matrix\n c = Matrix_3x3( [1,1,1,2,2,2,3,3,3] ) # manually initialize matrix\n d = Matrix_3x3( b ) # copy constructor\n '''\n if matrix is None:\n self.m = np.identity(3)\n elif type(matrix) == Matrix_3x3: \n self.m = np.copy(matrix.m)\n else:\n self.m = np.asarray(matrix)\n self.m.shape = (3,3) #reshape if needed, throw error if impossible\n\n\n self.rotation_shortcuts={(1,0,0):self.rotate_x_rad, (-1, 0, 0):self._rotate_x_neg_rad,\n (0,1,0):self.rotate_y_rad, ( 0,-1, 0):self._rotate_y_neg_rad,\n (0,0,1):self.rotate_z_rad, ( 0, 0,-1):self._rotate_z_neg_rad}\n\n @classmethod\n def from_rotation_deg(cls, euler_vec):\n '''\n Create rotation matrix from Euler angles, in degrees.\n Rotation order: RotZ*RotY*RotX\n\n Parameters\n ----------\n euler_vec : array_like, length 3\n vector 
with Euler angles (x,y,z) aka (roll, pitch, yaw)\n\n Example\n ----------\n Matrix_3x3.from_rotation_deg((roll,pitch,yaw)) # Creates: RotZ(yaw)*RotY(pitch)*RotX(roll)\n '''\n mat = cls().rotate_z_deg(euler_vec[2], True).rotate_y_deg(euler_vec[1], True).rotate_x_deg(euler_vec[0], True)\n return mat\n\n def get_roll_deg(self):\n ''' Get angle around the x-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n if self.m[2,1] == 0 and self.m[2,2] == 0: \n return 180\n return atan2(self.m[2,1], self.m[2,2]) * 180 / pi\n\n def get_pitch_deg(self):\n ''' Get angle around the y-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n return atan2(-self.m[2,0], sqrt(self.m[2,1]*self.m[2,1] + self.m[2,2]*self.m[2,2])) * 180 / pi\n\n def get_yaw_deg(self):\n ''' Get angle around the z-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n if self.m[1,0] == 0 and self.m[0,0] == 0: \n return atan2(self.m[0,1], self.m[1,1]) * 180 / pi\n return atan2(self.m[1,0], self.m[0,0]) * 180 / pi\n\n def get_inclination_deg(self):\n ''' Get inclination of z-axis in relation to reference z-axis '''\n return 90 - (asin(self.m[2,2]) * 180 / pi)\n\n\n def rotate_deg(self, rotation_vec, rotation_deg, in_place=False):\n '''\n Rotates the current rotation matrix\n\n Parameters\n ----------\n rotation_vec : array_like, length 3\n rotation vector\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n return self.rotate_rad(rotation_vec, rotation_deg * (pi/180) , in_place)\n\n \n def rotate_rad(self, rotation_vec, rotation_rad, in_place=False):\n '''\n Rotates the current rotation matrix\n\n Parameters\n ----------\n rotation_vec : array_like, length 3\n rotation vector\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n\n if rotation_rad == 0: return\n\n shortcut = self.rotation_shortcuts.get(tuple(a for a in rotation_vec))\n if shortcut:\n return shortcut(rotation_rad, in_place)\n \n c = np.math.cos(rotation_rad)\n c1 = 1 - c\n s = np.math.sin(rotation_rad)\n x = rotation_vec[0]\n y = rotation_vec[1]\n z = rotation_vec[2]\n xxc1 = x * x * c1\n yyc1 = y * y * c1\n zzc1 = z * z * c1\n xyc1 = x * y * c1\n xzc1 = x * z * c1\n yzc1 = y * z * c1\n xs = x * s\n ys = y * s\n zs = z * s\n\n mat = np.array([\n [xxc1 + c, xyc1 - zs, xzc1 + ys],\n [xyc1 + zs, yyc1 + c, yzc1 - xs],\n [xzc1 - ys, yzc1 + xs, zzc1 + c]])\n\n return self.multiply(mat, in_place)\n\n\n def _rotate_x_neg_rad(self, rotation_rad, in_place=False):\n self.rotate_x_rad(-rotation_rad, in_place)\n\n def _rotate_y_neg_rad(self, rotation_rad, in_place=False):\n self.rotate_y_rad(-rotation_rad, in_place)\n\n def _rotate_z_neg_rad(self, rotation_rad, in_place=False):\n self.rotate_z_rad(-rotation_rad, in_place)\n\n def rotate_x_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current rotation matrix around the x-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n 
result : Matrix_3x3 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_3x3(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [1, 0, 0],\n [0, c,-s],\n [0, s, c]])\n\n return self.multiply(mat, in_place)\n\n def rotate_y_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current rotation matrix around the y-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_3x3(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [ c, 0, s],\n [ 0, 1, 0],\n [-s, 0, c]])\n\n return self.multiply(mat, in_place)\n\n def rotate_z_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current rotation matrix around the z-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_3x3(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [ c,-s, 0],\n [ s, c, 0],\n [ 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n def rotate_x_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current rotation matrix around the x-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n return self.rotate_x_rad(rotation_deg * (pi/180), in_place)\n\n def rotate_y_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current rotation matrix around the y-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n return self.rotate_y_rad(rotation_deg * (pi/180), in_place)\n\n def rotate_z_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current rotation matrix around the z-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n return self.rotate_z_rad(rotation_deg * (pi/180), in_place)\n\n def invert(self, in_place=False):\n '''\n Inverts the current rotation matrix\n\n Parameters\n ----------\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n 
'''\n\n if in_place:\n self.m = np.linalg.inv(self.m)\n return self\n else:\n return Matrix_3x3(np.linalg.inv(self.m))\n\n def multiply(self,mat, in_place=False, reverse_order=False):\n '''\n Multiplies the current rotation matrix by mat\n\n Parameters\n ----------\n mat : Matrix_3x3 or array_like\n multiplier matrix or 3D vector\n in_place: bool, optional\n - True: the internal matrix is changed in-place\n - False: a new matrix is returned and the current one is not changed (default) \n reverse_order: bool, optional\n - False: self * mat\n - True: mat * self\n \n Returns\n -------\n result : Matrix_3x3 | array_like\n Matrix_3x3 is returned if mat is a matrix (self is returned if in_place is True); \n a 3D vector is returned if mat is a vector\n '''\n # get array from matrix object or convert to numpy array (if needed) \n mat = mat.m if type(mat) == Matrix_3x3 else np.asarray(mat)\n\n a,b = (mat, self.m) if reverse_order else (self.m, mat)\n\n if mat.ndim == 1: \n return np.matmul(a, b) # multiplication by 3D vector\n elif in_place:\n np.matmul(a, b, self.m) # multiplication by matrix, in place\n return self\n else: # multiplication by matrix, return new Matrix_3x3\n return Matrix_3x3(np.matmul(a, b))" }, { "identifier": "Matrix_4x4", "path": "math_ops/Matrix_4x4.py", "snippet": "class Matrix_4x4():\n\n def __init__(self, matrix = None) -> None:\n '''\n Constructor examples:\n a = Matrix_4x4( ) # create identity matrix\n b = Matrix_4x4( [[1,1,1,1],[2,2,2,2],[3,3,3,3],[4,4,4,4]] ) # manually initialize matrix\n c = Matrix_4x4( [1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4] ) # manually initialize matrix\n d = Matrix_4x4( b ) # copy constructor\n '''\n if matrix is None:\n self.m = np.identity(4)\n elif type(matrix) == Matrix_4x4: \n self.m = np.copy(matrix.m)\n elif type(matrix) == Matrix_3x3: \n self.m = np.identity(4)\n self.m[0:3,0:3] = matrix.m\n else:\n self.m = np.asarray(matrix)\n self.m.shape = (4,4) #reshape if needed, throw error if impossible\n\n\n @classmethod\n def from_translation(cls, translation_vec):\n '''\n Create transformation matrix from translation_vec translation\n e.g. Matrix_4x4.from_translation((a,b,c))\n output: [[1,0,0,a],[0,1,0,b],[0,0,1,c],[0,0,0,1]]\n '''\n mat = np.identity(4)\n mat[0:3,3] = translation_vec\n return cls(mat)\n\n @classmethod\n def from_3x3_and_translation(cls, mat3x3:Matrix_3x3, translation_vec):\n '''\n Create transformation matrix from rotation matrix (3x3) and translation\n e.g. 
Matrix_4x4.from_3x3_and_translation(r,(a,b,c)) \n output: [[r00,r01,r02,a],[r10,r11,r12,b],[r20,r21,r22,c],[0,0,0,1]]\n '''\n mat = np.identity(4)\n mat[0:3,0:3] = mat3x3.m\n mat[0:3,3] = translation_vec\n return cls(mat)\n\n def translate(self, translation_vec, in_place=False):\n '''\n Translates the current transformation matrix\n\n Parameters\n ----------\n translation_vec : array_like, length 3\n translation vector\n in_place: bool, optional\n * True: the internal matrix is changed in-place\n * False: a new matrix is returned and the current one is not changed \n\n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n vec = np.array([*translation_vec,1])# conversion to 4D vector\n np.matmul(self.m, vec, out=vec) # compute only 4th column\n\n if in_place:\n self.m[:,3] = vec\n return self\n else:\n ret = Matrix_4x4(self.m)\n ret.m[:,3] = vec\n return ret\n\n\n def get_translation(self):\n ''' Get translation vector (x,y,z) '''\n return self.m[0:3,3] # return view\n\n def get_x(self):\n return self.m[0,3]\n\n def get_y(self):\n return self.m[1,3]\n\n def get_z(self):\n return self.m[2,3]\n\n def get_rotation_4x4(self):\n ''' Get Matrix_4x4 without translation ''' \n mat = Matrix_4x4(self)\n mat.m[0:3,3] = 0\n return mat\n\n def get_rotation(self):\n ''' Get rotation Matrix_3x3 '''\n return Matrix_3x3(self.m[0:3,0:3])\n\n def get_distance(self):\n ''' Get translation vector length '''\n return np.linalg.norm(self.m[0:3,3])\n\n def get_roll_deg(self):\n ''' Get angle around the x-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n if self.m[2,1] == 0 and self.m[2,2] == 0: \n return 180\n return atan2(self.m[2,1], self.m[2,2]) * 180 / pi\n\n def get_pitch_deg(self):\n ''' Get angle around the y-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n return atan2(-self.m[2,0], sqrt(self.m[2,1]*self.m[2,1] + self.m[2,2]*self.m[2,2])) * 180 / pi\n\n def get_yaw_deg(self):\n ''' Get angle around the z-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n if self.m[1,0] == 0 and self.m[0,0] == 0: \n return atan2(self.m[0,1], self.m[1,1]) * 180 / pi\n return atan2(self.m[1,0], self.m[0,0]) * 180 / pi\n \n def get_inclination_deg(self):\n ''' Get inclination of z-axis in relation to reference z-axis '''\n return 90 - (asin(np.clip(self.m[2,2],-1,1)) * 180 / pi)\n\n def rotate_deg(self, rotation_vec, rotation_deg, in_place=False):\n '''\n Rotates the current transformation matrix\n\n Parameters\n ----------\n rotation_vec : array_like, length 3\n rotation vector\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n return self.rotate_rad(rotation_vec, rotation_deg * (pi/180) , in_place)\n\n \n def rotate_rad(self, rotation_vec, rotation_rad, in_place=False):\n '''\n Rotates the current transformation matrix\n\n Parameters\n ----------\n rotation_vec : array_like, length 3\n rotation vector\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n\n if rotation_rad == 0: \n return self if in_place else Matrix_4x4(self)\n\n # shortcuts for rotation around 1 axis\n if 
rotation_vec[0]==0:\n if rotation_vec[1]==0:\n if rotation_vec[2]==1:\n return self.rotate_z_rad(rotation_rad, in_place)\n elif rotation_vec[2]==-1:\n return self.rotate_z_rad(-rotation_rad, in_place)\n elif rotation_vec[2]==0:\n if rotation_vec[1]==1:\n return self.rotate_y_rad(rotation_rad, in_place)\n elif rotation_vec[1]==-1:\n return self.rotate_y_rad(-rotation_rad, in_place)\n elif rotation_vec[1]==0 and rotation_vec[2]==0:\n if rotation_vec[0]==1:\n return self.rotate_x_rad(rotation_rad, in_place)\n elif rotation_vec[0]==-1:\n return self.rotate_x_rad(-rotation_rad, in_place)\n \n c = np.math.cos(rotation_rad)\n c1 = 1 - c\n s = np.math.sin(rotation_rad)\n x = rotation_vec[0]\n y = rotation_vec[1]\n z = rotation_vec[2]\n xxc1 = x * x * c1\n yyc1 = y * y * c1\n zzc1 = z * z * c1\n xyc1 = x * y * c1\n xzc1 = x * z * c1\n yzc1 = y * z * c1\n xs = x * s\n ys = y * s\n zs = z * s\n\n mat = np.array([\n [xxc1 + c, xyc1 - zs, xzc1 + ys, 0],\n [xyc1 + zs, yyc1 + c, yzc1 - xs, 0],\n [xzc1 - ys, yzc1 + xs, zzc1 + c, 0],\n [0, 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n\n def rotate_x_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current transformation matrix around the x-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_4x4(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [1, 0, 0, 0],\n [0, c,-s, 0],\n [0, s, c, 0],\n [0, 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n def rotate_y_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current transformation matrix around the y-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_4x4(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [ c, 0, s, 0],\n [ 0, 1, 0, 0],\n [-s, 0, c, 0],\n [ 0, 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n def rotate_z_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current transformation matrix around the z-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_4x4(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [ c,-s, 0, 0],\n [ s, c, 0, 0],\n [ 0, 0, 1, 0],\n [ 0, 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n def rotate_x_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current transformation matrix around the x-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix 
is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n return self.rotate_x_rad(rotation_deg * (pi/180), in_place)\n\n def rotate_y_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current transformation matrix around the y-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n return self.rotate_y_rad(rotation_deg * (pi/180), in_place)\n\n def rotate_z_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current transformation matrix around the z-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n return self.rotate_z_rad(rotation_deg * (pi/180), in_place)\n\n def invert(self, in_place=False):\n '''\n Inverts the current transformation matrix\n\n Parameters\n ----------\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n\n if in_place:\n self.m = np.linalg.inv(self.m)\n return self\n else:\n return Matrix_4x4(np.linalg.inv(self.m))\n\n def multiply(self,mat, in_place=False):\n '''\n Multiplies the current transformation matrix by mat\n\n Parameters\n ----------\n mat : Matrix_4x4 or array_like\n multiplier matrix or 3D vector\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed (if mat is a 4x4 matrix)\n \n Returns\n -------\n result : Matrix_4x4 | array_like\n Matrix_4x4 is returned if mat is a matrix (self is returned if in_place is True); \n a 3D vector is returned if mat is a vector\n '''\n if type(mat) == Matrix_4x4: \n mat = mat.m\n else:\n mat = np.asarray(mat) # conversion to array, if needed\n if mat.ndim == 1: # multiplication by 3D vector\n vec = np.append(mat,1) # conversion to 4D vector\n return np.matmul(self.m, vec)[0:3] # conversion to 3D vector\n\n if in_place:\n np.matmul(self.m, mat, self.m)\n return self\n else:\n return Matrix_4x4(np.matmul(self.m, mat))\n\n def __call__(self,mat, is_spherical=False):\n '''\n Multiplies the current transformation matrix by mat and returns a new matrix or vector\n\n Parameters\n ----------\n mat : Matrix_4x4 or array_like\n multiplier matrix or 3D vector\n is_spherical : bool\n only relevant if mat is a 3D vector, True if it uses spherical coordinates\n \n Returns\n -------\n result : Matrix_4x4 | array_like\n Matrix_4x4 is returned if mat is a matrix; \n a 3D vector is returned if mat is a vector\n '''\n\n if is_spherical and mat.ndim == 1: mat = M.deg_sph2cart(mat)\n return self.multiply(mat,False)" }, { "identifier": "Body_Part", "path": "world/commons/Body_Part.py", "snippet": "class Body_Part():\n def __init__(self, mass) -> None:\n self.mass = float(mass)\n self.joints = []\n self.transform = Matrix_4x4() # body part to head transformation matrix" }, { "identifier": "Joint_Info", 
"path": "world/commons/Joint_Info.py", "snippet": "class Joint_Info():\n def __init__(self, xml_element) -> None:\n self.perceptor = xml_element.attrib['perceptor']\n self.effector = xml_element.attrib['effector']\n self.axes = np.array([\n float(xml_element.attrib['xaxis']), \n float(xml_element.attrib['yaxis']), \n float(xml_element.attrib['zaxis'])])\n self.min = int(xml_element.attrib['min'])\n self.max = int(xml_element.attrib['max'])\n\n self.anchor0_part = xml_element[0].attrib['part']\n self.anchor0_axes = np.array([\n float(xml_element[0].attrib['y']), \n float(xml_element[0].attrib['x']), \n float(xml_element[0].attrib['z'])]) #x and y axes are switched\n\n self.anchor1_part = xml_element[1].attrib['part']\n self.anchor1_axes_neg = np.array([\n -float(xml_element[1].attrib['y']), \n -float(xml_element[1].attrib['x']), \n -float(xml_element[1].attrib['z'])]) #x and y axes are switched" } ]
from collections import deque from math import atan, pi, sqrt, tan from math_ops.Math_Ops import Math_Ops as M from math_ops.Matrix_3x3 import Matrix_3x3 from math_ops.Matrix_4x4 import Matrix_4x4 from world.commons.Body_Part import Body_Part from world.commons.Joint_Info import Joint_Info import numpy as np import xml.etree.ElementTree as xmlp
12,934
class Robot(): STEPTIME = 0.02 # Fixed step time VISUALSTEP = 0.04 # Fixed visual step time SQ_STEPTIME = STEPTIME * STEPTIME GRAVITY = np.array([0,0,-9.81]) IMU_DECAY = 0.996 #IMU's velocity decay #------------------ constants to force symmetry in joints/effectors MAP_PERCEPTOR_TO_INDEX = {"hj1":0, "hj2":1, "llj1":2, "rlj1":3, "llj2":4, "rlj2":5, "llj3":6, "rlj3":7, "llj4":8, "rlj4":9, "llj5":10,"rlj5":11, "llj6":12,"rlj6":13,"laj1":14,"raj1":15, "laj2":16,"raj2":17,"laj3":18,"raj3":19, "laj4":20,"raj4":21,"llj7":22,"rlj7":23 } # Fix symmetry issues 1a/4 (identification) FIX_PERCEPTOR_SET = {'rlj2','rlj6','raj2','laj3','laj4'} FIX_INDICES_LIST = [5,13,17,18,20] # Recommended height for unofficial beam (near ground) BEAM_HEIGHTS = [0.4, 0.43, 0.4, 0.46, 0.4] def __init__(self, unum:int, robot_type:int) -> None: robot_xml = "nao"+str(robot_type)+".xml" # Typical NAO file name self.type = robot_type self.beam_height = Robot.BEAM_HEIGHTS[robot_type] self.no_of_joints = 24 if robot_type == 4 else 22 #Fix symmetry issues 1b/4 (identification) self.FIX_EFFECTOR_MASK = np.ones(self.no_of_joints) self.FIX_EFFECTOR_MASK[Robot.FIX_INDICES_LIST] = -1 self.body_parts = dict() # keys='body part names' (given by the robot's XML), values='Body_Part objects' self.unum = unum # Robot's uniform number self.gyro = np.zeros(3) # Angular velocity along the three axes of freedom of the robot's torso (deg/s) self.acc = np.zeros(3) # Proper acceleration along the three axes of freedom of the robot's torso (m/s2) self.frp = dict() # foot "lf"/"rf", toe "lf1"/"rf1" resistance perceptor (relative [p]oint of origin + [f]orce vector) e.g. {"lf":(px,py,pz,fx,fy,fz)} self.feet_toes_last_touch = {"lf":0,"rf":0,"lf1":0,"rf1":0} # foot "lf"/"rf", toe "lf1"/"rf1" World.time_local_ms when foot/toe last touched any surface self.feet_toes_are_touching = {"lf":False,"rf":False,"lf1":False,"rf1":False} # foot "lf"/"rf", toe "lf1"/"rf1" True if touching in last received server message self.fwd_kinematics_list = None # List of body parts, ordered according to dependencies self.rel_cart_CoM_position = np.zeros(3) # Center of Mass position, relative to head, in cartesian coordinates (m) # Joint variables are optimized for performance / array operations self.joints_position = np.zeros(self.no_of_joints) # Joints' angular position (deg) self.joints_speed = np.zeros(self.no_of_joints) # Joints' angular speed (rad/s) self.joints_target_speed = np.zeros(self.no_of_joints) # Joints' target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg) self.joints_target_last_speed = np.zeros(self.no_of_joints) # Joints' last target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg) self.joints_info = [None] * self.no_of_joints # Joints' constant information (see class Joint_Info) self.joints_transform = [Matrix_4x4() for _ in range(self.no_of_joints)] # Joints' transformation matrix # Localization variables relative to head self.loc_head_to_field_transform = Matrix_4x4() # Transformation matrix from head to field self.loc_field_to_head_transform = Matrix_4x4() # Transformation matrix from field to head
class Robot(): STEPTIME = 0.02 # Fixed step time VISUALSTEP = 0.04 # Fixed visual step time SQ_STEPTIME = STEPTIME * STEPTIME GRAVITY = np.array([0,0,-9.81]) IMU_DECAY = 0.996 #IMU's velocity decay #------------------ constants to force symmetry in joints/effectors MAP_PERCEPTOR_TO_INDEX = {"hj1":0, "hj2":1, "llj1":2, "rlj1":3, "llj2":4, "rlj2":5, "llj3":6, "rlj3":7, "llj4":8, "rlj4":9, "llj5":10,"rlj5":11, "llj6":12,"rlj6":13,"laj1":14,"raj1":15, "laj2":16,"raj2":17,"laj3":18,"raj3":19, "laj4":20,"raj4":21,"llj7":22,"rlj7":23 } # Fix symmetry issues 1a/4 (identification) FIX_PERCEPTOR_SET = {'rlj2','rlj6','raj2','laj3','laj4'} FIX_INDICES_LIST = [5,13,17,18,20] # Recommended height for unofficial beam (near ground) BEAM_HEIGHTS = [0.4, 0.43, 0.4, 0.46, 0.4] def __init__(self, unum:int, robot_type:int) -> None: robot_xml = "nao"+str(robot_type)+".xml" # Typical NAO file name self.type = robot_type self.beam_height = Robot.BEAM_HEIGHTS[robot_type] self.no_of_joints = 24 if robot_type == 4 else 22 #Fix symmetry issues 1b/4 (identification) self.FIX_EFFECTOR_MASK = np.ones(self.no_of_joints) self.FIX_EFFECTOR_MASK[Robot.FIX_INDICES_LIST] = -1 self.body_parts = dict() # keys='body part names' (given by the robot's XML), values='Body_Part objects' self.unum = unum # Robot's uniform number self.gyro = np.zeros(3) # Angular velocity along the three axes of freedom of the robot's torso (deg/s) self.acc = np.zeros(3) # Proper acceleration along the three axes of freedom of the robot's torso (m/s2) self.frp = dict() # foot "lf"/"rf", toe "lf1"/"rf1" resistance perceptor (relative [p]oint of origin + [f]orce vector) e.g. {"lf":(px,py,pz,fx,fy,fz)} self.feet_toes_last_touch = {"lf":0,"rf":0,"lf1":0,"rf1":0} # foot "lf"/"rf", toe "lf1"/"rf1" World.time_local_ms when foot/toe last touched any surface self.feet_toes_are_touching = {"lf":False,"rf":False,"lf1":False,"rf1":False} # foot "lf"/"rf", toe "lf1"/"rf1" True if touching in last received server message self.fwd_kinematics_list = None # List of body parts, ordered according to dependencies self.rel_cart_CoM_position = np.zeros(3) # Center of Mass position, relative to head, in cartesian coordinates (m) # Joint variables are optimized for performance / array operations self.joints_position = np.zeros(self.no_of_joints) # Joints' angular position (deg) self.joints_speed = np.zeros(self.no_of_joints) # Joints' angular speed (rad/s) self.joints_target_speed = np.zeros(self.no_of_joints) # Joints' target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg) self.joints_target_last_speed = np.zeros(self.no_of_joints) # Joints' last target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg) self.joints_info = [None] * self.no_of_joints # Joints' constant information (see class Joint_Info) self.joints_transform = [Matrix_4x4() for _ in range(self.no_of_joints)] # Joints' transformation matrix # Localization variables relative to head self.loc_head_to_field_transform = Matrix_4x4() # Transformation matrix from head to field self.loc_field_to_head_transform = Matrix_4x4() # Transformation matrix from field to head
self.loc_rotation_head_to_field = Matrix_3x3() # Rotation matrix from head to field
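Aside on the Robot class shown in the code fields above (the record's remaining scalar fields follow right after this): a minimal sketch of the FIX_EFFECTOR_MASK symmetry trick, in which a +/-1 mask flips the sign of selected effector commands so mirrored joints can share control code. Plain numpy, outside the agent codebase; the target speeds below are hypothetical values for illustration only.

import numpy as np

NO_OF_JOINTS = 24
FIX_INDICES_LIST = [5, 13, 17, 18, 20]           # indices taken from the snippet above

fix_effector_mask = np.ones(NO_OF_JOINTS)
fix_effector_mask[FIX_INDICES_LIST] = -1         # mirrored joints get their sign flipped

joint_target_speeds = np.full(NO_OF_JOINTS, 0.5)         # hypothetical commands (rad/s)
corrected = joint_target_speeds * fix_effector_mask
print(corrected[FIX_INDICES_LIST])                       # [-0.5 -0.5 -0.5 -0.5 -0.5]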
1
2023-12-16 23:40:23+00:00
16k
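That closes the preceding record. How these records are meant to be consumed is not stated on the page itself, so the sketch below is an assumption: assemble the retrieved context plus the cropped file body as a prompt, predict next_line, and note that gold_snippet_index (1 here) points at the context entry whose identifier (Matrix_3x3) the target line actually uses. The mini-record and the build_prompt helper are illustrative stand-ins, not part of any dataset tooling.

record = {
    "context": [{"identifier": "Math_Ops"}, {"identifier": "Matrix_3x3"},
                {"identifier": "Matrix_4x4"}, {"identifier": "Body_Part"},
                {"identifier": "Joint_Info"}],
    "cropped_code": "...",   # code preceding the target line (truncated in this sketch)
    "next_line": "self.loc_rotation_head_to_field = Matrix_3x3()  # Rotation matrix from head to field",
    "gold_snippet_index": 1,
}

def build_prompt(rec):
    # Concatenate the retrieved context snippets with the cropped file body.
    ctx = "\n\n".join(c.get("snippet", "") for c in rec["context"])
    return ctx + "\n\n" + rec["cropped_code"]

gold = record["context"][record["gold_snippet_index"]]["identifier"]
assert gold in record["next_line"], (gold, record["next_line"])
print(f"gold snippet #{record['gold_snippet_index']} = {gold!r} appears in next_line")

The next record, from Sam-Izdat/tinycio, follows.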
Sam-Izdat/tinycio
src/tinycio/util/colorutil.py
[ { "identifier": "Float2", "path": "src/tinycio/numerics/vector.py", "snippet": "class Float2(np.ndarray):\n \"\"\"\n Float2 type using numpy.ndarray.\n \"\"\"\n def __new__(cls, *args):\n if len(args) == 1:\n if isinstance(args[0], list) or isinstance(args[0], tuple):\n assert len(args[0]) == 2, \"list/tuple must have 2 components\"\n arr = np.asarray([args[0][0], args[0][1]], dtype=np.float32).view(cls)\n elif isinstance(args[0], np.ndarray):\n assert len(args[0].squeeze().shape) == 1 and args[0].shape[0] == 2, \\\n \"numpy array must be sized [C=2] or [C=2, H=1, W=1]\"\n arr = np.asarray(args[0].squeeze(), dtype=np.float32).view(cls)\n elif torch.is_tensor(args[0]):\n assert len(args[0].squeeze().size()) == 1 and args[0].size(0) == 2, \\\n \"torch tensor must be sized [C=2] or [C=2, H=1, W=1]\"\n value = args[0].squeeze().float().cpu()\n arr = np.asarray([value[0].item(), value[1].item()], dtype=np.float32).view(cls)\n else:\n value = float(args[0])\n arr = np.asarray([value, value], dtype=np.float32).view(cls)\n elif len(args) == 2:\n arr = np.asarray(args, dtype=np.float32).view(cls)\n else: \n raise TypeError(\"Float2 only accepts 1 or 2 arguments.\")\n return arr\n\n def list(self) -> list:\n \"\"\"Returns values as Python list\"\"\"\n return [self[0], self[1]]\n\n def tuple(self) -> tuple:\n \"\"\"Returns values as Python tuple\"\"\"\n return (self[0], self[1])\n\n @property\n def x(self) -> float:\n return self[0]\n @x.setter\n def x(self, value):\n self[0] = value\n @property\n def y(self) -> float:\n return self[1]\n @y.setter\n def y(self, value):\n self[1] = value\n @property\n def r(self) -> float:\n return self[0]\n @r.setter\n def r(self, value):\n self[0] = value\n @property\n def g(self) -> float:\n return self[1]\n @g.setter\n def g(self, value):\n self[1] = value\n\n @staticmethod\n def zero():\n \"\"\"Returns numeric type filled with zero values\"\"\"\n return Float2(0., 0.)\n @staticmethod\n def one():\n \"\"\"Returns numeric type filled with one values\"\"\"\n return Float2(1., 1.)\n @staticmethod\n def x_axis():\n \"\"\"Returns numeric type with x-axis set to 1 and all others to 0\"\"\"\n return Float2(1., 0.)\n @staticmethod\n def y_axis():\n \"\"\"Returns numeric type with y-axis set to 1 and all others to 0\"\"\"\n return Float2(0., 1.)\n\n @property\n def xx(self): return Float2(self.x, self.x)\n @property\n def xy(self): return self\n @property\n def yx(self): return Float2(self.y, self.x)\n @property\n def yy(self): return Float2(self.y, self.y)\n\n @property\n def rr(self): return Float2(self.r, self.r)\n @property\n def rg(self): return self\n @property\n def gr(self): return Float2(self.g, self.r)\n @property\n def gg(self): return Float2(self.g, self.g)\n\n @property\n def xxx(self): return Float3(self.x, self.x, self.x)\n @property\n def xxy(self): return Float3(self.x, self.x, self.y)\n @property\n def xyx(self): return Float3(self.x, self.y, self.x)\n @property\n def xyy(self): return Float3(self.x, self.y, self.y)\n @property\n def yxx(self): return Float3(self.y, self.x, self.x)\n @property\n def yxy(self): return Float3(self.y, self.x, self.y)\n @property\n def yyx(self): return Float3(self.y, self.y, self.x)\n @property\n def yyy(self): return Float3(self.y, self.y, self.y)\n\n @property\n def rrr(self): return Float3(self.r, self.r, self.r)\n @property\n def rrg(self): return Float3(self.r, self.r, self.g)\n @property\n def rgr(self): return Float3(self.r, self.g, self.r)\n @property\n def rgg(self): return Float3(self.r, self.g, self.g)\n 
@property\n def grr(self): return Float3(self.g, self.r, self.r)\n @property\n def grg(self): return Float3(self.g, self.r, self.g)\n @property\n def ggr(self): return Float3(self.g, self.g, self.r)\n @property\n def ggg(self): return Float3(self.g, self.g, self.g)\n\n @property\n def xxxx(self): return Float4(self.x, self.x, self.x, self.x)\n @property\n def xxxy(self): return Float4(self.x, self.x, self.x, self.y)\n @property\n def xxyx(self): return Float4(self.x, self.x, self.y, self.x)\n @property\n def xxyy(self): return Float4(self.x, self.x, self.y, self.y)\n @property\n def xyxx(self): return Float4(self.x, self.y, self.x, self.x)\n @property\n def xyxy(self): return Float4(self.x, self.y, self.x, self.y)\n @property\n def xyyx(self): return Float4(self.x, self.y, self.y, self.x)\n @property\n def xyyy(self): return Float4(self.x, self.y, self.y, self.y)\n @property\n def yxxx(self): return Float4(self.y, self.x, self.x, self.x)\n @property\n def yxxy(self): return Float4(self.y, self.x, self.x, self.y)\n @property\n def yxyx(self): return Float4(self.y, self.x, self.y, self.x)\n @property\n def yxyy(self): return Float4(self.y, self.x, self.y, self.y)\n @property\n def yyxx(self): return Float4(self.y, self.y, self.x, self.x)\n @property\n def yyxy(self): return Float4(self.y, self.y, self.x, self.y)\n @property\n def yyyx(self): return Float4(self.y, self.y, self.y, self.x)\n @property\n def yyyy(self): return Float4(self.y, self.y, self.y, self.y)\n\n @property\n def rrrr(self): return Float4(self.r, self.r, self.r, self.r)\n @property\n def rrrg(self): return Float4(self.r, self.r, self.r, self.g)\n @property\n def rrgr(self): return Float4(self.r, self.r, self.g, self.r)\n @property\n def rrgg(self): return Float4(self.r, self.r, self.g, self.g)\n @property\n def rgrr(self): return Float4(self.r, self.g, self.r, self.r)\n @property\n def rgrg(self): return Float4(self.r, self.g, self.r, self.g)\n @property\n def rggr(self): return Float4(self.r, self.g, self.g, self.r)\n @property\n def rggg(self): return Float4(self.r, self.g, self.g, self.g)\n @property\n def grrr(self): return Float4(self.g, self.r, self.r, self.r)\n @property\n def grrg(self): return Float4(self.g, self.r, self.r, self.g)\n @property\n def grgr(self): return Float4(self.g, self.r, self.g, self.r)\n @property\n def grgg(self): return Float4(self.g, self.r, self.g, self.g)\n @property\n def ggrr(self): return Float4(self.g, self.g, self.r, self.r)\n @property\n def ggrg(self): return Float4(self.g, self.g, self.r, self.g)\n @property\n def gggr(self): return Float4(self.g, self.g, self.g, self.r)\n @property\n def gggg(self): return Float4(self.g, self.g, self.g, self.g)" }, { "identifier": "Float3", "path": "src/tinycio/numerics/vector.py", "snippet": "class Float3(np.ndarray):\n \"\"\"\n Float3 type using numpy.ndarray.\n \"\"\"\n def __new__(cls, *args):\n if len(args) == 1:\n if isinstance(args[0], list) or isinstance(args[0], tuple):\n assert len(args[0]) == 3, \"list/tuple must have 3 components\"\n arr = np.asarray([args[0][0], args[0][1], args[0][2]], dtype=np.float32).view(cls)\n elif isinstance(args[0], np.ndarray):\n assert len(args[0].squeeze().shape) == 1 and args[0].shape[0] == 3, \\\n \"numpy array must be sized [C=3] or [C=3, H=1, W=1]\"\n arr = np.asarray(args[0].squeeze(), dtype=np.float32).view(cls)\n elif torch.is_tensor(args[0]):\n assert len(args[0].squeeze().size()) == 1 and args[0].size(0) == 3, \\\n \"torch tensor must be sized [C=3] or [C=3, H=1, W=1]\"\n value = 
args[0].squeeze().float().cpu()\n arr = np.asarray([value[0].item(), value[1].item(), value[2].item()], dtype=np.float32).view(cls)\n else:\n value = float(args[0])\n arr = np.asarray([value, value, value], dtype=np.float32).view(cls)\n elif len(args) == 3:\n arr = np.asarray(args, dtype=np.float32).view(cls)\n else: \n raise TypeError(\"Float3 only accepts 1 or 3 arguments.\")\n return arr\n\n def list(self) -> list:\n \"\"\"Returns values as Python list\"\"\"\n return [self[0], self[1], self[2]]\n\n def tuple(self) -> tuple:\n \"\"\"Returns values as Python tuple\"\"\"\n return (self[0], self[1], self[2])\n\n @property\n def x(self) -> float:\n return self[0]\n @x.setter\n def x(self, value):\n self[0] = value\n @property\n def y(self) -> float:\n return self[1]\n @y.setter\n def y(self, value):\n self[1] = value\n @property\n def z(self) -> float:\n return self[2]\n @z.setter\n def z(self, value):\n self[2] = value\n @property\n def r(self) -> float:\n return self[0]\n @r.setter\n def r(self, value):\n self[0] = value\n @property\n def g(self) -> float:\n return self[1]\n @g.setter\n def g(self, value):\n self[1] = value\n @property\n def b(self) -> float:\n return self[2]\n @b.setter\n def b(self, value):\n self[2] = value\n @staticmethod\n def zero():\n \"\"\"Returns numeric type filled with zero values\"\"\"\n return Float3(0., 0., 0.)\n @staticmethod\n def one():\n \"\"\"Returns numeric type filled with one values\"\"\"\n return Float3(1., 1., 1.)\n @staticmethod\n def x_axis():\n \"\"\"Returns numeric type with x-axis set to 1 and all others to 0\"\"\"\n return Float3(1., 0., 0.)\n @staticmethod\n def y_axis():\n \"\"\"Returns numeric type with y-axis set to 1 and all others to 0\"\"\"\n return Float3(0., 1., 0.)\n @staticmethod\n def z_axis():\n \"\"\"Returns numeric type with z-axis set to 1 and all others to 0\"\"\"\n return Float3(0., 0., 1.)\n\n @property\n def xx(self): return Float2(self.x, self.x)\n @property\n def xy(self): return Float2(self.x, self.y)\n @property\n def xz(self): return Float2(self.x, self.z)\n @property\n def yx(self): return Float2(self.y, self.x)\n @property\n def yy(self): return Float2(self.y, self.y)\n @property\n def yz(self): return Float2(self.y, self.z)\n @property\n def zx(self): return Float2(self.z, self.x)\n @property\n def zy(self): return Float2(self.z, self.y)\n @property\n def zz(self): return Float2(self.z, self.z)\n\n @property\n def rr(self): return Float2(self.r, self.r)\n @property\n def rg(self): return Float2(self.r, self.g)\n @property\n def rb(self): return Float2(self.r, self.b)\n @property\n def gr(self): return Float2(self.g, self.r)\n @property\n def gg(self): return Float2(self.g, self.g)\n @property\n def gb(self): return Float2(self.g, self.b)\n @property\n def br(self): return Float2(self.b, self.r)\n @property\n def bg(self): return Float2(self.b, self.g)\n @property\n def bb(self): return Float2(self.b, self.b)\n\n @property\n def xxx(self): return Float3(self.x, self.x, self.x)\n @property\n def xxy(self): return Float3(self.x, self.x, self.y)\n @property\n def xxz(self): return Float3(self.x, self.x, self.z)\n @property\n def xyx(self): return Float3(self.x, self.y, self.x)\n @property\n def xyy(self): return Float3(self.x, self.y, self.y)\n @property\n def xyz(self): return self\n @property\n def xzx(self): return Float3(self.x, self.z, self.x)\n @property\n def xzy(self): return Float3(self.x, self.z, self.y)\n @property\n def xzz(self): return Float3(self.x, self.z, self.z)\n @property\n def yxx(self): return 
Float3(self.y, self.x, self.x)\n @property\n def yxy(self): return Float3(self.y, self.x, self.y)\n @property\n def yxz(self): return Float3(self.y, self.x, self.z)\n @property\n def yyx(self): return Float3(self.y, self.y, self.x)\n @property\n def yyy(self): return Float3(self.y, self.y, self.y)\n @property\n def yyz(self): return Float3(self.y, self.y, self.z)\n @property\n def yzx(self): return Float3(self.y, self.z, self.x)\n @property\n def yzy(self): return Float3(self.y, self.z, self.y)\n @property\n def yzz(self): return Float3(self.y, self.z, self.z)\n @property\n def zxx(self): return Float3(self.z, self.x, self.x)\n @property\n def zxy(self): return Float3(self.z, self.x, self.y)\n @property\n def zxz(self): return Float3(self.z, self.x, self.z)\n @property\n def zyx(self): return Float3(self.z, self.y, self.x)\n @property\n def zyy(self): return Float3(self.z, self.y, self.y)\n @property\n def zyz(self): return Float3(self.z, self.y, self.z)\n @property\n def zzx(self): return Float3(self.z, self.z, self.x)\n @property\n def zzy(self): return Float3(self.z, self.z, self.y)\n @property\n def zzz(self): return Float3(self.z, self.z, self.z)\n\n @property\n def rrr(self): return Float3(self.r, self.r, self.r)\n @property\n def rrg(self): return Float3(self.r, self.r, self.g)\n @property\n def rrb(self): return Float3(self.r, self.r, self.b)\n @property\n def rgr(self): return Float3(self.r, self.g, self.r)\n @property\n def rgg(self): return Float3(self.r, self.g, self.g)\n @property\n def rgb(self): return self\n @property\n def rbr(self): return Float3(self.r, self.b, self.r)\n @property\n def rbg(self): return Float3(self.r, self.b, self.g)\n @property\n def rbb(self): return Float3(self.r, self.b, self.b)\n @property\n def grr(self): return Float3(self.g, self.r, self.r)\n @property\n def grg(self): return Float3(self.g, self.r, self.g)\n @property\n def grb(self): return Float3(self.g, self.r, self.b)\n @property\n def ggr(self): return Float3(self.g, self.g, self.r)\n @property\n def ggg(self): return Float3(self.g, self.g, self.g)\n @property\n def ggb(self): return Float3(self.g, self.g, self.b)\n @property\n def gbr(self): return Float3(self.g, self.b, self.r)\n @property\n def gbg(self): return Float3(self.g, self.b, self.g)\n @property\n def gbb(self): return Float3(self.g, self.b, self.b)\n @property\n def brr(self): return Float3(self.b, self.r, self.r)\n @property\n def brg(self): return Float3(self.b, self.r, self.g)\n @property\n def brb(self): return Float3(self.b, self.r, self.b)\n @property\n def bgr(self): return Float3(self.b, self.g, self.r)\n @property\n def bgg(self): return Float3(self.b, self.g, self.g)\n @property\n def bgb(self): return Float3(self.b, self.g, self.b)\n @property\n def bbr(self): return Float3(self.b, self.b, self.r)\n @property\n def bbg(self): return Float3(self.b, self.b, self.g)\n @property\n def bbb(self): return Float3(self.b, self.b, self.b)\n\n @property\n def xxxx(self): return Float4(self.x, self.x, self.x, self.x)\n @property\n def xxxy(self): return Float4(self.x, self.x, self.x, self.y)\n @property\n def xxxz(self): return Float4(self.x, self.x, self.x, self.z)\n @property\n def xxyx(self): return Float4(self.x, self.x, self.y, self.x)\n @property\n def xxyy(self): return Float4(self.x, self.x, self.y, self.y)\n @property\n def xxyz(self): return Float4(self.x, self.x, self.y, self.z)\n @property\n def xxzx(self): return Float4(self.x, self.x, self.z, self.x)\n @property\n def xxzy(self): return Float4(self.x, self.x, 
self.z, self.y)\n @property\n def xxzz(self): return Float4(self.x, self.x, self.z, self.z)\n @property\n def xyxx(self): return Float4(self.x, self.y, self.x, self.x)\n @property\n def xyxy(self): return Float4(self.x, self.y, self.x, self.y)\n @property\n def xyxz(self): return Float4(self.x, self.y, self.x, self.z)\n @property\n def xyyx(self): return Float4(self.x, self.y, self.y, self.x)\n @property\n def xyyy(self): return Float4(self.x, self.y, self.y, self.y)\n @property\n def xyyz(self): return Float4(self.x, self.y, self.y, self.z)\n @property\n def xyzx(self): return Float4(self.x, self.y, self.z, self.x)\n @property\n def xyzy(self): return Float4(self.x, self.y, self.z, self.y)\n @property\n def xyzz(self): return Float4(self.x, self.y, self.z, self.z)\n @property\n def xzxx(self): return Float4(self.x, self.z, self.x, self.x)\n @property\n def xzxy(self): return Float4(self.x, self.z, self.x, self.y)\n @property\n def xzxz(self): return Float4(self.x, self.z, self.x, self.z)\n @property\n def xzyx(self): return Float4(self.x, self.z, self.y, self.x)\n @property\n def xzyy(self): return Float4(self.x, self.z, self.y, self.y)\n @property\n def xzyz(self): return Float4(self.x, self.z, self.y, self.z)\n @property\n def xzzx(self): return Float4(self.x, self.z, self.z, self.x)\n @property\n def xzzy(self): return Float4(self.x, self.z, self.z, self.y)\n @property\n def xzzz(self): return Float4(self.x, self.z, self.z, self.z)\n @property\n def yxxx(self): return Float4(self.y, self.x, self.x, self.x)\n @property\n def yxxy(self): return Float4(self.y, self.x, self.x, self.y)\n @property\n def yxxz(self): return Float4(self.y, self.x, self.x, self.z)\n @property\n def yxyx(self): return Float4(self.y, self.x, self.y, self.x)\n @property\n def yxyy(self): return Float4(self.y, self.x, self.y, self.y)\n @property\n def yxyz(self): return Float4(self.y, self.x, self.y, self.z)\n @property\n def yxzx(self): return Float4(self.y, self.x, self.z, self.x)\n @property\n def yxzy(self): return Float4(self.y, self.x, self.z, self.y)\n @property\n def yxzz(self): return Float4(self.y, self.x, self.z, self.z)\n @property\n def yyxx(self): return Float4(self.y, self.y, self.x, self.x)\n @property\n def yyxy(self): return Float4(self.y, self.y, self.x, self.y)\n @property\n def yyxz(self): return Float4(self.y, self.y, self.x, self.z)\n @property\n def yyyx(self): return Float4(self.y, self.y, self.y, self.x)\n @property\n def yyyy(self): return Float4(self.y, self.y, self.y, self.y)\n @property\n def yyyz(self): return Float4(self.y, self.y, self.y, self.z)\n @property\n def yyzx(self): return Float4(self.y, self.y, self.z, self.x)\n @property\n def yyzy(self): return Float4(self.y, self.y, self.z, self.y)\n @property\n def yyzz(self): return Float4(self.y, self.y, self.z, self.z)\n @property\n def yzxx(self): return Float4(self.y, self.z, self.x, self.x)\n @property\n def yzxy(self): return Float4(self.y, self.z, self.x, self.y)\n @property\n def yzxz(self): return Float4(self.y, self.z, self.x, self.z)\n @property\n def yzyx(self): return Float4(self.y, self.z, self.y, self.x)\n @property\n def yzyy(self): return Float4(self.y, self.z, self.y, self.y)\n @property\n def yzyz(self): return Float4(self.y, self.z, self.y, self.z)\n @property\n def yzzx(self): return Float4(self.y, self.z, self.z, self.x)\n @property\n def yzzy(self): return Float4(self.y, self.z, self.z, self.y)\n @property\n def yzzz(self): return Float4(self.y, self.z, self.z, self.z)\n @property\n def zxxx(self): return 
Float4(self.z, self.x, self.x, self.x)\n @property\n def zxxy(self): return Float4(self.z, self.x, self.x, self.y)\n @property\n def zxxz(self): return Float4(self.z, self.x, self.x, self.z)\n @property\n def zxyx(self): return Float4(self.z, self.x, self.y, self.x)\n @property\n def zxyy(self): return Float4(self.z, self.x, self.y, self.y)\n @property\n def zxyz(self): return Float4(self.z, self.x, self.y, self.z)\n @property\n def zxzx(self): return Float4(self.z, self.x, self.z, self.x)\n @property\n def zxzy(self): return Float4(self.z, self.x, self.z, self.y)\n @property\n def zxzz(self): return Float4(self.z, self.x, self.z, self.z)\n @property\n def zyxx(self): return Float4(self.z, self.y, self.x, self.x)\n @property\n def zyxy(self): return Float4(self.z, self.y, self.x, self.y)\n @property\n def zyxz(self): return Float4(self.z, self.y, self.x, self.z)\n @property\n def zyyx(self): return Float4(self.z, self.y, self.y, self.x)\n @property\n def zyyy(self): return Float4(self.z, self.y, self.y, self.y)\n @property\n def zyyz(self): return Float4(self.z, self.y, self.y, self.z)\n @property\n def zyzx(self): return Float4(self.z, self.y, self.z, self.x)\n @property\n def zyzy(self): return Float4(self.z, self.y, self.z, self.y)\n @property\n def zyzz(self): return Float4(self.z, self.y, self.z, self.z)\n @property\n def zzxx(self): return Float4(self.z, self.z, self.x, self.x)\n @property\n def zzxy(self): return Float4(self.z, self.z, self.x, self.y)\n @property\n def zzxz(self): return Float4(self.z, self.z, self.x, self.z)\n @property\n def zzyx(self): return Float4(self.z, self.z, self.y, self.x)\n @property\n def zzyy(self): return Float4(self.z, self.z, self.y, self.y)\n @property\n def zzyz(self): return Float4(self.z, self.z, self.y, self.z)\n @property\n def zzzx(self): return Float4(self.z, self.z, self.z, self.x)\n @property\n def zzzy(self): return Float4(self.z, self.z, self.z, self.y)\n @property\n def zzzz(self): return Float4(self.z, self.z, self.z, self.z)\n\n @property\n def rrrr(self): return Float4(self.r, self.r, self.r, self.r)\n @property\n def rrrg(self): return Float4(self.r, self.r, self.r, self.g)\n @property\n def rrrb(self): return Float4(self.r, self.r, self.r, self.b)\n @property\n def rrgr(self): return Float4(self.r, self.r, self.g, self.r)\n @property\n def rrgg(self): return Float4(self.r, self.r, self.g, self.g)\n @property\n def rrgb(self): return Float4(self.r, self.r, self.g, self.b)\n @property\n def rrbr(self): return Float4(self.r, self.r, self.b, self.r)\n @property\n def rrbg(self): return Float4(self.r, self.r, self.b, self.g)\n @property\n def rrbb(self): return Float4(self.r, self.r, self.b, self.b)\n @property\n def rgrr(self): return Float4(self.r, self.g, self.r, self.r)\n @property\n def rgrg(self): return Float4(self.r, self.g, self.r, self.g)\n @property\n def rgrb(self): return Float4(self.r, self.g, self.r, self.b)\n @property\n def rggr(self): return Float4(self.r, self.g, self.g, self.r)\n @property\n def rggg(self): return Float4(self.r, self.g, self.g, self.g)\n @property\n def rggb(self): return Float4(self.r, self.g, self.g, self.b)\n @property\n def rgbr(self): return Float4(self.r, self.g, self.b, self.r)\n @property\n def rgbg(self): return Float4(self.r, self.g, self.b, self.g)\n @property\n def rgbb(self): return Float4(self.r, self.g, self.b, self.b)\n @property\n def rbrr(self): return Float4(self.r, self.b, self.r, self.r)\n @property\n def rbrg(self): return Float4(self.r, self.b, self.r, self.g)\n @property\n def 
rbrb(self): return Float4(self.r, self.b, self.r, self.b)\n @property\n def rbgr(self): return Float4(self.r, self.b, self.g, self.r)\n @property\n def rbgg(self): return Float4(self.r, self.b, self.g, self.g)\n @property\n def rbgb(self): return Float4(self.r, self.b, self.g, self.b)\n @property\n def rbbr(self): return Float4(self.r, self.b, self.b, self.r)\n @property\n def rbbg(self): return Float4(self.r, self.b, self.b, self.g)\n @property\n def rbbb(self): return Float4(self.r, self.b, self.b, self.b)\n @property\n def grrr(self): return Float4(self.g, self.r, self.r, self.r)\n @property\n def grrg(self): return Float4(self.g, self.r, self.r, self.g)\n @property\n def grrb(self): return Float4(self.g, self.r, self.r, self.b)\n @property\n def grgr(self): return Float4(self.g, self.r, self.g, self.r)\n @property\n def grgg(self): return Float4(self.g, self.r, self.g, self.g)\n @property\n def grgb(self): return Float4(self.g, self.r, self.g, self.b)\n @property\n def grbr(self): return Float4(self.g, self.r, self.b, self.r)\n @property\n def grbg(self): return Float4(self.g, self.r, self.b, self.g)\n @property\n def grbb(self): return Float4(self.g, self.r, self.b, self.b)\n @property\n def ggrr(self): return Float4(self.g, self.g, self.r, self.r)\n @property\n def ggrg(self): return Float4(self.g, self.g, self.r, self.g)\n @property\n def ggrb(self): return Float4(self.g, self.g, self.r, self.b)\n @property\n def gggr(self): return Float4(self.g, self.g, self.g, self.r)\n @property\n def gggg(self): return Float4(self.g, self.g, self.g, self.g)\n @property\n def gggb(self): return Float4(self.g, self.g, self.g, self.b)\n @property\n def ggbr(self): return Float4(self.g, self.g, self.b, self.r)\n @property\n def ggbg(self): return Float4(self.g, self.g, self.b, self.g)\n @property\n def ggbb(self): return Float4(self.g, self.g, self.b, self.b)\n @property\n def gbrr(self): return Float4(self.g, self.b, self.r, self.r)\n @property\n def gbrg(self): return Float4(self.g, self.b, self.r, self.g)\n @property\n def gbrb(self): return Float4(self.g, self.b, self.r, self.b)\n @property\n def gbgr(self): return Float4(self.g, self.b, self.g, self.r)\n @property\n def gbgg(self): return Float4(self.g, self.b, self.g, self.g)\n @property\n def gbgb(self): return Float4(self.g, self.b, self.g, self.b)\n @property\n def gbbr(self): return Float4(self.g, self.b, self.b, self.r)\n @property\n def gbbg(self): return Float4(self.g, self.b, self.b, self.g)\n @property\n def gbbb(self): return Float4(self.g, self.b, self.b, self.b)\n @property\n def brrr(self): return Float4(self.b, self.r, self.r, self.r)\n @property\n def brrg(self): return Float4(self.b, self.r, self.r, self.g)\n @property\n def brrb(self): return Float4(self.b, self.r, self.r, self.b)\n @property\n def brgr(self): return Float4(self.b, self.r, self.g, self.r)\n @property\n def brgg(self): return Float4(self.b, self.r, self.g, self.g)\n @property\n def brgb(self): return Float4(self.b, self.r, self.g, self.b)\n @property\n def brbr(self): return Float4(self.b, self.r, self.b, self.r)\n @property\n def brbg(self): return Float4(self.b, self.r, self.b, self.g)\n @property\n def brbb(self): return Float4(self.b, self.r, self.b, self.b)\n @property\n def bgrr(self): return Float4(self.b, self.g, self.r, self.r)\n @property\n def bgrg(self): return Float4(self.b, self.g, self.r, self.g)\n @property\n def bgrb(self): return Float4(self.b, self.g, self.r, self.b)\n @property\n def bggr(self): return Float4(self.b, self.g, self.g, 
self.r)\n @property\n def bggg(self): return Float4(self.b, self.g, self.g, self.g)\n @property\n def bggb(self): return Float4(self.b, self.g, self.g, self.b)\n @property\n def bgbr(self): return Float4(self.b, self.g, self.b, self.r)\n @property\n def bgbg(self): return Float4(self.b, self.g, self.b, self.g)\n @property\n def bgbb(self): return Float4(self.b, self.g, self.b, self.b)\n @property\n def bbrr(self): return Float4(self.b, self.b, self.r, self.r)\n @property\n def bbrg(self): return Float4(self.b, self.b, self.r, self.g)\n @property\n def bbrb(self): return Float4(self.b, self.b, self.r, self.b)\n @property\n def bbgr(self): return Float4(self.b, self.b, self.g, self.r)\n @property\n def bbgg(self): return Float4(self.b, self.b, self.g, self.g)\n @property\n def bbgb(self): return Float4(self.b, self.b, self.g, self.b)\n @property\n def bbbr(self): return Float4(self.b, self.b, self.b, self.r)\n @property\n def bbbg(self): return Float4(self.b, self.b, self.b, self.g)\n @property\n def bbbb(self): return Float4(self.b, self.b, self.b, self.b)" } ]
import typing import torch import numpy as np from typing import Union from ..numerics import Float2, Float3
token_num: 11,196
def col_hsv_to_rgb(hsv:Union[Float3, Color]) -> Float3: """ Convert HSV color to RGB. :param hsv: HSV color :type hsv: Float3 | Color """ h, s, v = hsv.x, hsv.y, hsv.z i = np.floor(h * 6) f = h * 6 - i p = v * (1 - s) q = v * (1 - f * s) t = v * (1 - (1 - f) * s) r, g, b = [ (v, t, p), (q, v, p), (p, v, t), (p, q, v), (t, p, v), (v, p, q), ][int(i%6)] return Float3(r, g, b) def col_rgb_to_hsv(rgb:Union[Float3, Color]) -> Float3: """ Convert RGB color to HSV. :param rgb: RGB color :type rgb: Float3 | Color """ r, g, b = rgb.r, rgb.g, rgb.b high = np.max([r, g, b]) low = np.min([r, g, b]) h, s, v = high, high, high d = high - low s = 0 if high == 0 else d/high if high == low: h = 0.0 else: h = { r: (g - b) / d + (6 if g < b else 0), g: (b - r) / d + 2, b: (r - g) / d + 4, }[high] h /= 6 return Float3(h, s, v) # My fairly lazy OKHSV/OKHSL "port" of: # https://github.com/holbrookdev/ok-color-picker # MIT License # Copyright (c) 2022 Brian Holbrook # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. __ok_eps = 1e-7 def col_oklab_to_linear_srgb(lab:Union[Float3, Color]) -> Float3: """ Convert OKLAB color to linear sRGB. :param lab: OKLAB color :type lab: Float3 | Color """ L, a, b = lab.x, lab.y, lab.z l_ = L + 0.3963377774 * a + 0.2158037573 * b m_ = L - 0.1055613458 * a - 0.0638541728 * b s_ = L - 0.0894841775 * a - 1.291485548 * b l = l_ * l_ * l_ m = m_ * m_ * m_ s = s_ * s_ * s_ res = Float3( +4.0767416621 * l - 3.3077115913 * m + 0.2309699292 * s, -1.2684380046 * l + 2.6097574011 * m - 0.3413193965 * s, -0.0041960863 * l - 0.7034186147 * m + 1.707614701 * s ) return res def col_linear_srgb_to_oklab(srgb:Union[Float3, Color]) -> Float3: """ Convert linear sRGB color to OKLAB. :param srgb: linear sRGB color :type srgb: Float3 | Color """ r, g, b = srgb.r, srgb.g, srgb.b l = 0.4122214708 * r + 0.5363325363 * g + 0.0514459929 * b m = 0.2119034982 * r + 0.6806995451 * g + 0.1073969566 * b s = 0.0883024619 * r + 0.2817188376 * g + 0.6299787005 * b l_ = np.cbrt(l) m_ = np.cbrt(m) s_ = np.cbrt(s) res = Float3( 0.2104542553 * l_ + 0.793617785 * m_ - 0.0040720468 * s_, 1.9779984951 * l_ - 2.428592205 * m_ + 0.4505937099 * s_, 0.0259040371 * l_ + 0.7827717662 * m_ - 0.808675766 * s_ ) return res
from __future__ import annotations def srgb_luminance(im_srgb:Union[torch.Tensor, ColorImage]) -> torch.Tensor: """ Return relative luminance of linear sRGB image. :param im_srgb: [C=3, H, W] color image tensor in sRGB color space :type im_srgb: torch.Tensor | ColorImage :return: [C=1, H, W] image tensor """ lum_r, lum_g, lum_b = 0.2126, 0.7152, 0.0722 return lum_r * im_srgb[0:1,...] + lum_g * im_srgb[1:2,...] + lum_b * im_srgb[2:3,...] def apply_gamma(im:Union[torch.Tensor, ColorImage], gamma:float) -> torch.Tensor: """ Apply arbitrary gamma correction. :param im: Image tensor :type im: torch.Tensor | ColorImage :param gamma: Gamma correction (should be in the range [0.1, 10.0]) :return: Gamma-corrected image tensor """ if gamma == 1.: return im assert 0.1 <= gamma <= 10.0, "gamma value should be in range [0.1, 10.0]" im = torch.pow(im, gamma) def apply_hue_oklab(im_oklab:Union[torch.Tensor, ColorImage], hue_delta:float) -> torch.Tensor: """ Manually shift hue of an image by a -1 to +1 delta value. :param im_oklab: Image tensor in OKLAB color space :type im_oklab: torch.Tensor | ColorImage :param hue_delta: Hue shift value in the range [-1., 1.] :return: Image tensor in OKLAB color space with adjusted hue """ assert -1. <= hue_delta <= 1., "hue_delta value should be in range [-1., 1.]" L, a, b = im_oklab[0:1], im_oklab[1:2], im_oklab[2:3] hue_delta = ((hue_delta * 0.5) % 1.) * 2. * torch.pi # Calculate angle and magnitude in the a-b plane angle = torch.atan2(b, a) magnitude = torch.sqrt(a**2 + b**2) # Apply hue correction angle += hue_delta # Convert back to Cartesian coordinates a_corrected = magnitude * torch.cos(angle) b_corrected = magnitude * torch.sin(angle) corrected = torch.cat([L, a_corrected, b_corrected], dim=0) return corrected return im def col_hsv_to_rgb(hsv:Union[Float3, Color]) -> Float3: """ Convert HSV color to RGB. :param hsv: HSV color :type hsv: Float3 | Color """ h, s, v = hsv.x, hsv.y, hsv.z i = np.floor(h * 6) f = h * 6 - i p = v * (1 - s) q = v * (1 - f * s) t = v * (1 - (1 - f) * s) r, g, b = [ (v, t, p), (q, v, p), (p, v, t), (p, q, v), (t, p, v), (v, p, q), ][int(i%6)] return Float3(r, g, b) def col_rgb_to_hsv(rgb:Union[Float3, Color]) -> Float3: """ Convert RGB color to HSV. :param rgb: RGB color :type rgb: Float3 | Color """ r, g, b = rgb.r, rgb.g, rgb.b high = np.max([r, g, b]) low = np.min([r, g, b]) h, s, v = high, high, high d = high - low s = 0 if high == 0 else d/high if high == low: h = 0.0 else: h = { r: (g - b) / d + (6 if g < b else 0), g: (b - r) / d + 2, b: (r - g) / d + 4, }[high] h /= 6 return Float3(h, s, v) # My fairly lazy OKHSV/OKHSL "port" of: # https://github.com/holbrookdev/ok-color-picker # MIT License # Copyright (c) 2022 Brian Holbrook # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. __ok_eps = 1e-7 def col_oklab_to_linear_srgb(lab:Union[Float3, Color]) -> Float3: """ Convert OKLAB color to linear sRGB. :param lab: OKLAB color :type lab: Float3 | Color """ L, a, b = lab.x, lab.y, lab.z l_ = L + 0.3963377774 * a + 0.2158037573 * b m_ = L - 0.1055613458 * a - 0.0638541728 * b s_ = L - 0.0894841775 * a - 1.291485548 * b l = l_ * l_ * l_ m = m_ * m_ * m_ s = s_ * s_ * s_ res = Float3( +4.0767416621 * l - 3.3077115913 * m + 0.2309699292 * s, -1.2684380046 * l + 2.6097574011 * m - 0.3413193965 * s, -0.0041960863 * l - 0.7034186147 * m + 1.707614701 * s ) return res def col_linear_srgb_to_oklab(srgb:Union[Float3, Color]) -> Float3: """ Convert linear sRGB color to OKLAB. :param srgb: linear sRGB color :type srgb: Float3 | Color """ r, g, b = srgb.r, srgb.g, srgb.b l = 0.4122214708 * r + 0.5363325363 * g + 0.0514459929 * b m = 0.2119034982 * r + 0.6806995451 * g + 0.1073969566 * b s = 0.0883024619 * r + 0.2817188376 * g + 0.6299787005 * b l_ = np.cbrt(l) m_ = np.cbrt(m) s_ = np.cbrt(s) res = Float3( 0.2104542553 * l_ + 0.793617785 * m_ - 0.0040720468 * s_, 1.9779984951 * l_ - 2.428592205 * m_ + 0.4505937099 * s_, 0.0259040371 * l_ + 0.7827717662 * m_ - 0.808675766 * s_ ) return res
next_line: def __ok_compute_max_saturation(ab:Float2) -> float:
gold_snippet_index: 0
created_at: 2023-12-15 15:39:08+00:00
level: 16k
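As an aside on the record above: its cropped_code carries the matrix-based OKLAB <-> linear sRGB conversions. The sketch below is an illustrative, self-contained round-trip check of those same coefficients; it is not part of the dataset record, and the Float3 container from the source is replaced with plain tuples so the snippet stands alone.

import numpy as np

def oklab_to_linear_srgb(L, a, b):
    # Same coefficients as col_oklab_to_linear_srgb in the record above.
    l_ = L + 0.3963377774 * a + 0.2158037573 * b
    m_ = L - 0.1055613458 * a - 0.0638541728 * b
    s_ = L - 0.0894841775 * a - 1.291485548 * b
    l, m, s = l_ ** 3, m_ ** 3, s_ ** 3
    return (
        +4.0767416621 * l - 3.3077115913 * m + 0.2309699292 * s,
        -1.2684380046 * l + 2.6097574011 * m - 0.3413193965 * s,
        -0.0041960863 * l - 0.7034186147 * m + 1.707614701 * s,
    )

def linear_srgb_to_oklab(r, g, b):
    # Same coefficients as col_linear_srgb_to_oklab in the record above.
    l = 0.4122214708 * r + 0.5363325363 * g + 0.0514459929 * b
    m = 0.2119034982 * r + 0.6806995451 * g + 0.1073969566 * b
    s = 0.0883024619 * r + 0.2817188376 * g + 0.6299787005 * b
    l_, m_, s_ = np.cbrt(l), np.cbrt(m), np.cbrt(s)
    return (
        0.2104542553 * l_ + 0.793617785 * m_ - 0.0040720468 * s_,
        1.9779984951 * l_ - 2.428592205 * m_ + 0.4505937099 * s_,
        0.0259040371 * l_ + 0.7827717662 * m_ - 0.808675766 * s_,
    )

# Round-trip check on an arbitrary linear sRGB triple.
rgb = (0.25, 0.5, 0.75)
lab = linear_srgb_to_oklab(*rgb)
assert np.allclose(oklab_to_linear_srgb(*lab), rgb, atol=1e-4)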
repo_name: quocanh34/magic-animate-modified
file_path: magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n 
dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n \n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], 
out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n # for controlnet\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None\n\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n if is_controlnet:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n if is_controlnet:\n sample = sample + mid_block_additional_residual\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n 
config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n # config[\"mid_block_type\"] = \"UNetMidBlock3DCrossAttn\"\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "ControlNetProcessor", "path": "magicanimate/models/multicontrolnet.py", "snippet": "class ControlNetProcessor(object):\n def __init__(\n self,\n controlnet: ControlNetModel,\n # image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],\n # controlnet_cond = torch.FloatTensor, #fix\n # conditioning_scale: float = 1.0,\n ):\n self.controlnet = controlnet\n # self.image = image\n # self.controlnet_cond = controlnet_cond #fix\n # self.conditioning_scale = conditioning_scale\n\n # def _default_height_width(self, height, width, image):\n # if isinstance(image, list):\n # image = image[0]\n\n # if height is None:\n # if isinstance(image, PIL.Image.Image):\n # height = image.height\n # elif isinstance(image, torch.Tensor):\n # height = image.shape[3]\n\n # height = (height // 8) * 8 # round down to nearest multiple of 8\n\n # if width is None:\n # if isinstance(image, PIL.Image.Image):\n # width = image.width\n # elif isinstance(image, torch.Tensor):\n # width = image.shape[2]\n\n # width = (width // 8) * 8 # round down to nearest multiple of 8\n\n # return height, width\n\n # def default_height_width(self, height, width):\n # return self._default_height_width(height, width, self.image)\n\n # def _prepare_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype):\n # if not isinstance(image, torch.Tensor):\n # if isinstance(image, PIL.Image.Image):\n # image = [image]\n\n # if isinstance(image[0], PIL.Image.Image):\n # image = [\n # np.array(i.resize((width, height), resample=PIL_INTERPOLATION[\"lanczos\"]))[None, :] for i in image\n # ]\n # image = np.concatenate(image, axis=0)\n # image = np.array(image).astype(np.float32) / 255.0\n # image = image.transpose(0, 3, 1, 2)\n # image = torch.from_numpy(image)\n # elif isinstance(image[0], torch.Tensor):\n # image = torch.cat(image, dim=0)\n\n # image_batch_size = image.shape[0]\n\n # if image_batch_size == 1:\n # repeat_by = batch_size\n # else:\n # # image batch size is the same as prompt batch size\n # repeat_by = num_images_per_prompt\n\n # image = image.repeat_interleave(repeat_by, dim=0)\n\n # image = image.to(device=device, dtype=dtype)\n\n # return image\n\n # def _check_inputs(self, image, prompt, prompt_embeds):\n # image_is_pil = isinstance(image, PIL.Image.Image)\n # image_is_tensor = isinstance(image, torch.Tensor)\n # image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)\n # image_is_tensor_list = 
isinstance(image, list) and isinstance(image[0], torch.Tensor)\n\n # if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list:\n # raise TypeError(\n # \"image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors\"\n # )\n\n # if image_is_pil:\n # image_batch_size = 1\n # elif image_is_tensor:\n # image_batch_size = image.shape[0]\n # elif image_is_pil_list:\n # image_batch_size = len(image)\n # elif image_is_tensor_list:\n # image_batch_size = len(image)\n\n # if prompt is not None and isinstance(prompt, str):\n # prompt_batch_size = 1\n # elif prompt is not None and isinstance(prompt, list):\n # prompt_batch_size = len(prompt)\n # elif prompt_embeds is not None:\n # prompt_batch_size = prompt_embeds.shape[0]\n\n # if image_batch_size != 1 and image_batch_size != prompt_batch_size:\n # raise ValueError(\n # f\"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}\"\n # )\n\n # def check_inputs(self, prompt, prompt_embeds):\n # self._check_inputs(self.image, prompt, prompt_embeds)\n\n # def prepare_image(self, width, height, batch_size, num_images_per_prompt, device, do_classifier_free_guidance):\n # self.image = self._prepare_image(\n # self.image, width, height, batch_size, num_images_per_prompt, device, self.controlnet.dtype\n # )\n # if do_classifier_free_guidance:\n # self.image = torch.cat([self.image] * 2)\n\n def __call__(\n self,\n controlnet_latent_input,\n t,\n encoder_hidden_states,\n controlnet_cond, #fix\n conditioning_scale,\n return_dict,\n ) -> Tuple:\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n controlnet_latent_input,\n t,\n encoder_hidden_states,\n controlnet_cond,\n conditioning_scale, \n return_dict=False,\n )\n down_block_res_samples = [\n down_block_res_sample * conditioning_scale for down_block_res_sample in down_block_res_samples\n ]\n mid_block_res_sample *= conditioning_scale\n return (down_block_res_samples, mid_block_res_sample)" }, { "identifier": "ReferenceAttentionControl", "path": "magicanimate/models/mutual_self_attention.py", "snippet": "class ReferenceAttentionControl():\n \n def __init__(self, \n unet,\n mode=\"write\",\n do_classifier_free_guidance=False,\n attention_auto_machine_weight = float('inf'),\n gn_auto_machine_weight = 1.0,\n style_fidelity = 1.0,\n reference_attn=True,\n reference_adain=False,\n fusion_blocks=\"midup\",\n batch_size=1, \n ) -> None:\n # 10. 
Modify self attention and group norm\n self.unet = unet\n assert mode in [\"read\", \"write\"]\n assert fusion_blocks in [\"midup\", \"full\"]\n self.reference_attn = reference_attn\n self.reference_adain = reference_adain\n self.fusion_blocks = fusion_blocks\n self.register_reference_hooks(\n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n fusion_blocks,\n batch_size=batch_size, \n )\n\n def register_reference_hooks(\n self, \n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n dtype=torch.float16,\n batch_size=1, \n num_images_per_prompt=1, \n device=torch.device(\"cpu\"), \n fusion_blocks='midup',\n ):\n MODE = mode\n do_classifier_free_guidance = do_classifier_free_guidance\n attention_auto_machine_weight = attention_auto_machine_weight\n gn_auto_machine_weight = gn_auto_machine_weight\n style_fidelity = style_fidelity\n reference_attn = reference_attn\n reference_adain = reference_adain\n fusion_blocks = fusion_blocks\n num_images_per_prompt = num_images_per_prompt\n dtype=dtype\n if do_classifier_free_guidance:\n uc_mask = (\n torch.Tensor([1] * batch_size * num_images_per_prompt * 16 + [0] * batch_size * num_images_per_prompt * 16)\n .to(device)\n .bool()\n )\n else:\n uc_mask = (\n torch.Tensor([0] * batch_size * num_images_per_prompt * 2)\n .to(device)\n .bool()\n )\n \n def hacked_basic_transformer_inner_forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n class_labels: Optional[torch.LongTensor] = None,\n video_length=None,\n ):\n if self.use_ada_layer_norm:\n norm_hidden_states = self.norm1(hidden_states, timestep)\n elif self.use_ada_layer_norm_zero:\n norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(\n hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n else:\n norm_hidden_states = self.norm1(hidden_states)\n\n # 1. 
Self-Attention\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n if self.only_cross_attention:\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n else:\n if MODE == \"write\":\n self.bank.append(norm_hidden_states.clone())\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n if MODE == \"read\":\n self.bank = [rearrange(d.unsqueeze(1).repeat(1, video_length, 1, 1), \"b t l c -> (b t) l c\")[:hidden_states.shape[0]] for d in self.bank]\n hidden_states_uc = self.attn1(norm_hidden_states, \n encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),\n attention_mask=attention_mask) + hidden_states\n hidden_states_c = hidden_states_uc.clone()\n _uc_mask = uc_mask.clone()\n if do_classifier_free_guidance:\n if hidden_states.shape[0] != _uc_mask.shape[0]:\n _uc_mask = (\n torch.Tensor([1] * (hidden_states.shape[0]//2) + [0] * (hidden_states.shape[0]//2))\n .to(device)\n .bool()\n )\n hidden_states_c[_uc_mask] = self.attn1(\n norm_hidden_states[_uc_mask],\n encoder_hidden_states=norm_hidden_states[_uc_mask],\n attention_mask=attention_mask,\n ) + hidden_states[_uc_mask]\n hidden_states = hidden_states_c.clone()\n \n self.bank.clear()\n if self.attn2 is not None:\n # Cross-Attention\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n hidden_states = (\n self.attn2(\n norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n + hidden_states\n )\n\n # Feed-forward\n hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states\n\n # Temporal-Attention\n if self.unet_use_temporal_attention:\n d = hidden_states.shape[1]\n hidden_states = rearrange(hidden_states, \"(b f) d c -> (b d) f c\", f=video_length)\n norm_hidden_states = (\n self.norm_temp(hidden_states, timestep) if self.use_ada_layer_norm else self.norm_temp(hidden_states)\n )\n hidden_states = self.attn_temp(norm_hidden_states) + hidden_states\n hidden_states = rearrange(hidden_states, \"(b d) f c -> (b f) d c\", d=d)\n\n return hidden_states\n \n if self.use_ada_layer_norm_zero:\n attn_output = gate_msa.unsqueeze(1) * attn_output\n hidden_states = attn_output + hidden_states\n\n if self.attn2 is not None:\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n\n # 2. Cross-Attention\n attn_output = self.attn2(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n **cross_attention_kwargs,\n )\n hidden_states = attn_output + hidden_states\n\n # 3. 
Feed-forward\n norm_hidden_states = self.norm3(hidden_states)\n\n if self.use_ada_layer_norm_zero:\n norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]\n\n ff_output = self.ff(norm_hidden_states)\n\n if self.use_ada_layer_norm_zero:\n ff_output = gate_mlp.unsqueeze(1) * ff_output\n\n hidden_states = ff_output + hidden_states\n\n return hidden_states\n\n def hacked_mid_forward(self, *args, **kwargs):\n eps = 1e-6\n x = self.original_forward(*args, **kwargs)\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append(mean)\n self.var_bank.append(var)\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))\n var_acc = sum(self.var_bank) / float(len(self.var_bank))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n x_uc = (((x - mean) / std) * std_acc) + mean_acc\n x_c = x_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n x_c[uc_mask] = x[uc_mask]\n x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc\n self.mean_bank = []\n self.var_bank = []\n return x\n\n def hack_CrossAttnDownBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n\n # TODO(Patrick, William) - attention mask is not used\n output_states = ()\n\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, 
output_states\n\n def hacked_DownBlock2D_forward(self, hidden_states, temb=None):\n eps = 1e-6\n\n output_states = ()\n\n for i, resnet in enumerate(self.resnets):\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states\n\n def hacked_CrossAttnUpBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n # TODO(Patrick, William) - attention mask is not used\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = 
hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):\n eps = 1e-6\n for i, resnet in enumerate(self.resnets):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)] \n attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n\n for i, module in enumerate(attn_modules):\n module._original_inner_forward = module.forward\n module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)\n module.bank = []\n module.attn_weight = float(i) / float(len(attn_modules))\n\n if self.reference_adain:\n gn_modules = [self.unet.mid_block]\n self.unet.mid_block.gn_weight = 0\n\n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n module.gn_weight = 1.0 - float(w) / float(len(down_blocks))\n gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n module.gn_weight = float(w) / float(len(up_blocks))\n gn_modules.append(module)\n\n for i, module in enumerate(gn_modules):\n if getattr(module, \"original_forward\", None) is None:\n module.original_forward = module.forward\n if i == 0:\n # mid_block\n module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)\n elif isinstance(module, CrossAttnDownBlock2D):\n 
module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)\n elif isinstance(module, DownBlock2D):\n module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)\n elif isinstance(module, CrossAttnUpBlock2D):\n module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)\n elif isinstance(module, UpBlock2D):\n module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)\n module.mean_bank = []\n module.var_bank = []\n module.gn_weight *= 2\n \n def update(self, writer, dtype=torch.float16):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in (torch_dfs(writer.unet.mid_block)+torch_dfs(writer.unet.up_blocks)) if isinstance(module, BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in torch_dfs(writer.unet) if isinstance(module, BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0]) \n writer_attn_modules = sorted(writer_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r, w in zip(reader_attn_modules, writer_attn_modules):\n r.bank = [v.clone().to(dtype) for v in w.bank]\n # w.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n writer_gn_modules = [writer.unet.mid_block]\n \n down_blocks = writer.unet.down_blocks\n for w, module in enumerate(down_blocks):\n writer_gn_modules.append(module)\n\n up_blocks = writer.unet.up_blocks\n for w, module in enumerate(up_blocks):\n writer_gn_modules.append(module)\n \n for r, w in zip(reader_gn_modules, writer_gn_modules):\n if len(w.mean_bank) > 0 and isinstance(w.mean_bank[0], list):\n r.mean_bank = [[v.clone().to(dtype) for v in vl] for vl in w.mean_bank]\n r.var_bank = [[v.clone().to(dtype) for v in vl] for vl in w.var_bank]\n else:\n r.mean_bank = [v.clone().to(dtype) for v in w.mean_bank]\n r.var_bank = [v.clone().to(dtype) for v in w.var_bank]\n \n def clear(self):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r in reader_attn_modules:\n r.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n for r in reader_gn_modules:\n r.mean_bank.clear()\n r.var_bank.clear()" }, { "identifier": 
"get_context_scheduler", "path": "magicanimate/pipelines/context.py", "snippet": "def get_context_scheduler(name: str) -> Callable:\n if name == \"uniform\":\n return uniform\n else:\n raise ValueError(f\"Unknown context_overlap policy {name}\")" }, { "identifier": "get_total_steps", "path": "magicanimate/pipelines/context.py", "snippet": "def get_total_steps(\n scheduler,\n timesteps: List[int],\n num_steps: Optional[int] = None,\n num_frames: int = ...,\n context_size: Optional[int] = None,\n context_stride: int = 3,\n context_overlap: int = 4,\n closed_loop: bool = True,\n):\n return sum(\n len(\n list(\n scheduler(\n i,\n num_steps,\n num_frames,\n context_size,\n context_stride,\n context_overlap,\n )\n )\n )\n for i in range(len(timesteps))\n )" }, { "identifier": "get_tensor_interpolation_method", "path": "magicanimate/utils/util.py", "snippet": "def get_tensor_interpolation_method():\n return tensor_interpolation" } ]
import inspect, math import numpy as np import torch import torch.distributed as dist from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from magicanimate.models.unet_controlnet import UNet3DConditionModel from magicanimate.models.multicontrolnet import ControlNetProcessor #fix from magicanimate.models.mutual_self_attention import ReferenceAttentionControl from magicanimate.pipelines.context import ( get_context_scheduler, get_total_steps ) from magicanimate.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
token_num: 13,110
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ # from magicanimate.models.controlnet import ControlNetModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ # from magicanimate.models.controlnet import ControlNetModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
next_line: unet: UNet3DConditionModel,
gold_snippet_index: 0
created_at: 2023-12-15 01:22:37+00:00
level: 16k
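As an aside on the record above: the ReferenceAttentionControl snippet in its context repeatedly re-normalizes reader activations so their per-channel spatial mean/std match statistics banked during the writer (reference) pass. The sketch below isolates just that statistic-matching step; it is illustrative only, the tensor shapes are hypothetical rather than taken from the dataset, and it is not part of the record.

import torch

def match_spatial_stats(x, mean_ref, var_ref, eps=1e-6):
    # Re-normalize x ([B, C, H, W]) so its per-channel spatial statistics match
    # the banked reference statistics, mirroring the MODE == "read" branches above.
    var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)
    std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
    std_ref = torch.maximum(var_ref, torch.zeros_like(var_ref) + eps) ** 0.5
    return ((x - mean) / std) * std_ref + mean_ref

# Hypothetical shapes, for illustration only.
ref = torch.randn(2, 8, 16, 16)
cur = torch.randn(2, 8, 16, 16)
var_ref, mean_ref = torch.var_mean(ref, dim=(2, 3), keepdim=True, correction=0)
out = match_spatial_stats(cur, mean_ref, var_ref)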
repo_name: daihaojun554/biliscrapy
file_path: biliscrapy/network/bilibili_danmu.py
[ { "identifier": "bili_pb2", "path": "biliscrapy/network/protobuf/bili_pb2.py", "snippet": "DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\\n\\x08my.proto\\x12 bilibili.community.service.dm.v1\\\"d\\n\\x06\\x41vatar\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\t\\x12\\x0b\\n\\x03url\\x18\\x02 \\x01(\\t\\x12\\x41\\n\\x0b\\x61vatar_type\\x18\\x03 \\x01(\\x0e\\x32,.bilibili.community.service.dm.v1.AvatarType\\\"#\\n\\x06\\x42ubble\\x12\\x0c\\n\\x04text\\x18\\x01 \\x01(\\t\\x12\\x0b\\n\\x03url\\x18\\x02 \\x01(\\t\\\"\\xc6\\x01\\n\\x08\\x42ubbleV2\\x12\\x0c\\n\\x04text\\x18\\x01 \\x01(\\t\\x12\\x0b\\n\\x03url\\x18\\x02 \\x01(\\t\\x12\\x41\\n\\x0b\\x62ubble_type\\x18\\x03 \\x01(\\x0e\\x32,.bilibili.community.service.dm.v1.BubbleType\\x12\\x15\\n\\rexposure_once\\x18\\x04 \\x01(\\x08\\x12\\x45\\n\\rexposure_type\\x18\\x05 \\x01(\\x0e\\x32..bilibili.community.service.dm.v1.ExposureType\\\"&\\n\\x06\\x42utton\\x12\\x0c\\n\\x04text\\x18\\x01 \\x01(\\t\\x12\\x0e\\n\\x06\\x61\\x63tion\\x18\\x02 \\x01(\\x05\\\"X\\n\\x0e\\x42uzzwordConfig\\x12\\x46\\n\\x08keywords\\x18\\x01 \\x03(\\x0b\\x32\\x34.bilibili.community.service.dm.v1.BuzzwordShowConfig\\\"x\\n\\x12\\x42uzzwordShowConfig\\x12\\x0c\\n\\x04name\\x18\\x01 \\x01(\\t\\x12\\x0e\\n\\x06schema\\x18\\x02 \\x01(\\t\\x12\\x0e\\n\\x06source\\x18\\x03 \\x01(\\x05\\x12\\n\\n\\x02id\\x18\\x04 \\x01(\\x03\\x12\\x13\\n\\x0b\\x62uzzword_id\\x18\\x05 \\x01(\\x03\\x12\\x13\\n\\x0bschema_type\\x18\\x06 \\x01(\\x05\\\"{\\n\\x08\\x43heckBox\\x12\\x0c\\n\\x04text\\x18\\x01 \\x01(\\t\\x12<\\n\\x04type\\x18\\x02 \\x01(\\x0e\\x32..bilibili.community.service.dm.v1.CheckboxType\\x12\\x15\\n\\rdefault_value\\x18\\x03 \\x01(\\x08\\x12\\x0c\\n\\x04show\\x18\\x04 \\x01(\\x08\\\"?\\n\\nCheckBoxV2\\x12\\x0c\\n\\x04text\\x18\\x01 \\x01(\\t\\x12\\x0c\\n\\x04type\\x18\\x02 \\x01(\\x05\\x12\\x15\\n\\rdefault_value\\x18\\x03 \\x01(\\x08\\\"\\x82\\x02\\n\\x0b\\x43lickButton\\x12\\x15\\n\\rportrait_text\\x18\\x01 \\x03(\\t\\x12\\x16\\n\\x0elandscape_text\\x18\\x02 \\x03(\\t\\x12\\x1b\\n\\x13portrait_text_focus\\x18\\x03 \\x03(\\t\\x12\\x1c\\n\\x14landscape_text_focus\\x18\\x04 \\x03(\\t\\x12\\x41\\n\\x0brender_type\\x18\\x05 \\x01(\\x0e\\x32,.bilibili.community.service.dm.v1.RenderType\\x12\\x0c\\n\\x04show\\x18\\x06 \\x01(\\x08\\x12\\x38\\n\\x06\\x62ubble\\x18\\x07 \\x01(\\x0b\\x32(.bilibili.community.service.dm.v1.Bubble\\\"\\xd5\\x01\\n\\rClickButtonV2\\x12\\x15\\n\\rportrait_text\\x18\\x01 \\x03(\\t\\x12\\x16\\n\\x0elandscape_text\\x18\\x02 \\x03(\\t\\x12\\x1b\\n\\x13portrait_text_focus\\x18\\x03 \\x03(\\t\\x12\\x1c\\n\\x14landscape_text_focus\\x18\\x04 \\x03(\\t\\x12\\x13\\n\\x0brender_type\\x18\\x05 \\x01(\\x05\\x12\\x17\\n\\x0ftext_input_post\\x18\\x06 \\x01(\\x08\\x12\\x15\\n\\rexposure_once\\x18\\x07 \\x01(\\x08\\x12\\x15\\n\\rexposure_type\\x18\\x08 \\x01(\\x05\\\"\\xa1\\x01\\n\\tCommandDm\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\x03\\x12\\x0b\\n\\x03oid\\x18\\x02 \\x01(\\x03\\x12\\x0b\\n\\x03mid\\x18\\x03 \\x01(\\t\\x12\\x0f\\n\\x07\\x63ommand\\x18\\x04 \\x01(\\t\\x12\\x0f\\n\\x07\\x63ontent\\x18\\x05 \\x01(\\t\\x12\\x10\\n\\x08progress\\x18\\x06 \\x01(\\x05\\x12\\r\\n\\x05\\x63time\\x18\\x07 \\x01(\\t\\x12\\r\\n\\x05mtime\\x18\\x08 \\x01(\\t\\x12\\r\\n\\x05\\x65xtra\\x18\\t \\x01(\\t\\x12\\r\\n\\x05idStr\\x18\\n \\x01(\\t\\\"P\\n\\rDanmakuAIFlag\\x12?\\n\\x08\\x64m_flags\\x18\\x01 \\x03(\\x0b\\x32-.bilibili.community.service.dm.v1.DanmakuFlag\\\"\\xad\\x02\\n\\x0b\\x44\\x61nmakuElem\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\x03\\x12\\x10\\n\\x08progress\\x18\\x02 
\\x01(\\x05\\x12\\x0c\\n\\x04mode\\x18\\x03 \\x01(\\x05\\x12\\x10\\n\\x08\\x66ontsize\\x18\\x04 \\x01(\\x05\\x12\\r\\n\\x05\\x63olor\\x18\\x05 \\x01(\\r\\x12\\x0f\\n\\x07midHash\\x18\\x06 \\x01(\\t\\x12\\x0f\\n\\x07\\x63ontent\\x18\\x07 \\x01(\\t\\x12\\r\\n\\x05\\x63time\\x18\\x08 \\x01(\\x03\\x12\\x0e\\n\\x06weight\\x18\\t \\x01(\\x05\\x12\\x0e\\n\\x06\\x61\\x63tion\\x18\\n \\x01(\\t\\x12\\x0c\\n\\x04pool\\x18\\x0b \\x01(\\x05\\x12\\r\\n\\x05idStr\\x18\\x0c \\x01(\\t\\x12\\x0c\\n\\x04\\x61ttr\\x18\\r \\x01(\\x05\\x12\\x11\\n\\tanimation\\x18\\x16 \\x01(\\t\\x12\\x42\\n\\x08\\x63olorful\\x18\\x18 \\x01(\\x0e\\x32\\x30.bilibili.community.service.dm.v1.DmColorfulType\\\")\\n\\x0b\\x44\\x61nmakuFlag\\x12\\x0c\\n\\x04\\x64mid\\x18\\x01 \\x01(\\x03\\x12\\x0c\\n\\x04\\x66lag\\x18\\x02 \\x01(\\r\\\"K\\n\\x11\\x44\\x61nmakuFlagConfig\\x12\\x10\\n\\x08rec_flag\\x18\\x01 \\x01(\\x05\\x12\\x10\\n\\x08rec_text\\x18\\x02 \\x01(\\t\\x12\\x12\\n\\nrec_switch\\x18\\x03 \\x01(\\x05\\\"\\xe4\\x06\\n\\x18\\x44\\x61nmuDefaultPlayerConfig\\x12)\\n!player_danmaku_use_default_config\\x18\\x01 \\x01(\\x08\\x12,\\n$player_danmaku_ai_recommended_switch\\x18\\x04 \\x01(\\x08\\x12+\\n#player_danmaku_ai_recommended_level\\x18\\x05 \\x01(\\x05\\x12\\x1f\\n\\x17player_danmaku_blocktop\\x18\\x06 \\x01(\\x08\\x12\\\"\\n\\x1aplayer_danmaku_blockscroll\\x18\\x07 \\x01(\\x08\\x12\\\"\\n\\x1aplayer_danmaku_blockbottom\\x18\\x08 \\x01(\\x08\\x12$\\n\\x1cplayer_danmaku_blockcolorful\\x18\\t \\x01(\\x08\\x12\\\"\\n\\x1aplayer_danmaku_blockrepeat\\x18\\n \\x01(\\x08\\x12#\\n\\x1bplayer_danmaku_blockspecial\\x18\\x0b \\x01(\\x08\\x12\\x1e\\n\\x16player_danmaku_opacity\\x18\\x0c \\x01(\\x02\\x12$\\n\\x1cplayer_danmaku_scalingfactor\\x18\\r \\x01(\\x02\\x12\\x1d\\n\\x15player_danmaku_domain\\x18\\x0e \\x01(\\x02\\x12\\x1c\\n\\x14player_danmaku_speed\\x18\\x0f \\x01(\\x05\\x12$\\n\\x1cinline_player_danmaku_switch\\x18\\x10 \\x01(\\x08\\x12)\\n!player_danmaku_senior_mode_switch\\x18\\x11 \\x01(\\x05\\x12.\\n&player_danmaku_ai_recommended_level_v2\\x18\\x12 \\x01(\\x05\\x12\\x98\\x01\\n*player_danmaku_ai_recommended_level_v2_map\\x18\\x13 \\x03(\\x0b\\x32\\x64.bilibili.community.service.dm.v1.DanmuDefaultPlayerConfig.PlayerDanmakuAiRecommendedLevelV2MapEntry\\x1aK\\n)PlayerDanmakuAiRecommendedLevelV2MapEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\x05\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x05:\\x02\\x38\\x01\\\"\\x8f\\x08\\n\\x11\\x44\\x61nmuPlayerConfig\\x12\\x1d\\n\\x15player_danmaku_switch\\x18\\x01 \\x01(\\x08\\x12\\\"\\n\\x1aplayer_danmaku_switch_save\\x18\\x02 \\x01(\\x08\\x12)\\n!player_danmaku_use_default_config\\x18\\x03 \\x01(\\x08\\x12,\\n$player_danmaku_ai_recommended_switch\\x18\\x04 \\x01(\\x08\\x12+\\n#player_danmaku_ai_recommended_level\\x18\\x05 \\x01(\\x05\\x12\\x1f\\n\\x17player_danmaku_blocktop\\x18\\x06 \\x01(\\x08\\x12\\\"\\n\\x1aplayer_danmaku_blockscroll\\x18\\x07 \\x01(\\x08\\x12\\\"\\n\\x1aplayer_danmaku_blockbottom\\x18\\x08 \\x01(\\x08\\x12$\\n\\x1cplayer_danmaku_blockcolorful\\x18\\t \\x01(\\x08\\x12\\\"\\n\\x1aplayer_danmaku_blockrepeat\\x18\\n \\x01(\\x08\\x12#\\n\\x1bplayer_danmaku_blockspecial\\x18\\x0b \\x01(\\x08\\x12\\x1e\\n\\x16player_danmaku_opacity\\x18\\x0c \\x01(\\x02\\x12$\\n\\x1cplayer_danmaku_scalingfactor\\x18\\r \\x01(\\x02\\x12\\x1d\\n\\x15player_danmaku_domain\\x18\\x0e \\x01(\\x02\\x12\\x1c\\n\\x14player_danmaku_speed\\x18\\x0f \\x01(\\x05\\x12&\\n\\x1eplayer_danmaku_enableblocklist\\x18\\x10 \\x01(\\x08\\x12$\\n\\x1cinline_player_danmaku_switch\\x18\\x11 
\\x01(\\x08\\x12$\\n\\x1cinline_player_danmaku_config\\x18\\x12 \\x01(\\x05\\x12&\\n\\x1eplayer_danmaku_ios_switch_save\\x18\\x13 \\x01(\\x05\\x12)\\n!player_danmaku_senior_mode_switch\\x18\\x14 \\x01(\\x05\\x12.\\n&player_danmaku_ai_recommended_level_v2\\x18\\x15 \\x01(\\x05\\x12\\x91\\x01\\n*player_danmaku_ai_recommended_level_v2_map\\x18\\x16 \\x03(\\x0b\\x32].bilibili.community.service.dm.v1.DanmuPlayerConfig.PlayerDanmakuAiRecommendedLevelV2MapEntry\\x1aK\\n)PlayerDanmakuAiRecommendedLevelV2MapEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\x05\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x05:\\x02\\x38\\x01\\\"0\\n\\x16\\x44\\x61nmuPlayerConfigPanel\\x12\\x16\\n\\x0eselection_text\\x18\\x01 \\x01(\\t\\\"K\\n\\x18\\x44\\x61nmuPlayerDynamicConfig\\x12\\x10\\n\\x08progress\\x18\\x01 \\x01(\\x05\\x12\\x1d\\n\\x15player_danmaku_domain\\x18\\x0e \\x01(\\x02\\\"\\x90\\x03\\n\\x15\\x44\\x61nmuPlayerViewConfig\\x12\\x61\\n\\x1d\\x64\\x61nmuku_default_player_config\\x18\\x01 \\x01(\\x0b\\x32:.bilibili.community.service.dm.v1.DanmuDefaultPlayerConfig\\x12R\\n\\x15\\x64\\x61nmuku_player_config\\x18\\x02 \\x01(\\x0b\\x32\\x33.bilibili.community.service.dm.v1.DanmuPlayerConfig\\x12\\x61\\n\\x1d\\x64\\x61nmuku_player_dynamic_config\\x18\\x03 \\x03(\\x0b\\x32:.bilibili.community.service.dm.v1.DanmuPlayerDynamicConfig\\x12]\\n\\x1b\\x64\\x61nmuku_player_config_panel\\x18\\x04 \\x01(\\x0b\\x32\\x38.bilibili.community.service.dm.v1.DanmuPlayerConfigPanel\\\"\\xd8\\x04\\n\\x14\\x44\\x61nmuWebPlayerConfig\\x12\\x11\\n\\tdm_switch\\x18\\x01 \\x01(\\x08\\x12\\x11\\n\\tai_switch\\x18\\x02 \\x01(\\x08\\x12\\x10\\n\\x08\\x61i_level\\x18\\x03 \\x01(\\x05\\x12\\x10\\n\\x08\\x62locktop\\x18\\x04 \\x01(\\x08\\x12\\x13\\n\\x0b\\x62lockscroll\\x18\\x05 \\x01(\\x08\\x12\\x13\\n\\x0b\\x62lockbottom\\x18\\x06 \\x01(\\x08\\x12\\x12\\n\\nblockcolor\\x18\\x07 \\x01(\\x08\\x12\\x14\\n\\x0c\\x62lockspecial\\x18\\x08 \\x01(\\x08\\x12\\x14\\n\\x0cpreventshade\\x18\\t \\x01(\\x08\\x12\\r\\n\\x05\\x64mask\\x18\\n \\x01(\\x08\\x12\\x0f\\n\\x07opacity\\x18\\x0b \\x01(\\x02\\x12\\x0e\\n\\x06\\x64marea\\x18\\x0c \\x01(\\x05\\x12\\x11\\n\\tspeedplus\\x18\\r \\x01(\\x02\\x12\\x10\\n\\x08\\x66ontsize\\x18\\x0e \\x01(\\x02\\x12\\x12\\n\\nscreensync\\x18\\x0f \\x01(\\x08\\x12\\x11\\n\\tspeedsync\\x18\\x10 \\x01(\\x08\\x12\\x12\\n\\nfontfamily\\x18\\x11 \\x01(\\t\\x12\\x0c\\n\\x04\\x62old\\x18\\x12 \\x01(\\x08\\x12\\x12\\n\\nfontborder\\x18\\x13 \\x01(\\x05\\x12\\x11\\n\\tdraw_type\\x18\\x14 \\x01(\\t\\x12\\x1a\\n\\x12senior_mode_switch\\x18\\x15 \\x01(\\x05\\x12\\x13\\n\\x0b\\x61i_level_v2\\x18\\x16 \\x01(\\x05\\x12\\x61\\n\\x0f\\x61i_level_v2_map\\x18\\x17 \\x03(\\x0b\\x32H.bilibili.community.service.dm.v1.DanmuWebPlayerConfig.AiLevelV2MapEntry\\x1a\\x33\\n\\x11\\x41iLevelV2MapEntry\\x12\\x0b\\n\\x03key\\x18\\x01 \\x01(\\x05\\x12\\r\\n\\x05value\\x18\\x02 \\x01(\\x05:\\x02\\x38\\x01\\\"Y\\n\\nDmColorful\\x12>\\n\\x04type\\x18\\x01 \\x01(\\x0e\\x32\\x30.bilibili.community.service.dm.v1.DmColorfulType\\x12\\x0b\\n\\x03src\\x18\\x02 \\x01(\\t\\\"A\\n\\x0f\\x44mExpoReportReq\\x12\\x12\\n\\nsession_id\\x18\\x01 \\x01(\\t\\x12\\x0b\\n\\x03oid\\x18\\x02 \\x01(\\x03\\x12\\r\\n\\x05spmid\\x18\\x04 \\x01(\\t\\\"\\x11\\n\\x0f\\x44mExpoReportRes\\\"\\xe3\\x0c\\n\\x11\\x44mPlayerConfigReq\\x12\\n\\n\\x02ts\\x18\\x01 \\x01(\\x03\\x12\\x45\\n\\x06switch\\x18\\x02 \\x01(\\x0b\\x32\\x35.bilibili.community.service.dm.v1.PlayerDanmakuSwitch\\x12N\\n\\x0bswitch_save\\x18\\x03 
\\x01(\\x0b\\x32\\x39.bilibili.community.service.dm.v1.PlayerDanmakuSwitchSave\\x12[\\n\\x12use_default_config\\x18\\x04 \\x01(\\x0b\\x32?.bilibili.community.service.dm.v1.PlayerDanmakuUseDefaultConfig\\x12\\x61\\n\\x15\\x61i_recommended_switch\\x18\\x05 \\x01(\\x0b\\x32\\x42.bilibili.community.service.dm.v1.PlayerDanmakuAiRecommendedSwitch\\x12_\\n\\x14\\x61i_recommended_level\\x18\\x06 \\x01(\\x0b\\x32\\x41.bilibili.community.service.dm.v1.PlayerDanmakuAiRecommendedLevel\\x12I\\n\\x08\\x62locktop\\x18\\x07 \\x01(\\x0b\\x32\\x37.bilibili.community.service.dm.v1.PlayerDanmakuBlocktop\\x12O\\n\\x0b\\x62lockscroll\\x18\\x08 \\x01(\\x0b\\x32:.bilibili.community.service.dm.v1.PlayerDanmakuBlockscroll\\x12O\\n\\x0b\\x62lockbottom\\x18\\t \\x01(\\x0b\\x32:.bilibili.community.service.dm.v1.PlayerDanmakuBlockbottom\\x12S\\n\\rblockcolorful\\x18\\n \\x01(\\x0b\\x32<.bilibili.community.service.dm.v1.PlayerDanmakuBlockcolorful\\x12O\\n\\x0b\\x62lockrepeat\\x18\\x0b \\x01(\\x0b\\x32:.bilibili.community.service.dm.v1.PlayerDanmakuBlockrepeat\\x12Q\\n\\x0c\\x62lockspecial\\x18\\x0c \\x01(\\x0b\\x32;.bilibili.community.service.dm.v1.PlayerDanmakuBlockspecial\\x12G\\n\\x07opacity\\x18\\r \\x01(\\x0b\\x32\\x36.bilibili.community.service.dm.v1.PlayerDanmakuOpacity\\x12S\\n\\rscalingfactor\\x18\\x0e \\x01(\\x0b\\x32<.bilibili.community.service.dm.v1.PlayerDanmakuScalingfactor\\x12\\x45\\n\\x06\\x64omain\\x18\\x0f \\x01(\\x0b\\x32\\x35.bilibili.community.service.dm.v1.PlayerDanmakuDomain\\x12\\x43\\n\\x05speed\\x18\\x10 \\x01(\\x0b\\x32\\x34.bilibili.community.service.dm.v1.PlayerDanmakuSpeed\\x12W\\n\\x0f\\x65nableblocklist\\x18\\x11 \\x01(\\x0b\\x32>.bilibili.community.service.dm.v1.PlayerDanmakuEnableblocklist\\x12^\\n\\x19inlinePlayerDanmakuSwitch\\x18\\x12 \\x01(\\x0b\\x32;.bilibili.community.service.dm.v1.InlinePlayerDanmakuSwitch\\x12[\\n\\x12senior_mode_switch\\x18\\x13 \\x01(\\x0b\\x32?.bilibili.community.service.dm.v1.PlayerDanmakuSeniorModeSwitch\\x12\\x64\\n\\x17\\x61i_recommended_level_v2\\x18\\x14 \\x01(\\x0b\\x32\\x43.bilibili.community.service.dm.v1.PlayerDanmakuAiRecommendedLevelV2\\\"/\\n\\x0b\\x44mSegConfig\\x12\\x11\\n\\tpage_size\\x18\\x01 \\x01(\\x03\\x12\\r\\n\\x05total\\x18\\x02 \\x01(\\x03\\\"\\xe4\\x01\\n\\x10\\x44mSegMobileReply\\x12<\\n\\x05\\x65lems\\x18\\x01 \\x03(\\x0b\\x32-.bilibili.community.service.dm.v1.DanmakuElem\\x12\\r\\n\\x05state\\x18\\x02 \\x01(\\x05\\x12@\\n\\x07\\x61i_flag\\x18\\x03 \\x01(\\x0b\\x32/.bilibili.community.service.dm.v1.DanmakuAIFlag\\x12\\x41\\n\\x0b\\x63olorfulSrc\\x18\\x05 \\x03(\\x0b\\x32,.bilibili.community.service.dm.v1.DmColorful\\\"\\xa6\\x01\\n\\x0e\\x44mSegMobileReq\\x12\\x0b\\n\\x03pid\\x18\\x01 \\x01(\\x03\\x12\\x0b\\n\\x03oid\\x18\\x02 \\x01(\\x03\\x12\\x0c\\n\\x04type\\x18\\x03 \\x01(\\x05\\x12\\x15\\n\\rsegment_index\\x18\\x04 \\x01(\\x03\\x12\\x16\\n\\x0eteenagers_mode\\x18\\x05 \\x01(\\x05\\x12\\n\\n\\x02ps\\x18\\x06 \\x01(\\x03\\x12\\n\\n\\x02pe\\x18\\x07 \\x01(\\x03\\x12\\x11\\n\\tpull_mode\\x18\\x08 \\x01(\\x05\\x12\\x12\\n\\nfrom_scene\\x18\\t \\x01(\\x05\\\"]\\n\\rDmSegOttReply\\x12\\x0e\\n\\x06\\x63losed\\x18\\x01 \\x01(\\x08\\x12<\\n\\x05\\x65lems\\x18\\x02 \\x03(\\x0b\\x32-.bilibili.community.service.dm.v1.DanmakuElem\\\"L\\n\\x0b\\x44mSegOttReq\\x12\\x0b\\n\\x03pid\\x18\\x01 \\x01(\\x03\\x12\\x0b\\n\\x03oid\\x18\\x02 \\x01(\\x03\\x12\\x0c\\n\\x04type\\x18\\x03 \\x01(\\x05\\x12\\x15\\n\\rsegment_index\\x18\\x04 \\x01(\\x03\\\"]\\n\\rDmSegSDKReply\\x12\\x0e\\n\\x06\\x63losed\\x18\\x01 \\x01(\\x08\\x12<\\n\\x05\\x65lems\\x18\\x02 
\\x03(\\x0b\\x32-.bilibili.community.service.dm.v1.DanmakuElem\\\"L\\n\\x0b\\x44mSegSDKReq\\x12\\x0b\\n\\x03pid\\x18\\x01 \\x01(\\x03\\x12\\x0b\\n\\x03oid\\x18\\x02 \\x01(\\x03\\x12\\x0c\\n\\x04type\\x18\\x03 \\x01(\\x05\\x12\\x15\\n\\rsegment_index\\x18\\x04 \\x01(\\x03\\\"\\xde\\x06\\n\\x0b\\x44mViewReply\\x12\\x0e\\n\\x06\\x63losed\\x18\\x01 \\x01(\\x08\\x12\\x39\\n\\x04mask\\x18\\x02 \\x01(\\x0b\\x32+.bilibili.community.service.dm.v1.VideoMask\\x12\\x41\\n\\x08subtitle\\x18\\x03 \\x01(\\x0b\\x32/.bilibili.community.service.dm.v1.VideoSubtitle\\x12\\x13\\n\\x0bspecial_dms\\x18\\x04 \\x03(\\t\\x12\\x44\\n\\x07\\x61i_flag\\x18\\x05 \\x01(\\x0b\\x32\\x33.bilibili.community.service.dm.v1.DanmakuFlagConfig\\x12N\\n\\rplayer_config\\x18\\x06 \\x01(\\x0b\\x32\\x37.bilibili.community.service.dm.v1.DanmuPlayerViewConfig\\x12\\x16\\n\\x0esend_box_style\\x18\\x07 \\x01(\\x05\\x12\\r\\n\\x05\\x61llow\\x18\\x08 \\x01(\\x08\\x12\\x11\\n\\tcheck_box\\x18\\t \\x01(\\t\\x12\\x1a\\n\\x12\\x63heck_box_show_msg\\x18\\n \\x01(\\t\\x12\\x18\\n\\x10text_placeholder\\x18\\x0b \\x01(\\t\\x12\\x19\\n\\x11input_placeholder\\x18\\x0c \\x01(\\t\\x12\\x1d\\n\\x15report_filter_content\\x18\\r \\x03(\\t\\x12\\x41\\n\\x0b\\x65xpo_report\\x18\\x0e \\x01(\\x0b\\x32,.bilibili.community.service.dm.v1.ExpoReport\\x12I\\n\\x0f\\x62uzzword_config\\x18\\x0f \\x01(\\x0b\\x32\\x30.bilibili.community.service.dm.v1.BuzzwordConfig\\x12\\x42\\n\\x0b\\x65xpressions\\x18\\x10 \\x03(\\x0b\\x32-.bilibili.community.service.dm.v1.Expressions\\x12?\\n\\npost_panel\\x18\\x11 \\x03(\\x0b\\x32+.bilibili.community.service.dm.v1.PostPanel\\x12\\x15\\n\\ractivity_meta\\x18\\x12 \\x03(\\t\\x12\\x42\\n\\x0bpost_panel2\\x18\\x13 \\x03(\\x0b\\x32-.bilibili.community.service.dm.v1.PostPanelV2\\\"X\\n\\tDmViewReq\\x12\\x0b\\n\\x03pid\\x18\\x01 \\x01(\\x03\\x12\\x0b\\n\\x03oid\\x18\\x02 \\x01(\\x03\\x12\\x0c\\n\\x04type\\x18\\x03 \\x01(\\x05\\x12\\r\\n\\x05spmid\\x18\\x04 \\x01(\\t\\x12\\x14\\n\\x0cis_hard_boot\\x18\\x05 \\x01(\\x05\\\"\\xc4\\x04\\n\\x0e\\x44mWebViewReply\\x12\\r\\n\\x05state\\x18\\x01 \\x01(\\x05\\x12\\x0c\\n\\x04text\\x18\\x02 \\x01(\\t\\x12\\x11\\n\\ttext_side\\x18\\x03 \\x01(\\t\\x12=\\n\\x06\\x64m_sge\\x18\\x04 \\x01(\\x0b\\x32-.bilibili.community.service.dm.v1.DmSegConfig\\x12\\x41\\n\\x04\\x66lag\\x18\\x05 \\x01(\\x0b\\x32\\x33.bilibili.community.service.dm.v1.DanmakuFlagConfig\\x12\\x13\\n\\x0bspecial_dms\\x18\\x06 \\x03(\\t\\x12\\x11\\n\\tcheck_box\\x18\\x07 \\x01(\\x08\\x12\\r\\n\\x05\\x63ount\\x18\\x08 \\x01(\\x03\\x12?\\n\\ncommandDms\\x18\\t \\x03(\\x0b\\x32+.bilibili.community.service.dm.v1.CommandDm\\x12M\\n\\rplayer_config\\x18\\n \\x01(\\x0b\\x32\\x36.bilibili.community.service.dm.v1.DanmuWebPlayerConfig\\x12\\x1d\\n\\x15report_filter_content\\x18\\x0b \\x03(\\t\\x12\\x42\\n\\x0b\\x65xpressions\\x18\\x0c \\x03(\\x0b\\x32-.bilibili.community.service.dm.v1.Expressions\\x12?\\n\\npost_panel\\x18\\r \\x03(\\x0b\\x32+.bilibili.community.service.dm.v1.PostPanel\\x12\\x15\\n\\ractivity_meta\\x18\\x0e \\x03(\\t\\\"*\\n\\nExpoReport\\x12\\x1c\\n\\x14should_report_at_end\\x18\\x01 \\x01(\\x08\\\"d\\n\\nExpression\\x12\\x0f\\n\\x07keyword\\x18\\x01 \\x03(\\t\\x12\\x0b\\n\\x03url\\x18\\x02 \\x01(\\t\\x12\\x38\\n\\x06period\\x18\\x03 \\x03(\\x0b\\x32(.bilibili.community.service.dm.v1.Period\\\"I\\n\\x0b\\x45xpressions\\x12:\\n\\x04\\x64\\x61ta\\x18\\x01 \\x03(\\x0b\\x32,.bilibili.community.service.dm.v1.Expression\\\"*\\n\\x19InlinePlayerDanmakuSwitch\\x12\\r\\n\\x05value\\x18\\x01 
\\x01(\\x08\\\"\\'\\n\\x05Label\\x12\\r\\n\\x05title\\x18\\x01 \\x01(\\t\\x12\\x0f\\n\\x07\\x63ontent\\x18\\x02 \\x03(\\t\\\"W\\n\\x07LabelV2\\x12\\r\\n\\x05title\\x18\\x01 \\x01(\\t\\x12\\x0f\\n\\x07\\x63ontent\\x18\\x02 \\x03(\\t\\x12\\x15\\n\\rexposure_once\\x18\\x03 \\x01(\\x08\\x12\\x15\\n\\rexposure_type\\x18\\x04 \\x01(\\x05\\\"$\\n\\x06Period\\x12\\r\\n\\x05start\\x18\\x01 \\x01(\\x03\\x12\\x0b\\n\\x03\\x65nd\\x18\\x02 \\x01(\\x03\\\"0\\n\\x1fPlayerDanmakuAiRecommendedLevel\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\"2\\n!PlayerDanmakuAiRecommendedLevelV2\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x05\\\"1\\n PlayerDanmakuAiRecommendedSwitch\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\")\\n\\x18PlayerDanmakuBlockbottom\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\"+\\n\\x1aPlayerDanmakuBlockcolorful\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\")\\n\\x18PlayerDanmakuBlockrepeat\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\")\\n\\x18PlayerDanmakuBlockscroll\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\"*\\n\\x19PlayerDanmakuBlockspecial\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\"&\\n\\x15PlayerDanmakuBlocktop\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\"$\\n\\x13PlayerDanmakuDomain\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x02\\\"-\\n\\x1cPlayerDanmakuEnableblocklist\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\"%\\n\\x14PlayerDanmakuOpacity\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x02\\\"+\\n\\x1aPlayerDanmakuScalingfactor\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x02\\\".\\n\\x1dPlayerDanmakuSeniorModeSwitch\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x05\\\"#\\n\\x12PlayerDanmakuSpeed\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x05\\\"8\\n\\x13PlayerDanmakuSwitch\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\x12\\x12\\n\\ncan_ignore\\x18\\x02 \\x01(\\x08\\\"(\\n\\x17PlayerDanmakuSwitchSave\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\".\\n\\x1dPlayerDanmakuUseDefaultConfig\\x12\\r\\n\\x05value\\x18\\x01 \\x01(\\x08\\\"\\x8c\\x03\\n\\tPostPanel\\x12\\r\\n\\x05start\\x18\\x01 \\x01(\\x03\\x12\\x0b\\n\\x03\\x65nd\\x18\\x02 \\x01(\\x03\\x12\\x10\\n\\x08priority\\x18\\x03 \\x01(\\x03\\x12\\x0e\\n\\x06\\x62iz_id\\x18\\x04 \\x01(\\x03\\x12\\x44\\n\\x08\\x62iz_type\\x18\\x05 \\x01(\\x0e\\x32\\x32.bilibili.community.service.dm.v1.PostPanelBizType\\x12\\x43\\n\\x0c\\x63lick_button\\x18\\x06 \\x01(\\x0b\\x32-.bilibili.community.service.dm.v1.ClickButton\\x12?\\n\\ntext_input\\x18\\x07 \\x01(\\x0b\\x32+.bilibili.community.service.dm.v1.TextInput\\x12=\\n\\tcheck_box\\x18\\x08 \\x01(\\x0b\\x32*.bilibili.community.service.dm.v1.CheckBox\\x12\\x36\\n\\x05toast\\x18\\t \\x01(\\x0b\\x32\\'.bilibili.community.service.dm.v1.Toast\\\"\\xcb\\x03\\n\\x0bPostPanelV2\\x12\\r\\n\\x05start\\x18\\x01 \\x01(\\x03\\x12\\x0b\\n\\x03\\x65nd\\x18\\x02 \\x01(\\x03\\x12\\x10\\n\\x08\\x62iz_type\\x18\\x03 \\x01(\\x05\\x12\\x45\\n\\x0c\\x63lick_button\\x18\\x04 \\x01(\\x0b\\x32/.bilibili.community.service.dm.v1.ClickButtonV2\\x12\\x41\\n\\ntext_input\\x18\\x05 \\x01(\\x0b\\x32-.bilibili.community.service.dm.v1.TextInputV2\\x12?\\n\\tcheck_box\\x18\\x06 \\x01(\\x0b\\x32,.bilibili.community.service.dm.v1.CheckBoxV2\\x12\\x38\\n\\x05toast\\x18\\x07 \\x01(\\x0b\\x32).bilibili.community.service.dm.v1.ToastV2\\x12:\\n\\x06\\x62ubble\\x18\\x08 \\x01(\\x0b\\x32*.bilibili.community.service.dm.v1.BubbleV2\\x12\\x38\\n\\x05label\\x18\\t \\x01(\\x0b\\x32).bilibili.community.service.dm.v1.LabelV2\\x12\\x13\\n\\x0bpost_status\\x18\\n \\x01(\\x05\\\")\\n\\x08Response\\x12\\x0c\\n\\x04\\x63ode\\x18\\x01 
\\x01(\\x05\\x12\\x0f\\n\\x07message\\x18\\x02 \\x01(\\t\\\"\\xf9\\x02\\n\\x0cSubtitleItem\\x12\\n\\n\\x02id\\x18\\x01 \\x01(\\x03\\x12\\x0e\\n\\x06id_str\\x18\\x02 \\x01(\\t\\x12\\x0b\\n\\x03lan\\x18\\x03 \\x01(\\t\\x12\\x0f\\n\\x07lan_doc\\x18\\x04 \\x01(\\t\\x12\\x14\\n\\x0csubtitle_url\\x18\\x05 \\x01(\\t\\x12:\\n\\x06\\x61uthor\\x18\\x06 \\x01(\\x0b\\x32*.bilibili.community.service.dm.v1.UserInfo\\x12<\\n\\x04type\\x18\\x07 \\x01(\\x0e\\x32..bilibili.community.service.dm.v1.SubtitleType\\x12\\x15\\n\\rlan_doc_brief\\x18\\x08 \\x01(\\t\\x12\\x41\\n\\x07\\x61i_type\\x18\\t \\x01(\\x0e\\x32\\x30.bilibili.community.service.dm.v1.SubtitleAiType\\x12\\x45\\n\\tai_status\\x18\\n \\x01(\\x0e\\x32\\x32.bilibili.community.service.dm.v1.SubtitleAiStatus\\\"\\xe8\\x02\\n\\tTextInput\\x12\\x1c\\n\\x14portrait_placeholder\\x18\\x01 \\x03(\\t\\x12\\x1d\\n\\x15landscape_placeholder\\x18\\x02 \\x03(\\t\\x12\\x41\\n\\x0brender_type\\x18\\x03 \\x01(\\x0e\\x32,.bilibili.community.service.dm.v1.RenderType\\x12\\x18\\n\\x10placeholder_post\\x18\\x04 \\x01(\\x08\\x12\\x0c\\n\\x04show\\x18\\x05 \\x01(\\x08\\x12\\x38\\n\\x06\\x61vatar\\x18\\x06 \\x03(\\x0b\\x32(.bilibili.community.service.dm.v1.Avatar\\x12\\x41\\n\\x0bpost_status\\x18\\x07 \\x01(\\x0e\\x32,.bilibili.community.service.dm.v1.PostStatus\\x12\\x36\\n\\x05label\\x18\\x08 \\x01(\\x0b\\x32\\'.bilibili.community.service.dm.v1.Label\\\"\\xfb\\x01\\n\\x0bTextInputV2\\x12\\x1c\\n\\x14portrait_placeholder\\x18\\x01 \\x03(\\t\\x12\\x1d\\n\\x15landscape_placeholder\\x18\\x02 \\x03(\\t\\x12\\x41\\n\\x0brender_type\\x18\\x03 \\x01(\\x0e\\x32,.bilibili.community.service.dm.v1.RenderType\\x12\\x18\\n\\x10placeholder_post\\x18\\x04 \\x01(\\x08\\x12\\x38\\n\\x06\\x61vatar\\x18\\x05 \\x03(\\x0b\\x32(.bilibili.community.service.dm.v1.Avatar\\x12\\x18\\n\\x10text_input_limit\\x18\\x06 \\x01(\\x05\\\"o\\n\\x05Toast\\x12\\x0c\\n\\x04text\\x18\\x01 \\x01(\\t\\x12\\x10\\n\\x08\\x64uration\\x18\\x02 \\x01(\\x05\\x12\\x0c\\n\\x04show\\x18\\x03 \\x01(\\x08\\x12\\x38\\n\\x06\\x62utton\\x18\\x04 \\x01(\\x0b\\x32(.bilibili.community.service.dm.v1.Button\\\"-\\n\\rToastButtonV2\\x12\\x0c\\n\\x04text\\x18\\x01 \\x01(\\t\\x12\\x0e\\n\\x06\\x61\\x63tion\\x18\\x02 \\x01(\\x05\\\"s\\n\\x07ToastV2\\x12\\x0c\\n\\x04text\\x18\\x01 \\x01(\\t\\x12\\x10\\n\\x08\\x64uration\\x18\\x02 \\x01(\\x05\\x12H\\n\\x0ftoast_button_v2\\x18\\x03 \\x01(\\x0b\\x32/.bilibili.community.service.dm.v1.ToastButtonV2\\\"\\\\\\n\\x08UserInfo\\x12\\x0b\\n\\x03mid\\x18\\x01 \\x01(\\x03\\x12\\x0c\\n\\x04name\\x18\\x02 \\x01(\\t\\x12\\x0b\\n\\x03sex\\x18\\x03 \\x01(\\t\\x12\\x0c\\n\\x04\\x66\\x61\\x63\\x65\\x18\\x04 \\x01(\\t\\x12\\x0c\\n\\x04sign\\x18\\x05 \\x01(\\t\\x12\\x0c\\n\\x04rank\\x18\\x06 \\x01(\\x05\\\"S\\n\\tVideoMask\\x12\\x0b\\n\\x03\\x63id\\x18\\x01 \\x01(\\x03\\x12\\x0c\\n\\x04plat\\x18\\x02 \\x01(\\x05\\x12\\x0b\\n\\x03\\x66ps\\x18\\x03 \\x01(\\x05\\x12\\x0c\\n\\x04time\\x18\\x04 \\x01(\\x03\\x12\\x10\\n\\x08mask_url\\x18\\x05 \\x01(\\t\\\"o\\n\\rVideoSubtitle\\x12\\x0b\\n\\x03lan\\x18\\x01 \\x01(\\t\\x12\\x0e\\n\\x06lanDoc\\x18\\x02 \\x01(\\t\\x12\\x41\\n\\tsubtitles\\x18\\x03 
\\x03(\\x0b\\x32..bilibili.community.service.dm.v1.SubtitleItem*3\\n\\nAvatarType\\x12\\x12\\n\\x0e\\x41vatarTypeNone\\x10\\x00\\x12\\x11\\n\\rAvatarTypeNFT\\x10\\x01*Y\\n\\nBubbleType\\x12\\x12\\n\\x0e\\x42ubbleTypeNone\\x10\\x00\\x12\\x19\\n\\x15\\x42ubbleTypeClickButton\\x10\\x01\\x12\\x1c\\n\\x18\\x42ubbleTypeDmSettingPanel\\x10\\x02*X\\n\\x0c\\x43heckboxType\\x12\\x14\\n\\x10\\x43heckboxTypeNone\\x10\\x00\\x12\\x19\\n\\x15\\x43heckboxTypeEncourage\\x10\\x01\\x12\\x17\\n\\x13\\x43heckboxTypeColorDM\\x10\\x02*L\\n\\tDMAttrBit\\x12\\x14\\n\\x10\\x44MAttrBitProtect\\x10\\x00\\x12\\x15\\n\\x11\\x44MAttrBitFromLive\\x10\\x01\\x12\\x12\\n\\x0e\\x44MAttrHighLike\\x10\\x02*5\\n\\x0e\\x44mColorfulType\\x12\\x0c\\n\\x08NoneType\\x10\\x00\\x12\\x15\\n\\x0fVipGradualColor\\x10\\xe1\\xd4\\x03*<\\n\\x0c\\x45xposureType\\x12\\x14\\n\\x10\\x45xposureTypeNone\\x10\\x00\\x12\\x16\\n\\x12\\x45xposureTypeDMSend\\x10\\x01*\\xc1\\x01\\n\\x10PostPanelBizType\\x12\\x18\\n\\x14PostPanelBizTypeNone\\x10\\x00\\x12\\x1d\\n\\x19PostPanelBizTypeEncourage\\x10\\x01\\x12\\x1b\\n\\x17PostPanelBizTypeColorDM\\x10\\x02\\x12\\x19\\n\\x15PostPanelBizTypeNFTDM\\x10\\x03\\x12\\x1d\\n\\x19PostPanelBizTypeFragClose\\x10\\x04\\x12\\x1d\\n\\x19PostPanelBizTypeRecommend\\x10\\x05*8\\n\\nPostStatus\\x12\\x14\\n\\x10PostStatusNormal\\x10\\x00\\x12\\x14\\n\\x10PostStatusClosed\\x10\\x01*N\\n\\nRenderType\\x12\\x12\\n\\x0eRenderTypeNone\\x10\\x00\\x12\\x14\\n\\x10RenderTypeSingle\\x10\\x01\\x12\\x16\\n\\x12RenderTypeRotation\\x10\\x02*6\\n\\x10SubtitleAiStatus\\x12\\x08\\n\\x04None\\x10\\x00\\x12\\x0c\\n\\x08\\x45xposure\\x10\\x01\\x12\\n\\n\\x06\\x41ssist\\x10\\x02*+\\n\\x0eSubtitleAiType\\x12\\n\\n\\x06Normal\\x10\\x00\\x12\\r\\n\\tTranslate\\x10\\x01*\\x1e\\n\\x0cSubtitleType\\x12\\x06\\n\\x02\\x43\\x43\\x10\\x00\\x12\\x06\\n\\x02\\x41I\\x10\\x01*N\\n\\x11ToastFunctionType\\x12\\x19\\n\\x15ToastFunctionTypeNone\\x10\\x00\\x12\\x1e\\n\\x1aToastFunctionTypePostPanel\\x10\\x01\\x32\\xa0\\x05\\n\\x02\\x44M\\x12s\\n\\x0b\\x44mSegMobile\\x12\\x30.bilibili.community.service.dm.v1.DmSegMobileReq\\x1a\\x32.bilibili.community.service.dm.v1.DmSegMobileReply\\x12\\x64\\n\\x06\\x44mView\\x12+.bilibili.community.service.dm.v1.DmViewReq\\x1a-.bilibili.community.service.dm.v1.DmViewReply\\x12q\\n\\x0e\\x44mPlayerConfig\\x12\\x33.bilibili.community.service.dm.v1.DmPlayerConfigReq\\x1a*.bilibili.community.service.dm.v1.Response\\x12j\\n\\x08\\x44mSegOtt\\x12-.bilibili.community.service.dm.v1.DmSegOttReq\\x1a/.bilibili.community.service.dm.v1.DmSegOttReply\\x12j\\n\\x08\\x44mSegSDK\\x12-.bilibili.community.service.dm.v1.DmSegSDKReq\\x1a/.bilibili.community.service.dm.v1.DmSegSDKReply\\x12t\\n\\x0c\\x44mExpoReport\\x12\\x31.bilibili.community.service.dm.v1.DmExpoReportReq\\x1a\\x31.bilibili.community.service.dm.v1.DmExpoReportResb\\x06proto3')" }, { "identifier": "bili_utils", "path": "biliscrapy/network/bilibili_utils.py", "snippet": "class bili_utils:\n def __init__(self):\n self.logger = logging.getLogger('log')\n self.header = headers\n self.script_dir = os.path.dirname(os.path.abspath(__file__))\n file_path = os.path.join(self.script_dir, 'bilibili_cookies.json')\n with open(file_path, 'r') as file:\n self.cookies_data = json.load(file)\n self.cookies = {cookie['name']: cookie['value'] for cookie in self.cookies_data}\n\n def bv_get(self, bvorurl):\n # https://api.bilibili.com/x/web-interface/view?bvid=BV1uG41197Tf\n # 将bv提取出来\n bv_identifier = \"BV\" # BV号的标识符\n if \"http://\" in bvorurl or \"https://\" in bvorurl: # 检查是否是一个URL\n 
self.logger.info(\"你输入的是http链接,正在解析...\")\n bv_index = bvorurl.find(bv_identifier)\n if bv_index != -1: # 如果找到了BV号\n bv = bvorurl[bv_index:bv_index + len(bv_identifier) + 10] # 提取BV号\n self.logger.info(f\"BV号为......: {bv}\")\n return bv\n else:\n self.logger.info(\"你输入的链接地址有误!\")\n return\n elif bv_identifier in bvorurl: # 如果输入的是BV号\n self.logger.info(f\"你输入的是BV号{bvorurl},正在解析...\")\n bv = bvorurl\n return bv\n else:\n self.logger.info(f\"请输入正确的链接地址或BV号!,{bvorurl}\")\n return \"BV1111111111\"\n\n '''\n av 就是 oid 评论里面的参数\n '''\n\n def bv2av(self, bv):\n bv2av_url = 'https://api.bilibili.com/x/web-interface/view?bvid='\n if bv.startswith(\"BV\"):\n url = bv2av_url + str(bv)\n retry_count = 0\n max_retries = 10\n retry_delay = 1 # seconds\n while retry_count < max_retries:\n try:\n response = requests.get(url,headers=headers,cookies=self.cookies)\n response.raise_for_status() # 检查请求是否成功\n data = response.json()\n # self.logger.info(data)\n if 'data' in data and 'aid' in data['data']:\n avid = data['data']['aid']\n self.logger.info(f\"找到的avid{avid}\")\n return avid\n else:\n self.logger.info(\"未找到有效的aid值,正在重新尝试获取...\")\n retry_count += 1\n time.sleep(retry_delay)\n except (requests.RequestException, ValueError) as e:\n self.logger.info(f\"请求发生错误:{e}\")\n retry_count += 1\n self.logger.info(\"服务器返回错误!请稍后再试!\")\n self.logger.info(f\"正在重新尝试获取aid,尝试次数==>{retry_count}\")\n time.sleep(retry_delay)\n\n return None\n\n '''\n cid 是弹幕用的参数\n '''\n\n def bv2cid(self, bv):\n url = f\"https://api.bilibili.com/x/player/pagelist?bvid={str(bv)}&jsonp=jsonp\"\n retry_count = 1\n json_s = requests.get(url,headers=headers,cookies=self.cookies).json()\n self.logger.info(\"bv====》\"+bv)\n if json_s['code'] == 0:\n cid = json_s['data'][0]['cid']\n self.logger.info(\"提取出来的cid是:\" + str(cid))\n return cid\n else:\n self.logger.error(\"服务器返回错误!请稍后再试!\")\n retry_count+=1\n if retry_count > 10:\n self.logger.error(\"尝试次数过多,请稍后再试!\")\n return None\n else:\n self.logger.error(\"正在重新尝试获取cid,尝试次数==>\" + str(retry_count))\n return self.bv2cid(bv)\n\n def get_bilibili_cookies(self):\n options = webdriver.ChromeOptions()\n # options.add_argument('--headless')\n # options.add_argument('--disable-gpu')\n # 动态获取路径 不用每次都手动输入路径\n # chromedriver.exe 的路径\n # 获取当前脚本的绝对路径\n current_path = os.path.dirname(os.path.abspath(__file__))\n\n # 构建 chromedriver 的绝对路径\n driver_path = os.path.join(current_path, 'chromedriver.exe')\n\n # 创建 WebDriver 服务\n service = Service(driver_path)\n # service = Service('./chromedriver.exe')\n options.add_argument('--no-sandbox')\n options.binary_location='C:\\\\Program Files\\\\Google\\\\chrome-win64\\\\chrome.exe'\n driver = webdriver.Chrome(options=options, service=service)\n\n # 打开 Bilibili 网站\n driver.get('https://www.bilibili.com/')\n #\n login_btn = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,\n '#i_cecream > div.bili-feed4 > div.bili-header.large-header > div.bili-header__bar > ul.right-entry > li:nth-child(1) > li > div.right-entry__outside.go-login-btn')))\n login_btn.click()\n # 等待登录完成成\n time.sleep(10)\n driver.get('https://www.bilibili.com/')\n # 在这里,模拟登录流程(需要输入账号和密码)\n # 扫码登录然后,等待完成,完成的条件是屏幕上出现了某个\n\n search = WebDriverWait(driver, 20).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, '#nav-searchform > div.nav-search-btn')))\n search.click()\n time.sleep(3)\n cookies = driver.get_cookies()\n # 获取当前脚本的路径\n current_path = os.path.dirname(os.path.abspath(__file__))\n with open(os.path.join(current_path, 'bilibili_cookies.json'), 'w') as f:\n # 写入当前文件\n 
f.write(json.dumps(cookies))\n # 写入成功\n self.logger.info('写入成功{}'.format(cookies))\n driver.quit()\n return\n\n def get_info_by_bv(self, bv):\n url = f\"https://api.bilibili.com/x/web-interface/view?bvid={str(bv)}\"\n\n def try_get(url):\n try:\n response = requests.get(url, headers=self.header, cookies=self.cookies)\n js_str = response.json()\n if js_str.get('code', 0) == 0:\n return js_str['data']\n else:\n # 可能需要根据API的设计,记录不同的错误\n self.logger.error(\n f\"Video API returned non-success code: {js_str.get('code', 'Unknown')} with message: {js_str.get('msg', 'Unknown')}\")\n except requests.exceptions.RequestException as e:\n self.logger.error(f\"An error occurred: {e}\")\n return None\n\n result = None\n retry_count = 10\n for _ in range(retry_count):\n result = try_get(url)\n if result:\n break\n\n return result\n\n # 检查url是否合法\n def check_url(self, url):\n if url.startswith(\"BV\"):\n return True\n elif url.startswith(\"https://www.bilibili.com/\"):\n return True\n else:\n return False" } ]
import logging
import os
import json
import sys
import requests
from datetime import datetime

from .protobuf import bili_pb2 as Danmaku
from .bilibili_utils import bili_utils
12,137
headers = {
    'authority': 'message.bilibili.com',
    'accept': 'application/json, text/plain, */*',
    'accept-language': 'zh-CN,zh;q=0.9',
    'cache-control': 'no-cache',
    'origin': 'https://www.bilibili.com',
    'pragma': 'no-cache',
    'referer': 'https://www.bilibili.com/',
    'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-site',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}

# import bili_pb2 as Danmaku


class Danmu:
    def __init__(self):
headers = {
    'authority': 'message.bilibili.com',
    'accept': 'application/json, text/plain, */*',
    'accept-language': 'zh-CN,zh;q=0.9',
    'cache-control': 'no-cache',
    'origin': 'https://www.bilibili.com',
    'pragma': 'no-cache',
    'referer': 'https://www.bilibili.com/',
    'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-site',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}

# import bili_pb2 as Danmaku


class Danmu:
    def __init__(self):
self.utils = bili_utils()
1
2023-12-14 10:14:24+00:00
16k
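
A row like the one above pairs retrieved context snippets with a truncated in-file prefix (cropped_code) and the reference continuation (next_line), with gold_snippet_index pointing at the context entry that contains the needed definition. The following is a minimal sketch of consuming such a record, assuming only the field names visible in this row; the records.jsonl path and the generate() call are hypothetical placeholders, not part of the dataset itself.

import json

def load_records(path="records.jsonl"):
    # Hypothetical dump: one JSON object per line with the fields shown above
    # (repo_name, file_path, context, import_statement, cropped_code,
    #  next_line, gold_snippet_index, ...).
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            yield json.loads(line)

def build_prompt(record):
    # Use the snippet marked by gold_snippet_index as cross-file context,
    # then append the in-file prefix that the model must continue.
    gold = record["context"][record["gold_snippet_index"]]
    context_block = f"# {gold['path']}\n{gold['snippet']}\n"
    return context_block + "\n" + record["cropped_code"]

def is_exact_match(record, completion):
    # Simplest possible check: compare the first generated line against
    # the reference next_line, ignoring surrounding whitespace.
    first_line = completion.strip().splitlines()[0] if completion.strip() else ""
    return first_line.strip() == record["next_line"].strip()

if __name__ == "__main__":
    for record in load_records():
        prompt = build_prompt(record)
        # `generate` stands in for whatever completion model is being evaluated:
        # completion = generate(prompt)
        # print(record["file_path"], is_exact_match(record, completion))
        print(record["repo_name"], record["file_path"], len(prompt))

For the row above, build_prompt would prepend the bili_utils snippet (index 1 in the context list) to the truncated bilibili_danmu.py prefix, and an exact-match check would expect the model to produce "self.utils = bili_utils()" as its first line.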