Meismaxandmaxisme committed on
Commit
e37061c
·
verified ·
1 Parent(s): 380660f

Upload 4 files

Browse files
src/backend/models/device.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ from pydantic import BaseModel
2
+
3
+
4
class DeviceInfo(BaseModel):
    """Plain-string description of the host machine's hardware and OS.

    NOTE(review): fields are presumably populated from the stdlib
    ``platform`` module by the caller — confirm against the producer.
    """

    # Broad device category (e.g. CPU vs GPU) — can't tell exact vocabulary
    # from here; verify against the code that constructs this model.
    device_type: str
    device_name: str
    os: str
    platform: str
    processor: str
src/backend/models/gen_images.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pydantic import BaseModel
2
+ from enum import Enum
3
+ from paths import FastStableDiffusionPaths
4
+
5
+
6
class ImageFormat(str, Enum):
    """Supported output image encodings.

    Inherits from ``str`` so members compare equal to, and serialize as,
    their plain lowercase format-name values.
    """

    JPEG = "jpeg"
    PNG = "png"
11
+
12
+
13
class GeneratedImages(BaseModel):
    """Settings controlling where and how generated images are saved."""

    # Output directory, resolved by the project's path helper.
    path: str = FastStableDiffusionPaths.get_results_path()
    # NOTE(review): stored uppercase ("PNG") while ImageFormat values are
    # lowercase — consumers appear to expect the uppercase spelling; confirm.
    format: str = ImageFormat.PNG.value.upper()
    save_image: bool = True
    # Quality setting (0-100); presumably only meaningful for JPEG — TODO confirm.
    save_image_quality: int = 90
src/backend/models/lcmdiffusion_setting.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from enum import Enum
2
+ from PIL import Image
3
+ from typing import Any, Optional, Union
4
+
5
+ from constants import LCM_DEFAULT_MODEL, LCM_DEFAULT_MODEL_OPENVINO
6
+ from paths import FastStableDiffusionPaths
7
+ from pydantic import BaseModel
8
+
9
+
10
class LCMLora(BaseModel):
    """Base model + LCM-LoRA adapter pair (Hugging Face repo ids)."""

    base_model_id: str = "Lykon/dreamshaper-8"
    lcm_lora_id: str = "latent-consistency/lcm-lora-sdv1-5"
13
+
14
+
15
class DiffusionTask(str, Enum):
    """Kinds of diffusion jobs the pipeline can run.

    ``str`` mixin makes each member interchangeable with its string value,
    which is how ``LCMDiffusionSetting.diffusion_task`` stores it.
    """

    text_to_image = "text_to_image"    # generate purely from a text prompt
    image_to_image = "image_to_image"  # transform a supplied init image
20
+
21
+
22
class Lora(BaseModel):
    """Configuration for a single LoRA adapter."""

    # Directory scanned for LoRA model files (project path helper).
    models_dir: str = FastStableDiffusionPaths.get_lora_models_path()
    # Selected LoRA file; type left unconstrained upstream.
    path: Optional[Any] = None
    # Blend weight applied to the adapter.
    weight: Optional[float] = 0.5
    # Presumably whether to fuse LoRA weights into the base model — confirm.
    fuse: bool = True
    enabled: bool = False
28
+
29
+
30
class ControlNetSetting(BaseModel):
    """Configuration for one ControlNet adapter."""

    adapter_path: Optional[str] = None  # ControlNet adapter path
    conditioning_scale: float = 0.5
    enabled: bool = False
    # NOTE(review): the original annotation here was the PIL ``Image``
    # *module* (from ``from PIL import Image``), not a class; annotated as
    # ``Any`` to be a valid type. Underscore prefix keeps it out of the
    # pydantic field set.
    _control_image: Any = None  # Control image, PIL image
35
+
36
+
37
class GGUFModel(BaseModel):
    """File locations for a GGUF-quantized model and its components.

    Component roles below are inferred from field names — confirm against
    the loader that consumes this model.
    """

    # Directory holding GGUF model files (project path helper).
    gguf_models: str = FastStableDiffusionPaths.get_gguf_models_path()
    diffusion_path: Optional[str] = None  # main diffusion model file
    clip_path: Optional[str] = None       # presumably CLIP text encoder
    t5xxl_path: Optional[str] = None      # presumably T5-XXL text encoder
    vae_path: Optional[str] = None        # presumably VAE
43
+
44
+
45
class LCMDiffusionSetting(BaseModel):
    """Full set of parameters for one LCM diffusion generation run.

    Mutable defaults (``LCMLora()``, ``Lora()``, ``GGUFModel()``, the
    ``dirs`` dict) are safe here because pydantic deep-copies field
    defaults per instance.
    """

    lcm_model_id: str = LCM_DEFAULT_MODEL
    openvino_lcm_model_id: str = LCM_DEFAULT_MODEL_OPENVINO
    use_offline_model: bool = False
    use_lcm_lora: bool = False
    lcm_lora: Optional[LCMLora] = LCMLora()
    use_tiny_auto_encoder: bool = False
    use_openvino: bool = False
    prompt: str = ""
    negative_prompt: str = ""
    # Source image when diffusion_task is image_to_image; type unconstrained.
    init_image: Any = None
    strength: Optional[float] = 0.6
    image_height: Optional[int] = 512
    image_width: Optional[int] = 512
    inference_steps: Optional[int] = 1
    guidance_scale: Optional[float] = 1
    clip_skip: Optional[int] = 1
    token_merging: Optional[float] = 0
    number_of_images: Optional[int] = 1
    # Fixed seed used only when use_seed is True.
    seed: Optional[int] = 123123
    use_seed: bool = False
    use_safety_checker: bool = False
    # Stored as the enum's string value, not the enum member.
    diffusion_task: str = DiffusionTask.text_to_image.value
    lora: Optional[Lora] = Lora()
    # Either a single setting or a list of them (multi-ControlNet).
    controlnet: Optional[Union[ControlNetSetting, list[ControlNetSetting]]] = None
    # Model directories, resolved by the project's path helper.
    dirs: dict = {
        "controlnet": FastStableDiffusionPaths.get_controlnet_models_path(),
        "lora": FastStableDiffusionPaths.get_lora_models_path(),
    }
    # Presumably forces pipeline re-creation on next run — confirm with consumer.
    rebuild_pipeline: bool = False
    use_gguf_model: bool = False
    gguf_model: Optional[GGUFModel] = GGUFModel()
src/backend/models/upscale.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ from enum import Enum
2
+
3
+
4
class UpscaleMode(str, Enum):
    """Image upscaling modes.

    Fix: the original docstring ("Diffusion task types") was copy-pasted
    from ``DiffusionTask`` and described the wrong enum. Member semantics
    are inferred from their names — confirm against the upscaler backend.
    """

    normal = "normal"          # presumably a plain single-pass upscale
    sd_upscale = "sd_upscale"  # presumably Stable Diffusion tiled upscale
    aura_sr = "aura_sr"        # presumably AuraSR super-resolution