Jackflack09 committed
Commit 70d8cc5 · 1 Parent(s): e2cdcb6

Update requirements.txt

Files changed (1)
  1. requirements.txt +0 -58
requirements.txt CHANGED
@@ -1,58 +0,0 @@
- import torch
- from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
- from PIL import Image
- import gradio as gr
- import random
- import utils
-
- class Model:
-     def __init__(self, name, path="", prefix=""):
-         self.name = name
-         self.path = path
-         self.prefix = prefix
-
- class DiffusionApp:
-     def __init__(self, models):
-         self.models = models
-         self.current_model = self.models[0]
-         self.current_steps = 25
-         self.is_colab = utils.is_google_colab()
-         self.device = "cuda" if torch.cuda.is_available() else "cpu"
-         self.pipe = self.initialize_pipe(self.current_model.path)
-
-     def initialize_pipe(self, model_path):
-         pipe = StableDiffusionPipeline.from_pretrained(
-             model_path,
-             torch_dtype=torch.float16,
-             scheduler=DPMSolverMultistepScheduler.from_pretrained(model_path, subfolder="scheduler")
-         )
-         pipe = pipe.to(self.device)
-         if self.device == "cuda":
-             pipe.enable_xformers_memory_efficient_attention()
-         return pipe
-
-     def update_model(self, model_name):
-         for model in self.models:
-             if model.name == model_name:
-                 self.current_model = model
-                 self.pipe = self.initialize_pipe(self.current_model.path)
-
-     def inference(self, model_name, prompt, guidance, steps, n_images=1, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""):
-         self.update_model(model_name)
-         # Rest of your inference code here...
-
- # Initialize your models
- models = [
-     Model("Vivid Watercolors", "Evel/VividWatercolors", "watercolor style"),
-     Model("Loving Vincent (Van Gogh)", "dallinmackay/Van-Gogh-diffusion", "lvngvncnt "),
-     # Add the rest of your models here...
- ]
-
- # Initialize your app
- app = DiffusionApp(models)
-
- # Use your app in your Gradio interface
- # For example:
- inputs = [gr.Dropdown(label="Model", choices=[m.name for m in app.models], value=app.current_model.name)]
- outputs = [gr.Image(height=512)]
- gr.Interface(app.inference, inputs, outputs).launch()
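Note: the removed lines are Python application code rather than pip requirements; a requirements.txt normally lists one installable package per line. A minimal sketch of what this file might contain, inferred from the imports in the removed code (these entries are an assumption and are not part of this commit; xformers is only needed for the memory-efficient attention call, and utils is a local module rather than a package):

torch
diffusers
Pillow
gradio
xformers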