Jonny001 committed on
Commit
1e98fc8
·
verified ·
1 Parent(s): 625e1fa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -18
app.py CHANGED
@@ -4,7 +4,6 @@ import gradio as gr
4
  model_1 = gr.load("models/pimpilikipilapi1/NSFW_master")
5
  model_2 = gr.load("models/DiegoJR1973/NSFW-TrioHMH-Flux")
6
  model_3 = gr.load("models/prashanth970/flux-lora-uncensored")
7
- model_4 = gr.load("models/Purz/face-projection")
8
 
9
 
10
  default_negative_prompt = (
@@ -29,10 +28,7 @@ def generate_image_model_1(prompt, negative_prompt):
29
 
30
  def generate_image_model_2(prompt, negative_prompt):
31
  prompt += " 8k"
32
- try:
33
- return model_2(prompt, negative_prompt=negative_prompt)
34
- except TypeError:
35
- return model_2(prompt)
36
 
37
  def generate_image_model_3(prompt, negative_prompt):
38
  prompt += " 10k"
@@ -41,19 +37,11 @@ def generate_image_model_3(prompt, negative_prompt):
41
  except TypeError:
42
  return model_3(prompt)
43
 
44
- def generate_image_model_4(prompt, negative_prompt):
45
- try:
46
- return model_4(prompt)
47
- except TypeError:
48
- return model_4(prompt)
49
-
50
- # Gradio interface
51
  interface = gr.Interface(
52
  fn=lambda prompt, negative_prompt: (
53
  generate_image_model_1(prompt, negative_prompt),
54
  generate_image_model_2(prompt, negative_prompt),
55
- generate_image_model_3(prompt, negative_prompt),
56
- generate_image_model_4(prompt, negative_prompt),
57
  ),
58
  inputs=[
59
  gr.Textbox(label="Type your prompt here: ✍️", placeholder="Describe what you want..."),
@@ -63,10 +51,10 @@ interface = gr.Interface(
63
  gr.Image(label="Generated Image - Model 1"),
64
  gr.Image(label="Generated Image - Model 2"),
65
  gr.Image(label="Generated Image - Model 3"),
66
- gr.Image(label="Generated Image - Model 4"),
67
  ],
68
- title="Text to Image Generator",
69
- description="⚠️ Sorry for the inconvenience. The models are currently running on the CPU, which might affect performance. We appreciate your understanding.",
 
70
  )
71
 
72
- interface.launch()
 
4
  model_1 = gr.load("models/pimpilikipilapi1/NSFW_master")
5
  model_2 = gr.load("models/DiegoJR1973/NSFW-TrioHMH-Flux")
6
  model_3 = gr.load("models/prashanth970/flux-lora-uncensored")
 
7
 
8
 
9
  default_negative_prompt = (
 
28
 
29
  def generate_image_model_2(prompt, negative_prompt):
30
  prompt += " 8k"
31
+ return model_2(prompt) # Removed negative_prompt
 
 
 
32
 
33
  def generate_image_model_3(prompt, negative_prompt):
34
  prompt += " 10k"
 
37
  except TypeError:
38
  return model_3(prompt)
39
 
 
 
 
 
 
 
 
40
  interface = gr.Interface(
41
  fn=lambda prompt, negative_prompt: (
42
  generate_image_model_1(prompt, negative_prompt),
43
  generate_image_model_2(prompt, negative_prompt),
44
+ generate_image_model_3(prompt, negative_prompt)
 
45
  ),
46
  inputs=[
47
  gr.Textbox(label="Type your prompt here: ✍️", placeholder="Describe what you want..."),
 
51
  gr.Image(label="Generated Image - Model 1"),
52
  gr.Image(label="Generated Image - Model 2"),
53
  gr.Image(label="Generated Image - Model 3"),
 
54
  ],
55
+ title="Text to Image (NSFW) 🔞",
56
+ theme="NoCrypt/miku",
57
+ description="⚠️ Sorry for the inconvenience. The model is currently running on the CPU, which might affect performance. We appreciate your understanding.",
58
  )
59
 
60
+ interface.launch()