My-AI-Projects committed on
Commit d13afd7 · verified · 1 Parent(s): 7fc7e54

Update app.py

Files changed (1):
  1. app.py +24 -25
app.py CHANGED
@@ -1,22 +1,26 @@
  import gradio as gr
- from gradio_client import Client
-
- # Initialize the client with the model endpoint
- client = Client("black-forest-labs/FLUX.1-dev")
-
- def generate_image(prompt, seed=0, randomize_seed=True, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28):
-     # Make the API request
-     result = client.predict(
-         prompt=prompt,
-         seed=seed,
-         randomize_seed=randomize_seed,
-         width=width,
-         height=height,
-         guidance_scale=guidance_scale,
-         num_inference_steps=num_inference_steps,
-         api_name="/infer"
-     )
-     return result
+ import torch
+ from transformers import DalleMini, DalleMiniProcessor
+ from PIL import Image
+
+ # Load model and processor
+ model_id = "dalle-mini/dalle-mega"
+ model = DalleMini.from_pretrained(model_id)
+ processor = DalleMiniProcessor.from_pretrained(model_id)
+
+ # Function to generate image
+ def generate_image(prompt, num_inference_steps=50):
+     inputs = processor(prompt, return_tensors="pt")
+
+     # Generate images
+     with torch.no_grad():
+         outputs = model.generate(**inputs, num_inference_steps=num_inference_steps)
+
+     # Convert to PIL image
+     image = processor.decode(outputs[0], skip_special_tokens=True)
+     image = Image.open(io.BytesIO(image))
+
+     return image

  # Define the Gradio interface
  with gr.Blocks() as demo:
@@ -24,11 +28,6 @@ with gr.Blocks() as demo:

      with gr.Row():
          prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here...")
-         seed = gr.Slider(minimum=0, maximum=100000, step=1, value=0, label="Seed")
-         randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
-         width = gr.Slider(minimum=256, maximum=2048, step=32, value=1024, label="Width")
-         height = gr.Slider(minimum=256, maximum=2048, step=32, value=1024, label="Height")
-         guidance_scale = gr.Slider(minimum=1, maximum=15, step=0.1, value=3.5, label="Guidance Scale")
          num_inference_steps = gr.Slider(minimum=1, maximum=50, step=1, value=28, label="Number of Inference Steps")

      with gr.Row():
@@ -36,10 +35,10 @@ with gr.Blocks() as demo:

      result = gr.Image(label="Generated Image")

-     # Define the button click action
+     # Connect the function to the button
      generate_button.click(
          fn=generate_image,
-         inputs=[prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+         inputs=[prompt, num_inference_steps],
          outputs=result
      )
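
Note on the new version: the transformers library does not provide DalleMini or DalleMiniProcessor classes, and generate_image uses io without importing it, so the committed function will not run as-is. A minimal sketch of an equivalent local text-to-image function built on the diffusers library follows; the model id, dtype handling, and step default are illustrative assumptions, not part of this commit.

import torch
from diffusers import DiffusionPipeline

# Illustrative checkpoint; any diffusers-compatible text-to-image model id works here.
model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

# Load the pipeline once at startup and reuse it for every request.
pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=dtype).to(device)

def generate_image(prompt, num_inference_steps=28):
    # The pipeline returns PIL images directly; no manual decoding is needed.
    return pipe(prompt, num_inference_steps=int(num_inference_steps)).images[0]

A function with this signature drops into the same Gradio wiring as the committed file: generate_button.click(fn=generate_image, inputs=[prompt, num_inference_steps], outputs=result).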