sagar4tech committed on
Commit 5626163 · verified · 1 Parent(s): 19356d7

Upload 2 files

Files changed (2)
  1. inference.py +31 -0
  2. main.py +30 -0
inference.py ADDED
@@ -0,0 +1,31 @@
+
+#### Text-to-image on a Gradio UI
+import torch
+from diffusers import DiffusionPipeline
+import gradio as gr
+
+
+# Path to a locally stored model directory (kept for reference; not used below)
+model_file_path = "/model/file/path/to/model/directory/"
+
+# Hugging Face Hub model id
+model_id = "runwayml/stable-diffusion-v1-5"
+
+# Load the safetensors model
+pipe = DiffusionPipeline.from_pretrained(model_id)
+# Metal Performance Shaders (MPS) can speed up inference on a Mac's built-in GPU: pipe = pipe.to("mps")
+
+def predict(text):
+    # pipe(text) returns a pipeline output; .images[0] is the first generated PIL image
+    generated_image = pipe(text).images[0]
+    return generated_image
+
+
+
+demo = gr.Interface(
+    fn=predict,
+    inputs='text',
+    outputs='image',
+)
+
+demo.launch(server_name="0.0.0.0", server_port=7000)
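A minimal sketch of the MPS optimization mentioned in the comment above, assuming a PyTorch build with Metal support; the device check and CPU fallback are additions for illustration, not part of the commit:

    import torch
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    # Assumption: use Apple's MPS backend when it is available, otherwise stay on the CPU.
    device = "mps" if torch.backends.mps.is_available() else "cpu"
    pipe = pipe.to(device)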
main.py ADDED
@@ -0,0 +1,30 @@
+
+## Mounting Gradio on FastAPI
+
+from fastapi import FastAPI
+import gradio as gr
+# Importing demo from Model.inference would run inference.py's demo.launch() and be shadowed below, so a separate interface is defined here
+
+# Define your predict function
+def predict(text):
+    # Your prediction logic here
+    return text[::-1]  # Just an example: reverse the input text
+
+# Create a Gradio interface
+demo = gr.Interface(
+    fn=predict,
+    inputs='text',
+    outputs='text',
+    title='Text Reversal'  # Title shown in the Gradio UI
+)
+
+# Create a FastAPI app
+app = FastAPI()
+
+# Define your root route
+@app.get('/')
+async def root():
+    return {'message': 'Gradio is running'}
+
+# Mount the Gradio interface onto the FastAPI app
+app = gr.mount_gradio_app(app, demo, path='/gradio')
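The commit does not include a run command; with the mount above, a standard way to serve main.py is uvicorn (host and port below are uvicorn's defaults, not taken from this commit), after which the Gradio UI is reachable under the /gradio path:

    # Assumed invocation:
    #   uvicorn main:app
    # Gradio UI: http://127.0.0.1:8000/gradio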