from flask import Flask, jsonify, request
from pathlib import Path
import sys
import torch
import os
from torch import autocast
from diffusers import StableDiffusionPipeline, DDIMScheduler, DiffusionPipeline
import streamlit as st
from huggingface_hub import login
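# Log in to the Hugging Face Hub; HF_TOKEN is expected in the environment (e.g. a Space secret)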
HF_TOKEN = os.environ.get("HF_TOKEN")
login(token=HF_TOKEN)
# model_path = WEIGHTS_DIR # If you want to use previously trained model saved in gdrive, replace this with the full path of model in gdrive
# headers = {"Authorization": "Bearer xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}
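# Load the fine-tuned Stable Diffusion pipeline onto the GPU, swap in the DDIM
# scheduler, and enable xformers memory-efficient attention to reduce VRAM use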
pipe = StableDiffusionPipeline.from_pretrained("Divyanshu04/Finetuned-model", safety_checker=None, torch_dtype=torch.float32).to("cuda")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.enable_xformers_memory_efficient_attention()
g_cuda = None
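# Resolve this script's directory, add it to sys.path, and make the path relative to the CWD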
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # directory containing this script
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))
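# The Flask app is instantiated, but its route is commented out below; the UI is driven by Streamlit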
app = Flask(__name__)
# @app.route("/", methods=["POST"])
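# Render the prompt form and, when it is submitted, run the diffusion pipeline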
def generate():
    with st.form(key="Form :", clear_on_submit=True):
        prompt = st.text_area(label="prompt", key="pmpt")
        negative_prompt = st.text_area(label="Negative prompt", key="ng_pmpt")
        num_samples = st.number_input("No. of samples", min_value=1, step=1)
        Submit = st.form_submit_button(label='Submit')
    if Submit:
        guidance_scale = 7.5
        num_inference_steps = 24
        height = 512
        width = 512

        # Seed the CUDA generator so results are reproducible across runs
        g_cuda = torch.Generator(device='cuda')
        seed = 52362
        g_cuda.manual_seed(seed)
        # commandline_args = os.environ.get('COMMANDLINE_ARGS', "--skip-torch-cuda-test --no-half")
        with autocast("cuda"), torch.inference_mode():
            images = pipe(
                prompt,
                height=height,
                width=width,
                negative_prompt=negative_prompt,
                num_images_per_prompt=int(num_samples),
                num_inference_steps=num_inference_steps,
                guidance_scale=guidance_scale,
                generator=g_cuda,
            ).images

        # Show the generated images in the app
        for image in images:
            st.image(image)

        return {"message": "successful"}
    else:
        st.write('<Enter parameters to generate image>')
# driver function
if __name__ == '__main__':
    generate()