# NOTE: the following header lines were residue from the Hugging Face web
# file viewer (user avatar / "Changes" / commit 073d257 / raw / history
# blame / 2.62 kB) and were not part of the program; kept here as a comment
# so the file remains valid Python.
from flask import Flask, jsonify, request
from pathlib import Path
import sys
import torch
import os
from torch import autocast
from diffusers import StableDiffusionPipeline, DDIMScheduler, DiffusionPipeline
import streamlit as st
import io
from PIL import Image
from huggingface_hub import login
# Authenticate with the Hugging Face Hub.
# SECURITY: the original hard-coded a token here. Any token committed to a
# repository must be considered leaked and revoked. Read it from the
# environment instead (as the original commented-out line already hinted).
HF_TOKEN = os.environ.get("HF_TOKEN")
if HF_TOKEN:
    login(token=HF_TOKEN)
# pipe = StableDiffusionPipeline.from_pretrained("Divyanshu04/Finetuned-model", safety_checker=None, torch_dtype=torch.float16).to("cpu")
# pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
# pipe.enable_xformers_memory_efficient_attention() #if gpu is available
# g_cuda = None
# Make the directory containing this script importable, then keep a
# cwd-relative handle to it for later path construction.
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # directory this script lives in
if not any(entry == str(ROOT) for entry in sys.path):
    sys.path.append(str(ROOT))  # allow local-module imports
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative form of the same dir
app = Flask(__name__)

import requests  # NOTE(review): should live in the top-of-file import block

# Hugging Face Inference API endpoint for the fine-tuned model.
API_URL = "https://api-inference.huggingface.co/models/Divyanshu04/Finetuned-model"
# SECURITY: the original embedded a bearer token in source. A committed token
# is leaked and must be revoked; read the credential from the environment.
headers = {"Authorization": f"Bearer {os.environ.get('HF_API_TOKEN', '')}"}
def query(payload):
    """POST *payload* as JSON to the Inference API and return the raw body.

    The body is returned as ``bytes``: image data on success, a JSON error
    document otherwise (the caller decides how to interpret it).
    """
    resp = requests.post(API_URL, headers=headers, json=payload)
    return resp.content
# @app.route("/", methods=["POST"])
def generate():
    """Render a Streamlit form and display an image generated from the prompt.

    Side effects only: reads form inputs via Streamlit, calls the Hugging
    Face Inference API through ``query`` and renders the result with
    ``st.image``. Returns ``None``.
    """
    with st.form(key="Form :", clear_on_submit=True):
        prompt = st.text_area(label="prompt", key="pmpt")
        negative_prompt = st.text_area(label="Negative prompt", key="ng_pmpt")
        # NOTE(review): num_samples is collected but the Inference API call
        # below returns a single image; the value is currently unused.
        num_samples = st.number_input("No. of samples", step=1)
        submit = st.form_submit_button(label='Submit')

    if submit:
        # Forward the negative prompt too — the original sent only `prompt`,
        # silently ignoring the user's negative-prompt input.
        image_bytes = query({
            "inputs": prompt,
            "parameters": {"negative_prompt": negative_prompt},
        })
        try:
            image = Image.open(io.BytesIO(image_bytes))
        except Exception:
            # The Inference API returns a JSON error body (e.g. while the
            # model is still loading); surface it instead of crashing on
            # the image decode.
            st.error(image_bytes.decode("utf-8", errors="replace"))
            return
        st.image(image)
    else:
        st.write('<Enter parameters to generate image>')
# Script entry point: launch the Streamlit UI.
if __name__ == '__main__':
    generate()