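# Streamlit front end for a fine-tuned Stable Diffusion model
# ("Divyanshu04/Finetuned-model"), served through the Hugging Face Inference API.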
from flask import Flask, jsonify, request
from pathlib import Path
import io
import os
import sys

import requests
import torch
from torch import autocast
from diffusers import StableDiffusionPipeline, DDIMScheduler, DiffusionPipeline
import streamlit as st
from PIL import Image
from huggingface_hub import login

# Read the access token from the environment rather than hard-coding it;
# any token that has been committed to source control should be revoked.
HF_TOKEN = os.environ.get("HF_TOKEN")

login(token=HF_TOKEN)


# Optional: load the fine-tuned model locally with diffusers instead of
# calling the hosted Inference API (see the commented block in generate()).
# pipe = StableDiffusionPipeline.from_pretrained("Divyanshu04/Finetuned-model", safety_checker=None, torch_dtype=torch.float16).to("cpu")
# pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
# pipe.enable_xformers_memory_efficient_attention()  # only if a GPU is available
# g_cuda = None
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # directory containing this script
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add the script directory to PATH for local imports
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))

app = Flask(__name__)  # retained for the commented-out REST route below; the UI itself runs on Streamlit

API_URL = "https://api-inference.huggingface.co/models/Divyanshu04/Finetuned-model"
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
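# Provide the token before launching, e.g.:
#   export HF_TOKEN=<your Hugging Face access token>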

def query(payload):
    """POST a JSON payload to the hosted Inference API and return the raw response bytes."""
    response = requests.post(API_URL, headers=headers, json=payload)
    response.raise_for_status()  # fail loudly instead of passing an error body to PIL
    return response.content
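# Example call (a sketch; whether "parameters" such as negative_prompt are
# forwarded to the underlying diffusers pipeline depends on the hosted
# text-to-image endpoint):
#   query({"inputs": "a watercolor lighthouse at dusk",
#          "parameters": {"negative_prompt": "blurry, low quality"}})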

# @app.route("/", methods=["POST"])
def generate():

    with st.form(key="Form :", clear_on_submit=True):
        prompt = st.text_area(label="Prompt", key="pmpt")
        negative_prompt = st.text_area(label="Negative prompt", key="ng_pmpt")
        # negative_prompt and num_samples are only consumed by the commented-out
        # local pipeline below; the hosted API call sends the prompt alone.
        num_samples = st.number_input("No. of samples", step=1)

        submit = st.form_submit_button(label='Submit')

    if submit:

        image_bytes = query({"inputs": prompt})
        # The Inference API returns raw image bytes; decode them with PIL.
        image = Image.open(io.BytesIO(image_bytes))

        # guidance_scale = 7.5
        # num_inference_steps = 24
        # height = 512
        # width = 512

        # g_cuda = torch.Generator(device='cpu')
        # seed = 52362
        # g_cuda.manual_seed(seed)


        # with autocast("cpu"), torch.inference_mode():
        #     images = pipe(
        #         prompt,
        #         height=height,
        #         width=width,
        #         negative_prompt=negative_prompt,
        #         num_images_per_prompt=num_samples,
        #         num_inference_steps=num_inference_steps,
        #         guidance_scale=guidance_scale,
        #         generator=g_cuda
        #     ).images
        
        st.image(image)

    else:
        # Plain text: st.write treats angle-bracketed strings as HTML and hides them.
        st.write('Enter parameters to generate an image.')



  
# driver function: Streamlit executes the script top to bottom, so call generate() directly
if __name__ == '__main__':
    generate()
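# To launch the UI (assuming this file is saved as app.py):
#   streamlit run app.py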