Divyanshu04 committed on
Commit
c509b59
·
1 Parent(s): 0ce1c84
Files changed (1)
  1. app.py +26 -43
app.py CHANGED
@@ -15,11 +15,11 @@ from huggingface_hub import login
 login(token='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')
 
 
-# pipe = StableDiffusionPipeline.from_pretrained("Divyanshu04/Finetuned-model", safety_checker=None, torch_dtype=torch.float16).to("cpu")
+pipe = StableDiffusionPipeline.from_pretrained("Divyanshu04/Finetuned-model", safety_checker=None, torch_dtype=torch.float16).to("cpu")
 
-# pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
 # pipe.enable_xformers_memory_efficient_attention() #if gpu is available
-# g_cuda = None
+g_cuda = None
 
 FILE = Path(__file__).resolve()
 ROOT = FILE.parents[0]  # YOLOv5 root directory
@@ -29,16 +29,6 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd()))
 
 app = Flask(__name__)
 
-import requests
-
-API_URL = "https://api-inference.huggingface.co/models/Divyanshu04/Finetuned-model"
-headers = {"Authorization": "Bearer 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"}
-
-def query(payload):
-    response = requests.post(API_URL, headers=headers, json=payload)
-    return response.content
-
-
 
 # @app.route("/", methods=["POST"])
 def generate():
@@ -52,37 +42,30 @@ def generate():
 
     if Submit:
 
-        image_bytes = query({"inputs": prompt,})
-
-        img_io = io.BytesIO(image_bytes)
-
-        image = Image.open(img_io)
-        img_io.seek(0)
-        byteImg = img_io.read()
-
-        # guidance_scale = 7.5
-        # num_inference_steps = 24
-        # height = 512
-        # width = 512
-
-        # g_cuda = torch.Generator(device='cpu')
-        # seed = 52362
-        # g_cuda.manual_seed(seed)
-
-
-        # with autocast("cpu"), torch.inference_mode():
-        #     images = pipe(
-        #         prompt,
-        #         height=height,
-        #         width=width,
-        #         negative_prompt=negative_prompt,
-        #         num_images_per_prompt=num_samples,
-        #         num_inference_steps=num_inference_steps,
-        #         guidance_scale=guidance_scale,
-        #         generator=g_cuda
-        #     ).images
+
+        guidance_scale = 7.5
+        num_inference_steps = 24
+        height = 512
+        width = 512
+
+        g_cuda = torch.Generator(device='cpu')
+        seed = 52362
+        g_cuda.manual_seed(seed)
+
+
+        with autocast("cpu"), torch.inference_mode():
+            images = pipe(
+                prompt,
+                height=height,
+                width=width,
+                negative_prompt=negative_prompt,
+                num_images_per_prompt=num_samples,
+                num_inference_steps=num_inference_steps,
+                guidance_scale=guidance_scale,
+                generator=g_cuda
+            ).images
 
-        st.image(byteImg)
+        st.image(images)
 
     else:
         st.write('<Enter parameters to generate image>')
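
For context, this commit swaps the Hosted Inference API call (the removed query helper) for local generation through diffusers. Below is a minimal, self-contained sketch of that local flow, not the committed app.py: it assumes torch, diffusers, and transformers are installed, assumes access to the Divyanshu04/Finetuned-model checkpoint (the diff logs in via huggingface_hub.login for this), keeps the DDIM scheduler and sampling settings from the diff, loads weights in float32 rather than the diff's float16 (half precision is generally unsupported for CPU inference), and saves to a hypothetical out.png instead of rendering with st.image.

# sketch.py -- illustrative only; prompt, negative prompt, and output path are placeholders
import torch
from diffusers import StableDiffusionPipeline, DDIMScheduler

# Load the fine-tuned checkpoint referenced in the diff; float32 for CPU.
pipe = StableDiffusionPipeline.from_pretrained(
    "Divyanshu04/Finetuned-model",
    safety_checker=None,
    torch_dtype=torch.float32,
).to("cpu")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

# Deterministic sampling with the seed used in the diff.
generator = torch.Generator(device="cpu").manual_seed(52362)

with torch.inference_mode():
    images = pipe(
        "a photo of a person",                   # placeholder prompt
        negative_prompt="blurry, low quality",   # placeholder negative prompt
        height=512,
        width=512,
        num_images_per_prompt=1,
        num_inference_steps=24,
        guidance_scale=7.5,
        generator=generator,
    ).images

images[0].save("out.png")  # hypothetical path; the app displays it via st.image instead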