Update app.py
app.py CHANGED
@@ -28,7 +28,7 @@ MAX_IMAGE_SIZE = 1344
 
 @spaces.GPU
 def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
-
+    print("start inference...")
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
@@ -41,10 +41,10 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
         {"role": "assistant", "content": "A gourmet scene in a high-end restaurant kitchen where a chef is presenting a plate of cooked beef testicles, garnished elegantly with herbs and spices. The chef, a middle-aged Caucasian man wearing a white chef's hat and coat, is inspecting the dish with a satisfied expression. The kitchen background is bustling with other chefs and kitchen staff, and the atmosphere is warm and inviting with hanging pots and pans, and a glowing, busy stove in the background. The focus is on the chef's proud presentation of this unusual but delicately prepared dish."},
         {"role": "user", "content": prompt},
     ]
-    tokenized_input = tokenizer.apply_chat_templete(messages,
+    tokenized_input = tokenizer.apply_chat_templete(messages, add_generation_prompt=True, return_tensors="pt")
     with torch.inference_mode():
         output = upsampler.generate(
-            tokenized_input,
+            tokenized_input.to(upsampler.device),
            max_new_tokens=512,
            do_sample=True,
            top_p=0.95,
@@ -52,7 +52,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
            repetition_penalty=1.05,
        )[0]
    print(tokenizer.decode(output))
-    upsampled_prompt=output
+    upsampled_prompt=tokenizer.decode(output)
 
    print(upsampled_prompt)
 
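For context, a minimal, self-contained sketch of the prompt-upsampling step as this commit leaves it. This is a hedged reading of the diff, not the Space's actual code: the model id is a placeholder, the few-shot chat messages from app.py are abbreviated, and `apply_chat_templete` in the diff is read as the standard Transformers method `apply_chat_template`.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder model id -- the real upsampler checkpoint is defined elsewhere in app.py.
MODEL_ID = "your/prompt-upsampler"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
upsampler = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16)
upsampler.to("cuda" if torch.cuda.is_available() else "cpu")

def upsample_prompt(prompt: str) -> str:
    """Expand a short user prompt into a detailed one with the chat LLM."""
    messages = [
        # The few-shot user/assistant examples from app.py are omitted here.
        {"role": "user", "content": prompt},
    ]
    # Render the chat template, append the assistant generation prompt,
    # and return token ids as a tensor (the return_tensors="pt" this commit adds).
    tokenized_input = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    )
    with torch.inference_mode():
        output = upsampler.generate(
            # Move the ids onto the model's device, as the commit does.
            tokenized_input.to(upsampler.device),
            max_new_tokens=512,
            do_sample=True,
            top_p=0.95,
            repetition_penalty=1.05,
        )[0]
    # Decode tokens back to text; the commit assigns the decoded string
    # (rather than the raw token tensor) to upsampled_prompt.
    return tokenizer.decode(output, skip_special_tokens=True)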