Update app.py

app.py CHANGED
@@ -15,7 +15,7 @@ repo = "stabilityai/stable-diffusion-3-medium"
 t2i = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=torch.float16, revision="refs/pr/26",token=os.environ["TOKEN"]).to(device)
 
 model_id = "microsoft/Phi-3-medium-128k-instruct"
-model = AutoModelForCausalLM.from_pretrained(
+upsampler = AutoModelForCausalLM.from_pretrained(
     model_id,
     device_map=device,
     torch_dtype=torch.bfloat16,
@@ -23,19 +23,6 @@ model = AutoModelForCausalLM.from_pretrained(
 )
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 
-upsampler = pipeline(
-    "text-generation",
-    model=model,
-    tokenizer=tokenizer,
-)
-
-generation_args = {
-    "max_new_tokens": 300,
-    "return_full_text": False,
-    "temperature": 0.7,
-    "do_sample": True,
-}
-
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1344
 
@@ -54,8 +41,18 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
         {"role": "assistant", "content": "A gourmet scene in a high-end restaurant kitchen where a chef is presenting a plate of cooked beef testicles, garnished elegantly with herbs and spices. The chef, a middle-aged Caucasian man wearing a white chef's hat and coat, is inspecting the dish with a satisfied expression. The kitchen background is bustling with other chefs and kitchen staff, and the atmosphere is warm and inviting with hanging pots and pans, and a glowing, busy stove in the background. The focus is on the chef's proud presentation of this unusual but delicately prepared dish."},
         {"role": "user", "content": prompt},
     ]
-
-
+    tokenized_input = tokenizer.apply_chat_template(messages, add_special_tokens=False, add_generation_prompt=True, return_tensors="pt").to(upsampler.device)
+    with torch.inference_mode():
+        output = upsampler.generate(
+            tokenized_input,
+            max_new_tokens=512,
+            do_sample=True,
+            top_p=0.95,
+            temperature=0.7,
+            repetition_penalty=1.05,
+        )[0]
+    print(tokenizer.decode(output))
+    upsampled_prompt = tokenizer.decode(output[tokenized_input.shape[-1]:], skip_special_tokens=True)
 
     print(upsampled_prompt)
 
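With this change, the Phi-3 model itself replaces the old text-generation pipeline as the prompt upsampler: the chat messages are run through the tokenizer's chat template, sampled with generate(), and only the newly generated tokens are decoded back into the enriched prompt that feeds the SD3 pipeline. Below is a minimal, self-contained sketch of that flow, not the Space's exact code: the upsample_prompt helper name is hypothetical, the device is assumed to be CUDA, and the Space's few-shot example messages and Gradio plumbing are elided.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "microsoft/Phi-3-medium-128k-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
upsampler = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="cuda", torch_dtype=torch.bfloat16  # assumes a CUDA device
)

def upsample_prompt(prompt: str) -> str:
    # Single-turn chat here; the Space prepends its own few-shot examples.
    messages = [{"role": "user", "content": prompt}]
    tokenized_input = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,  # append the assistant header so the model replies
        return_tensors="pt",
    ).to(upsampler.device)
    with torch.inference_mode():
        output = upsampler.generate(
            tokenized_input,
            max_new_tokens=512,
            do_sample=True,
            top_p=0.95,
            temperature=0.7,
            repetition_penalty=1.05,
        )[0]
    # generate() returns token ids for prompt + completion; slice off the prompt
    # before decoding (unlike a pipeline, there is no 'generated_text' dict here).
    return tokenizer.decode(output[tokenized_input.shape[-1]:], skip_special_tokens=True)

# The enriched prompt would then drive the SD3 pipeline, e.g.:
# image = t2i(prompt=upsample_prompt(user_prompt), negative_prompt=negative_prompt).images[0]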