Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -5,11 +5,11 @@ import torch
|
|
5 |
# Cargar modelo Qwen2.5
|
6 |
model_name = "Qwen/Qwen2.5-7B"
|
7 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
8 |
-
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.
|
9 |
|
10 |
# Función de respuesta
|
11 |
def chat(message):
|
12 |
-
inputs = tokenizer(message, return_tensors="pt")
|
13 |
output = model.generate(**inputs, max_new_tokens=50)
|
14 |
return tokenizer.decode(output[0], skip_special_tokens=True)
|
15 |
|
|
|
5 |
# Cargar modelo Qwen2.5
|
6 |
model_name = "Qwen/Qwen2.5-7B"
|
7 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
8 |
+
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float32)
|
9 |
|
10 |
# Función de respuesta
|
11 |
def chat(message):
|
12 |
+
inputs = tokenizer(message, return_tensors="pt")
|
13 |
output = model.generate(**inputs, max_new_tokens=50)
|
14 |
return tokenizer.decode(output[0], skip_special_tokens=True)
|
15 |
|