Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -6,14 +6,14 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
|
6 |
from huggingface_hub import login
|
7 |
# API Key de Hugging Face
|
8 |
huggingface_token = st.secrets["FIREWORKS"]
|
9 |
-
# Autenticar
|
10 |
#login(api_key)
|
11 |
|
12 |
|
13 |
# Configurar modelo Llama 3.1
|
14 |
model_id = "meta-llama/Llama-3.2-1B"
|
15 |
-
tokenizer = AutoTokenizer.from_pretrained(
|
16 |
-
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto",
|
17 |
|
18 |
# Crear pipeline con Fireworks
|
19 |
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=1024)
|
|
|
# --- Model setup (app.py lines 6-19) ---
# NOTE(review): `st` (streamlit) and `torch` are used below but their imports
# are outside this visible chunk — presumably imported near the top of app.py;
# confirm `import streamlit as st` and `import torch` exist there.
from huggingface_hub import login

# Hugging Face API key, read from Streamlit secrets.
huggingface_token = st.secrets["FIREWORKS"]

# Authenticate against the Hugging Face Hub so gated models can be downloaded.
login(huggingface_token)

# Configure the Llama 3.2 model (comment previously said "3.1", which
# contradicted the actual model id below).
model_id = "meta-llama/Llama-3.2-1B"

# BUG FIX: the original called AutoTokenizer.from_pretrained(model_i) —
# `model_i` is an undefined name and raised NameError; it must be `model_id`.
tokenizer = AutoTokenizer.from_pretrained(model_id)
# Load weights in fp16 and let accelerate place them automatically.
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", torch_dtype=torch.float16
)

# Create the text-generation pipeline used by the rest of the app.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=1024)