Update app.py
app.py
CHANGED
@@ -22,7 +22,7 @@ vectordb = Chroma.from_documents(splits, embedding=embeddings)
 ranker = Reranker("answerdotai/answerai-colbert-small-v1", model_type="colbert")

 # Load the Hugging Face language model
-model_id = "tiiuae/falcon-
+model_id = "tiiuae/falcon-rw-1b"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto")
 generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
@@ -55,7 +55,7 @@ Pregunta: {query}
 Respuesta:"""

 # Generate the answer
-output = generator(prompt, max_new_tokens=100, do_sample=False)[0]["generated_text"]
 response = output.split("Respuesta:")[-1].strip()
 return response
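For context, a minimal standalone sketch of what the changed lines do after this commit. The prompt shown here is hypothetical (in app.py it is assembled from the retrieved and reranked documents); the key point is that a transformers text-generation pipeline returns a list of dicts, so the `[0]["generated_text"]` indexing added in this commit is what gives the following `.split("Respuesta:")` a string to work on.

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_id = "tiiuae/falcon-rw-1b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto")
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Hypothetical prompt; app.py builds this from the retrieved context.
prompt = "Pregunta: ¿Qué hace un reranker?\nRespuesta:"

# The pipeline returns a list like [{"generated_text": "..."}], and the
# generated text includes the prompt, so the answer is whatever follows
# the final "Respuesta:" marker.
output = generator(prompt, max_new_tokens=100, do_sample=False)[0]["generated_text"]
response = output.split("Respuesta:")[-1].strip()
print(response)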