PablitoGil14 committed
Commit b02d5a7 · verified · 1 Parent(s): 0742a19

Update app.py

Files changed (1): app.py (+2 -2)
app.py CHANGED
@@ -22,7 +22,7 @@ vectordb = Chroma.from_documents(splits, embedding=embeddings)
 ranker = Reranker("answerdotai/answerai-colbert-small-v1", model_type="colbert")
 
 # Cargar modelo de lenguaje de Hugging Face
-model_id = "tiiuae/falcon-7b-instruct"
+model_id = "tiiuae/falcon-rw-1b"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto")
 generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
@@ -55,7 +55,7 @@ Pregunta: {query}
 Respuesta:"""
 
 # Generar respuesta
-output = generator(prompt, max_new_tokens=300, do_sample=False)[0]["generated_text"]
+output = generator(prompt, max_new_tokens=100, do_sample=False)[0]["generated_text"]
 response = output.split("Respuesta:")[-1].strip()
 return response
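
The two changed lines swap the generator model for the smaller tiiuae/falcon-rw-1b checkpoint and lower max_new_tokens from 300 to 100. For context, below is a minimal, self-contained sketch of how those lines sit in the load-and-generate path, assuming the standard Hugging Face transformers API; the Chroma vector store and ColBERT reranker shown in the hunk headers are omitted, and the example prompt is a hypothetical stand-in for the app's Pregunta/Respuesta template.

# Minimal sketch of the updated load-and-generate path (assumptions noted above;
# the retrieval pieces from app.py are left out).
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

model_id = "tiiuae/falcon-rw-1b"  # smaller model introduced by this commit
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto")
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Hypothetical prompt ending in "Respuesta:" so the answer can be split out.
prompt = "Pregunta: ¿De qué trata el documento?\nRespuesta:"
# Greedy decoding, capped at the new 100-token limit set in this commit.
output = generator(prompt, max_new_tokens=100, do_sample=False)[0]["generated_text"]
response = output.split("Respuesta:")[-1].strip()
print(response)

Keeping do_sample=False makes the answer deterministic; the smaller checkpoint and lower token cap plausibly reduce memory use and latency, though the commit message itself does not state the motivation.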