acumplid committed on
Commit 88e6c95 · 1 Parent(s): fb8ab3e

Included venv

Files changed (1)
  1. app.py +6 -4
app.py CHANGED
@@ -17,8 +17,8 @@ rag = RAG(
     hf_token=os.getenv("HF_TOKEN"),
     embeddings_model=os.getenv("EMBEDDINGS"),
     model_name=os.getenv("MODEL"),
-
-
+    rerank_model=os.getenv("RERANK_MODEL"),
+    rerank_number_contexts=int(os.getenv("RERANK_NUMBER_CONTEXTS"))
 )
 
 
@@ -35,6 +35,7 @@ def generate(prompt, model_parameters):
         gr.Warning(
             "Inference endpoint is not available right now. Please try again later."
         )
+        return None, None, None
 
 
 def submit_input(input_, num_chunks, max_new_tokens, repetition_penalty, top_k, top_p, do_sample, temperature):
@@ -58,8 +59,9 @@ def submit_input(input_, num_chunks, max_new_tokens, repetition_penalty, top_k, top_p, do_sample, temperature):
 
     for url in source:
         sources_markup += f'<a href="{url}" target="_blank">{url}</a><br>'
-
-    return output.strip(), sources_markup, context
+
+    return output, sources_markup, context
+    # return output.strip(), sources_markup, context
 
 
 def change_interactive(text):
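
For context, a minimal sketch of the constructor call as it stands after this commit, with hypothetical local values for the environment variables it reads. The import path and the example values are assumptions; only the RAG(...) keyword arguments come from the diff above.

import os

from rag import RAG  # assumed import path for the app's RAG class

# Hypothetical values for local testing; in the Space these would normally
# be set as repository secrets/variables, not hard-coded.
os.environ.setdefault("HF_TOKEN", "hf_xxx")                        # placeholder token
os.environ.setdefault("EMBEDDINGS", "example-embeddings-model")    # hypothetical embeddings model id
os.environ.setdefault("MODEL", "example-generation-model")         # hypothetical generation model / endpoint
os.environ.setdefault("RERANK_MODEL", "example-rerank-model")      # hypothetical reranker model id
os.environ.setdefault("RERANK_NUMBER_CONTEXTS", "3")               # number of contexts kept after reranking

# Mirrors the RAG(...) call in app.py after this commit.
rag = RAG(
    hf_token=os.getenv("HF_TOKEN"),
    embeddings_model=os.getenv("EMBEDDINGS"),
    model_name=os.getenv("MODEL"),
    rerank_model=os.getenv("RERANK_MODEL"),
    rerank_number_contexts=int(os.getenv("RERANK_NUMBER_CONTEXTS")),
)

The "return None, None, None" added to generate presumably keeps callers that unpack three values (matching the output, sources_markup, context triple returned by submit_input) from raising a TypeError when the inference endpoint is unavailable, instead of the implicit single None returned before this change.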