Fecalisboa committed on
Commit
857c5a8
·
verified ·
1 Parent(s): 7bcad39

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -3
app.py CHANGED
@@ -20,7 +20,7 @@ api_token = os.getenv("HF_TOKEN")
20
 
21
 
22
 
23
- list_llm = ["meta-llama/Meta-Llama-3-8B-Instruct", "mistralai/Mistral-7B-Instruct-v0.3"]
24
  list_llm_simple = [os.path.basename(llm) for llm in list_llm]
25
 
26
  # Load PDF document and create doc splits
@@ -67,14 +67,24 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, pr
67
  max_new_tokens=max_tokens,
68
  top_k=top_k,
69
  )
70
- else:
 
71
  llm = HuggingFaceEndpoint(
 
72
  huggingfacehub_api_token=api_token,
73
- repo_id=llm_model,
74
  temperature=temperature,
75
  max_new_tokens=max_tokens,
76
  top_k=top_k,
77
  )
 
 
 
 
 
 
 
 
 
78
 
79
  progress(0.75, desc="Defining buffer memory...")
80
  memory = ConversationBufferMemory(
 
20
 
21
 
22
 
23
+ list_llm = ["meta-llama/Meta-Llama-3-8B-Instruct", "mistralai/Mistral-7B-Instruct-v0.3","CohereForAI/aya-23-35B"]
24
  list_llm_simple = [os.path.basename(llm) for llm in list_llm]
25
 
26
  # Load PDF document and create doc splits
 
67
  max_new_tokens=max_tokens,
68
  top_k=top_k,
69
  )
70
+ elif:
71
+ llm_model1 == "mistralai/Mistral-7B-Instruct-v0.3":
72
  llm = HuggingFaceEndpoint(
73
+ repo_id=llm_model1,
74
  huggingfacehub_api_token=api_token,
 
75
  temperature=temperature,
76
  max_new_tokens=max_tokens,
77
  top_k=top_k,
78
  )
79
+ else:
80
+ llm_model2 == "CohereForAI/aya-23-35B":
81
+ llm = HuggingFaceEndpoint(
82
+ repo_id=llm_model2,
83
+ huggingfacehub_api_token=api_token,
84
+ temperature=temperature,
85
+ max_new_tokens=max_tokens,
86
+ top_k=top_k,
87
+ )
88
 
89
  progress(0.75, desc="Defining buffer memory...")
90
  memory = ConversationBufferMemory(