JPLTedCas committed on
Commit 7b4617e · verified · 1 Parent(s): ca19ca5

Update app.py

Files changed (1)
  1. app.py +9 -3
app.py CHANGED
@@ -137,12 +137,18 @@ def get_vectorstore(text_chunks):
 
 def get_conversation_chain(vectorstore:FAISS) -> ConversationalRetrievalChain:
     # llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
+    #llm = HuggingFaceHub(
+    #    repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
+    #    #repo_id="clibrain/lince-mistral-7b-it-es",
+    #    #repo_id="TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF"
+    #    model_kwargs={"temperature": 0.5, "max_length": 2096},#1048
+    #)
     llm = HuggingFaceHub(
         repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
-        #repo_id="clibrain/lince-mistral-7b-it-es",
-        #repo_id="TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF"
-        model_kwargs={"temperature": 0.5, "max_length": 2096},#1048
+        model_kwargs={"temperature": 0.5, "max_new_tokens": 1024, "max_length": 1048, "top_k": 3, "trust_remote_code": True, "torch_dtype": "auto"},
     )
+    # llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
+
 
     memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
     conversation_chain = ConversationalRetrievalChain.from_llm(
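
A minimal usage sketch (not part of this commit) of how the updated get_conversation_chain could be exercised. It assumes a valid HUGGINGFACEHUB_API_TOKEN is set in the environment and reuses get_vectorstore from earlier in app.py; the sample text chunks and questions are hypothetical.

# Minimal usage sketch, not part of the commit. Assumes get_vectorstore() and
# get_conversation_chain() are the app.py functions shown above and that a
# HUGGINGFACEHUB_API_TOKEN is available; the chunks and questions are made up.
text_chunks = [
    "Mixtral-8x7B-Instruct-v0.1 is called through the Hugging Face Hub.",
    "max_new_tokens=1024 caps how many tokens each generated answer may contain.",
]
vectorstore = get_vectorstore(text_chunks)    # FAISS index over the chunks
chain = get_conversation_chain(vectorstore)   # chain configured by this commit

result = chain({"question": "What limits the length of an answer?"})
print(result["answer"])
# ConversationBufferMemory stores each turn, so a follow-up question sees the history.
result = chain({"question": "And which repo_id serves the model?"})
print(result["answer"])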