Update app_config.py
app_config.py CHANGED (+6 -1)
@@ -6,6 +6,11 @@ from langchain.document_loaders import PyPDFLoader
 from langchain.memory import ConversationSummaryBufferMemory
 from langchain_groq import ChatGroq
 import os
+from dotenv import load_dotenv
+
+
+# Load environment variables from .env file
+load_dotenv()
 tokenizer = tiktoken.get_encoding('cl100k_base')
 FILE_NAMEs = os.listdir('data')
 
@@ -76,7 +81,7 @@ def get_vectorstore():
     return vectorstore
 
 
-chat = ChatGroq(temperature=0, groq_api_key="
+chat = ChatGroq(temperature=0, groq_api_key=os.getenv("GROQ_API_KEY"), model_name="llama3-8b-8192", streaming=True)
 rag_memory = ConversationSummaryBufferMemory(llm=chat, max_token_limit=3000)
 
 my_vector_store = get_vectorstore()
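
This commit moves the Groq API key out of the source file: the old line hardcoded the key directly in app_config.py (truncated in the diff above), while the new code loads it from a .env file via python-dotenv and reads it with os.getenv. A minimal sketch of the resulting pattern; the .env contents and the fail-fast check are illustrative additions, not part of the commit:

# .env (kept out of version control), hypothetical contents:
#   GROQ_API_KEY=gsk_your_key_here

import os
from dotenv import load_dotenv
from langchain_groq import ChatGroq

load_dotenv()  # copies the .env entries into the process environment

api_key = os.getenv("GROQ_API_KEY")
if api_key is None:
    # Fail fast with a clear message instead of an opaque auth error at request time.
    raise RuntimeError("GROQ_API_KEY is not set; add it to .env or the environment")

chat = ChatGroq(
    temperature=0,
    groq_api_key=api_key,
    model_name="llama3-8b-8192",
    streaming=True,
)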
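The diff does not show how chat, rag_memory, and my_vector_store are consumed elsewhere in the Space. Purely as a hypothetical illustration of how three such objects are typically wired together in legacy LangChain, one might build a conversational retrieval chain as below; ConversationalRetrievalChain, the memory_key/return_messages settings, and the sample question are assumptions, not part of this commit:

# Hypothetical wiring of the objects defined in app_config.py; nothing below
# appears in the commit itself.
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationSummaryBufferMemory

# ConversationalRetrievalChain expects its history under "chat_history",
# so the memory needs memory_key and return_messages set accordingly.
memory = ConversationSummaryBufferMemory(
    llm=chat,
    max_token_limit=3000,
    memory_key="chat_history",
    return_messages=True,
)

rag_chain = ConversationalRetrievalChain.from_llm(
    llm=chat,
    retriever=my_vector_store.as_retriever(),
    memory=memory,
)

print(rag_chain.invoke({"question": "What do the PDFs in data/ cover?"})["answer"])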