Update app.py
app.py CHANGED
@@ -20,6 +20,8 @@ from threading import Thread
 from langchain.chains import LLMChain
 from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain_core.prompts import PromptTemplate
+from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain_qdrant import Qdrant
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -54,6 +56,7 @@ load_dotenv()
 
 OPENAPI_KEY = os.getenv("OPENAPI_KEY")
 CHUTES_KEY = os.getenv("CHUTES_KEY")
+GEMINI=os.getenv("GEMINI_API_KEY")
 
 
 embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
@@ -145,7 +148,7 @@ def log_to_qdrant(question: str, answer: str):
 
 llm = ChatGoogleGenerativeAI(
     model="gemini-2.5-flash",
-    temperature=0.
+    temperature=0.2,
     max_tokens=None,
     timeout=None,
     max_retries=2,
@@ -155,6 +158,7 @@ llm = ChatGoogleGenerativeAI(
 
 
 
+
 # llm = ChatOpenAI(
 #     model="meta-llama/llama-4-maverick:free",
 #     temperature=0.1,
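The hunks above show only fragments of app.py, so the following is a minimal sketch of how the pieces introduced by this commit could plausibly fit together: the new GEMINI_API_KEY env var feeding ChatGoogleGenerativeAI, and the langchain_qdrant import backing the log_to_qdrant helper named in the hunk header. The Qdrant client URL, the "qa_logs" collection name, the google_api_key wiring, and the body of log_to_qdrant are assumptions for illustration only; just the imports, env vars, and model settings come from the diff.

import os
import logging

from dotenv import load_dotenv
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_core.documents import Document
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_qdrant import Qdrant
from qdrant_client import QdrantClient

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

load_dotenv()
GEMINI = os.getenv("GEMINI_API_KEY")  # new key introduced by this commit

embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Gemini chat model configured as in the diff; passing the key via
# google_api_key is an assumption, not shown in the visible hunks.
llm = ChatGoogleGenerativeAI(
    model="gemini-2.5-flash",
    temperature=0.2,
    max_tokens=None,
    timeout=None,
    max_retries=2,
    google_api_key=GEMINI,
)

# Hypothetical Qdrant-backed logging store; assumes a "qa_logs" collection
# already exists with 384-dim vectors to match all-MiniLM-L6-v2.
client = QdrantClient(url=os.getenv("QDRANT_URL", "http://localhost:6333"))
qa_store = Qdrant(client=client, collection_name="qa_logs", embeddings=embeddings)

def log_to_qdrant(question: str, answer: str):
    """Store a question/answer pair so it can be searched later (illustrative body)."""
    qa_store.add_documents(
        [Document(page_content=question, metadata={"answer": answer})]
    )
    logger.info("Logged Q&A pair to Qdrant")

Note that the commit also replaces the dangling temperature=0. argument (which was missing its trailing comma) with temperature=0.2, keeping Gemini's answers fairly deterministic while fixing the broken call.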