Update app.py
Browse files
app.py
CHANGED
@@ -20,6 +20,9 @@ from langchain_huggingface.llms import HuggingFacePipeline
|
|
20 |
from langchain_cerebras import ChatCerebras
|
21 |
from queue import Queue
|
22 |
from threading import Thread
|
|
|
|
|
|
|
23 |
|
24 |
# Configure logging
|
25 |
logging.basicConfig(level=logging.INFO)
|
@@ -98,10 +101,21 @@ retriever = db.as_retriever(
|
|
98 |
search_kwargs={"k": 5}
|
99 |
)
|
100 |
|
101 |
-
llm = ChatCerebras(
|
102 |
-
|
103 |
-
|
104 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
105 |
)
|
106 |
|
107 |
template = """
|
|
|
20 |
from langchain_cerebras import ChatCerebras
|
21 |
from queue import Queue
|
22 |
from threading import Thread
|
23 |
+
from langchain.chains import LLMChain
|
24 |
+
from langchain_core.prompts import PromptTemplate
|
25 |
+
from langchain_huggingface import HuggingFaceEndpoint
|
26 |
|
27 |
# Configure logging
|
28 |
logging.basicConfig(level=logging.INFO)
|
|
|
101 |
search_kwargs={"k": 5}
|
102 |
)
|
103 |
|
# LLM backend: Hugging Face Inference API endpoint.
# (The previous ChatCerebras backend was removed in this revision; restore it
# from version control rather than keeping commented-out code here.)
repo_id = "Qwen/QwQ-32B-Preview"

llm = HuggingFaceEndpoint(
    repo_id=repo_id,
    # `max_new_tokens` is the generation-length field HuggingFaceEndpoint
    # actually declares. The original `max_length=8192` is not a declared
    # field: it gets moved into `model_kwargs`, where text-generation
    # endpoints ignore it, so the 8192-token cap never took effect.
    max_new_tokens=8192,
    temperature=0.2,  # low temperature: favor focused, repeatable answers
    # NOTE(review): HF_TOKEN is defined earlier in app.py — confirm it has
    # Inference API permissions for this repo before deploying.
    huggingfacehub_api_token=HF_TOKEN,
    streaming=True,  # stream tokens so the UI can render output incrementally
)
|
120 |
|
121 |
template = """
|