Update app.py
app.py CHANGED
@@ -137,7 +137,6 @@ def load_model(_docs):
     template = generate_prompt("""{context} Question: {question} """,system_prompt=SYSTEM_PROMPT,) #Enter memory here!
 
     prompt = PromptTemplate(template=template, input_variables=["context", "question"]) #Add history here
-    streamer = transformers.TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
 
     qa_chain = RetrievalQA.from_chain_type(
         llm=llm,
@@ -146,7 +145,6 @@ def load_model(_docs):
         return_source_documents=True,
         chain_type_kwargs={"prompt": prompt,
                            "verbose": False,
-                           "streamer":streamer,
                            #"memory": ConversationBufferMemory(
                            #memory_key="history",
                            #input_key="question",
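
The commit drops the TextIteratorStreamer from chain_type_kwargs, presumably because RetrievalQA's combine-documents chain does not accept a "streamer" argument and would reject it at construction time. For reference, the pattern documented by transformers attaches the streamer to model.generate() itself, run in a background thread while the caller consumes decoded text. A minimal sketch, assuming a locally available causal LM ("gpt2" here is only a placeholder, not the model app.py actually loads):

from threading import Thread

import transformers

model_id = "gpt2"  # placeholder; app.py loads its own model and tokenizer
tokenizer = transformers.AutoTokenizer.from_pretrained(model_id)
model = transformers.AutoModelForCausalLM.from_pretrained(model_id)

streamer = transformers.TextIteratorStreamer(
    tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True
)

inputs = tokenizer("Question: what is retrieval-augmented generation?", return_tensors="pt")
# generate() blocks until completion, so it runs in a background thread while
# the caller iterates over the streamer and receives text chunks as they arrive
thread = Thread(
    target=model.generate,
    kwargs={**inputs, "streamer": streamer, "max_new_tokens": 64},
)
thread.start()
for chunk in streamer:
    print(chunk, end="", flush=True)
thread.join()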
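
The "#Enter memory here!" and "#Add history here" markers, together with the commented-out ConversationBufferMemory block, point at where chat history would plug in. A minimal sketch of the classic LangChain pattern for adding memory to RetrievalQA through chain_type_kwargs, assuming the pre-0.1 langchain API this app uses; FakeListLLM and FakeEmbeddings are stand-ins so the sketch runs without a real model (faiss-cpu is needed for the toy index):

from langchain.chains import RetrievalQA
from langchain.embeddings.fake import FakeEmbeddings
from langchain.llms.fake import FakeListLLM
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain.vectorstores import FAISS

llm = FakeListLLM(responses=["A stub answer."])  # stand-in for the app's LLM
retriever = FAISS.from_texts(
    ["example document text"], FakeEmbeddings(size=8)
).as_retriever()

# the template gains a {history} slot, and "history" joins input_variables
template = """Chat history: {history}
{context}
Question: {question}
Answer:"""
prompt = PromptTemplate(
    template=template, input_variables=["history", "context", "question"]
)

qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=retriever,
    return_source_documents=True,
    chain_type_kwargs={
        "prompt": prompt,
        "verbose": False,
        # memory attaches to the inner combine-documents chain: input_key names
        # the input it records, memory_key fills the {history} prompt variable
        "memory": ConversationBufferMemory(memory_key="history", input_key="question"),
    },
)

print(qa_chain({"query": "What does the document say?"})["result"])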