Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -67,11 +67,12 @@ def main():
|
|
67 |
#PATH = 'model/'
|
68 |
#llm = AutoModelForCausalLM.from_pretrained("openai-community/gpt2-medium")
|
69 |
llm = huggingface_hub.HuggingFaceHub(repo_id="openai-community/gpt2-medium",model_kwargs={"temperature":1.0, "max_length":500})
|
70 |
-
docs = vector_store.similarity_search(query=query, k=1)
|
71 |
-
st.write(docs)
|
72 |
#chain = load_qa_chain(llm=llm, chain_type="stuff")
|
73 |
#response = chain.run(input_documents=docs, question=query)
|
74 |
retriever=vector_store.as_retriever(search_kwargs={"k":1})
|
|
|
75 |
chain = RetrievalQA.from_chain_type(llm=llm,chain_type="stuff",retriever=retriever)
|
76 |
response = chain.run(query)
|
77 |
st.write(response)
|
|
|
67 |
#PATH = 'model/'
|
68 |
#llm = AutoModelForCausalLM.from_pretrained("openai-community/gpt2-medium")
|
69 |
llm = huggingface_hub.HuggingFaceHub(repo_id="openai-community/gpt2-medium",model_kwargs={"temperature":1.0, "max_length":500})
|
70 |
+
#docs = vector_store.similarity_search(query=query, k=1)
|
71 |
+
#st.write(docs)
|
72 |
#chain = load_qa_chain(llm=llm, chain_type="stuff")
|
73 |
#response = chain.run(input_documents=docs, question=query)
|
74 |
retriever=vector_store.as_retriever(search_kwargs={"k":1})
|
75 |
+
st.write(retriever)
|
76 |
chain = RetrievalQA.from_chain_type(llm=llm,chain_type="stuff",retriever=retriever)
|
77 |
response = chain.run(query)
|
78 |
st.write(response)
|