Update pages/bot.py
pages/bot.py (changed: +0 -59)
@@ -79,15 +79,6 @@ def get_vectorstore():
     return vectorstoreDB
 
 ######
-"""
-def get_conversation_chain(vectorstore):
-    llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512})
-    conversation_chain = ConversationalRetrievalChain.from_llm(
-        llm=llm,
-        retriever=vectorstore.as_retriever()
-    )
-    return conversation_chain
-"""
 
 
 #####
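This hunk drops a commented-out helper that would have built a conversational retrieval chain. For reference only (not part of the commit), a runnable sketch of that idea, assuming the classic langchain package is installed and a HUGGINGFACEHUB_API_TOKEN is set in the environment, could look like:

# Sketch of the deleted get_conversation_chain idea; illustrative, not the committed code.
# Assumes: langchain and huggingface_hub installed, HUGGINGFACEHUB_API_TOKEN set.
from langchain.llms import HuggingFaceHub
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory

def get_conversation_chain(vectorstore):
    # Model repo and kwargs taken from the removed lines.
    llm = HuggingFaceHub(
        repo_id="google/flan-t5-xxl",
        model_kwargs={"temperature": 0.5, "max_length": 512},
    )
    # The removed version passed no memory; without it the chain expects a
    # chat_history argument on every call, so a buffer memory is simpler.
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    return ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(),
        memory=memory,
    )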
@@ -108,15 +99,6 @@ def main():
     st.text("Das ist der Kontext:")
     st.text(context)
 
-    ## IDEA: text generation
-    #generator = pipeline('text-generation', model = 'gpt2')
-    #answer = generator(context, max_length = 30, num_return_sequences=3)
-
-    #st.text("FORMATIERTE ANTWORT:")
-    #st.text_area()
-    #st.text(answer)
-    #st.text(type(answer))
-
     # Create the question-answering pipeline for German
     qa_pipeline = pipeline("question-answering", model="deutsche-telekom/bert-multi-english-german-squad2", tokenizer="deutsche-telekom/bert-multi-english-german-squad2")
 
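The retained qa_pipeline line uses the standard transformers question-answering pipeline. For reference, calling it follows the usual API; the question and context strings below are invented for illustration:

# Illustrative call of the retained QA pipeline; question/context are made-up examples.
from transformers import pipeline

qa_pipeline = pipeline(
    "question-answering",
    model="deutsche-telekom/bert-multi-english-german-squad2",
    tokenizer="deutsche-telekom/bert-multi-english-german-squad2",
)
answer = qa_pipeline(
    question="Wer hat das Dokument geschrieben?",
    context="Das Dokument wurde 2023 von Anna Schmidt geschrieben.",
)
# The pipeline returns a dict with "answer", "score", "start" and "end".
print(answer["answer"])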
@@ -128,47 +110,6 @@ def main():
     st.text(answer["answer"])
     st.text(answer)
 
-    ######
-
-    #newA = get_conversation_chain(get_vectorstore())
-    #st.text(newA)
-
-    """
-    generator = pipeline('text-generation', model = 'tiiuae/falcon-40b')
-    generator(answer, max_length = 30, num_return_sequences=3)
-    st.text("Generierte Erweiterung:")
-    st.text(generator)
-    """
-
-    """
-    # IDEA: extend the retriever
-    template = Answer the question based only on the following context:
-
-    {context}
-
-    Question: {question}
-
-    prompt = ChatPromptTemplate.from_template(template)
-    model = AutoModel.from_pretrained("hkunlp/instructor-base")
-
-
-    def format_docs(docs):
-        return "\n\n".join([d.page_content for d in docs])
-
-
-    chain = (
-        {"context": retriever | format_docs, "question": RunnablePassthrough()}
-        | prompt
-        | model
-        | StrOutputParser()
-    )
-
-    ausgabetext = chain.invoke(user_question)
-    st.text(ausgabetext)
-    """
-
-
-
 
 if __name__ == '__main__':
     main()
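The last removed block sketched an LCEL retriever chain but was left as pseudocode inside a docstring: the prompt template string is unquoted, and AutoModel from transformers is not a runnable LLM, so the chain as written could not execute. A corrected sketch of that idea, assuming langchain-core plus an existing retriever and LLM object (all hypothetical here), might read:

# Corrected sketch of the deleted LCEL idea; `retriever`, `llm` and
# `user_question` are assumed to exist elsewhere in the app.
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser

template = """Answer the question based only on the following context:

{context}

Question: {question}"""
prompt = ChatPromptTemplate.from_template(template)

def format_docs(docs):
    # Join the retrieved documents into one context string.
    return "\n\n".join(d.page_content for d in docs)

chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm  # must be an LLM/chat-model runnable, not a bare transformers AutoModel
    | StrOutputParser()
)

output_text = chain.invoke(user_question)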