Spaces:
Sleeping
Sleeping
File size: 4,391 Bytes
27e25ed |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 |
import os
import pymssql
import pandas as pd
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.document_loaders import DirectoryLoader
from langchain.document_loaders import CSVLoader
from langchain.memory import ConversationBufferMemory
def Loading():
    """Return the transient status message shown while data is loading."""
    status = "๋ฐ์ดํฐ ๋ก๋ฉ ์ค..."
    return status
def LoadData(openai_key):
    """Initialise the persisted Chroma vector store and module-level retriever.

    Parameters
    ----------
    openai_key : str | None
        OpenAI API key entered by the user in the UI textbox.

    Returns
    -------
    str
        Status message displayed in the UI ("ready" on success, a prompt to
        enter a key otherwise).
    """
    # BUG FIX: a Gradio textbox yields "" when empty, never None, so the old
    # `is not None` guard always passed; test truthiness instead.
    if not openai_key:
        return "์ฌ์ฉํ์๋ API Key๋ฅผ ์๋ ฅํ์ฌ ์ฃผ์๊ธฐ ๋ฐ๋๋๋ค."
    # BUG FIX: the key was accepted but never used. OpenAIEmbeddings reads
    # OPENAI_API_KEY from the environment, so export it here.
    os.environ["OPENAI_API_KEY"] = openai_key
    persist_directory = 'realdb_LLM'
    embedding = OpenAIEmbeddings()
    vectordb = Chroma(
        persist_directory=persist_directory,
        embedding_function=embedding
    )
    # Module-level handle consumed by respond(); k=1 retrieves only the
    # single closest document.
    global retriever
    retriever = vectordb.as_retriever(search_kwargs={"k": 1})
    return "์ค๋น ์๋ฃ"
def process_llm_response(llm_response):
    """Print a chain's answer followed by the source paths it drew from.

    Expects the dict shape produced by a RetrievalQA chain: a 'result'
    string and a 'source_documents' list whose items carry
    ``metadata['source']``.
    """
    answer = llm_response['result']
    print(answer)
    print('\n\nSources:')
    for doc in llm_response["source_documents"]:
        print(doc.metadata['source'])
# Produce the chatbot's answer for one user turn.
def respond(message, chat_history, temperature=0.4):
    """Answer `message` via RetrievalQA and append the turn to the history.

    Parameters
    ----------
    message : str
        The user's question.
    chat_history : list[tuple[str, str]]
        Gradio chat history of (user, bot) pairs; mutated in place.
    temperature : float, optional
        LLM sampling temperature. BUG FIX: this parameter previously had no
        default (both UI callers pass only two inputs, which raised
        TypeError) and was ignored in favour of a hard-coded 0.4. The
        default of 0.4 preserves the old effective behaviour.

    Returns
    -------
    tuple[str, list]
        (new textbox value, updated chat history).
    """
    try:
        # `retriever` is the module-level handle created by LoadData().
        qa_chain = RetrievalQA.from_chain_type(
            llm=OpenAI(temperature=temperature),
            chain_type="stuff",
            retriever=retriever
        )
        result = qa_chain(message)
        bot_message = result['result']
        # Record the (user message, bot answer) pair for the chat widget.
        chat_history.append((message, bot_message))
        return "", chat_history
    except Exception:
        # Narrowed from a bare `except:`. Typical failures here are a
        # missing/invalid API key or an uninitialised retriever; surface a
        # short notice in the chat instead of crashing the UI.
        chat_history.append(("", "API Key ์๋ ฅ ์๋ง"))
        return " ", chat_history
import gradio as gr

# Page header (HTML banner shown above the chat widget).
title = """
<div style="text-align: center; max-width: 500px; margin: 0 auto;">
<div>
<h1>Pretraining Chatbot V2 Real</h1>
</div>
<p style="margin-bottom: 10px; font-size: 94%">
OpenAI LLM๋ฅผ ์ด์ฉํ Chatbot (Similarity)
</p>
</div>
"""

# Page styling: centre the main column and cap its width.
css = """
#col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
"""

# NOTE(review): the scraped source had lost all block indentation and split
# several labels mid-string; structure reconstructed conventionally below.
# Event wiring is kept inside the Blocks context, as Gradio requires.
with gr.Blocks(css=css) as UnivChatbot:
    with gr.Column(elem_id="col-container"):
        gr.HTML(title)

        # Key entry row: API-key textbox, status box, confirm button.
        with gr.Row():
            with gr.Column(scale=3):
                openai_key = gr.Textbox(label="You OpenAI API key", type="password", placeholder="OpenAI Key Type", elem_id="InputKey", show_label=False, container=False)
            with gr.Column(scale=1):
                langchain_status = gr.Textbox(placeholder="Status", interactive=False, show_label=False, container=False)
            with gr.Column(scale=1):
                chk_key = gr.Button("ํ์ธ", variant="primary")

        chatbot = gr.Chatbot(label="๋ํ ์ฑ๋ด์์คํ(OpenAI LLM)", elem_id="chatbot")

        # Question entry row.
        with gr.Row():
            with gr.Column(scale=9):
                msg = gr.Textbox(label="์๋ ฅ", placeholder="๊ถ๊ธํ์ ๋ด์ญ์ ์๋ ฅํ์ฌ ์ฃผ์ธ์.", elem_id="InputQuery", show_label=False, container=False)

        # Send / reset buttons.
        with gr.Row():
            with gr.Column(scale=1):
                submit = gr.Button("์ ์ก", variant="primary")
            with gr.Column(scale=1):
                clear = gr.Button("์ด๊ธฐํ", variant="stop")

    # Confirm button: initialise the vector store with the supplied key.
    chk_key.click(LoadData, openai_key, outputs=[langchain_status], queue=False)
    # Submitting the textbox or clicking send invokes respond().
    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    submit.click(respond, [msg, chatbot], [msg, chatbot])
    # Reset button clears the chat history.
    clear.click(lambda: None, None, chatbot, queue=False)

UnivChatbot.launch()