Update app.py
app.py CHANGED
@@ -2,23 +2,19 @@ import streamlit as st
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 from sentence_transformers import SentenceTransformer
 from langchain.vectorstores import Chroma
-import
+import os
 import psutil
+import gc
 
-# Model ID
+# Hugging Face model ID
 model_id = "hewoo/hehehehe"
 
-#
-def monitor_memory():
-    memory_info = psutil.virtual_memory()
-    st.write(f"Current memory usage: {memory_info.percent}%")
-
-# Load the model and pipeline using the cache
+# Load the model and pipeline with caching
 @st.cache_resource
 def load_model():
     tokenizer = AutoTokenizer.from_pretrained(model_id)
     model = AutoModelForCausalLM.from_pretrained(model_id)
-    return pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=150
+    return pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=150)
 
 # Custom embedding class
 class CustomEmbedding:
@@ -31,13 +27,13 @@ class CustomEmbedding:
     def embed_documents(self, texts):
         return [self.model.encode(text, convert_to_tensor=True).tolist() for text in texts]
 
-# Set up the embedding model and vector store
+# Set up the embedding model and vector store (cached)
 @st.cache_resource
 def load_embedding_model():
-    return SentenceTransformer("
+    return SentenceTransformer("jhgan/ko-sroberta-multitask")
 
 @st.cache_resource
-def load_vectorstore(_embedding_model):
+def load_vectorstore(_embedding_model):
     embedding_function = CustomEmbedding(_embedding_model)
     return Chroma(persist_directory="./chroma_batch_vectors", embedding_function=embedding_function)
 
@@ -46,9 +42,22 @@ def generate_response(user_input):
     retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
     search_results = retriever.get_relevant_documents(user_input)
     context = "\n".join([result.page_content for result in search_results])
-
-
-
+
+    prompt = f"""You are a Korean-language assistant that answers the user's question.
+Write an accurate, detailed answer in Korean based on the given context.
+If the context contains no relevant information, reply "Sorry, I could not find an answer to that question."
+
+Context:
+{context}
+
+Question:
+{user_input}
+
+Answer:"""
+
+    response = pipe(prompt)[0]["generated_text"]
+    response = response.split("Answer:")[-1].strip()
+    return response, context
 
 # Load the model and embedding model
 pipe = load_model()
@@ -56,20 +65,21 @@ embedding_model = load_embedding_model()
 vectorstore = load_vectorstore(embedding_model)
 
 # Streamlit app UI
-st.title("Chatbot
-st.write("
-
-monitor_memory()  # Check memory usage
+st.title("Chatbot Demo")
+st.write("A chatbot built on a Korean language model. Please enter a question.")
 
 # Get user input
 user_input = st.text_input("Question")
 if user_input:
-    response = generate_response(user_input)
+    response, context = generate_response(user_input)
     st.write("Chatbot response:", response)
-
+    st.write("Context used:", context)
+
+    # Monitor memory status
+    memory_usage = psutil.virtual_memory().used / (1024 ** 3)
+    st.write(f"Current memory usage: {memory_usage:.2f} GB")
 
 # Free memory
 del response
 gc.collect()
 
-
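The diff collapses the middle of the CustomEmbedding class, so its constructor and any query-side method are not shown. A minimal sketch of what the full wrapper presumably looks like; embed_documents is copied from the visible lines, while __init__ and embed_query are assumptions based on the embedding interface LangChain's Chroma expects (embed_documents at indexing time, embed_query at retrieval time):

# Hypothetical reconstruction of the collapsed class body; only
# embed_documents is visible in the diff, the rest is an assumption.
class CustomEmbedding:
    def __init__(self, model):
        self.model = model  # a SentenceTransformer instance

    def embed_documents(self, texts):
        # Verbatim from the diff: encode each document and return plain lists
        return [self.model.encode(text, convert_to_tensor=True).tolist() for text in texts]

    def embed_query(self, text):
        # Single-text variant called by the retriever for the query side
        return self.model.encode(text, convert_to_tensor=True).tolist()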
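A note on the load_vectorstore signature this commit keeps: st.cache_resource builds its cache key by hashing the function's arguments, and a parameter name that starts with an underscore tells Streamlit to skip hashing that argument. That is presumably why the SentenceTransformer instance, which st.cache_resource cannot hash, is passed as _embedding_model:

# The leading underscore makes st.cache_resource skip hashing this argument,
# so the unhashable SentenceTransformer object can be passed in safely.
@st.cache_resource
def load_vectorstore(_embedding_model):
    embedding_function = CustomEmbedding(_embedding_model)
    return Chroma(persist_directory="./chroma_batch_vectors", embedding_function=embedding_function)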
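One caveat the commit does not address: del response and gc.collect() run at module level on every Streamlit rerun, so on a fresh load, before any question has been entered, response is undefined and del response raises a NameError. A minimal sketch of a guard, not part of the commit, keeps the cleanup inside the input branch:

# Sketch only: release memory inside the branch where `response` exists,
# avoiding the NameError that module-level `del response` raises on an empty rerun.
if user_input:
    response, context = generate_response(user_input)
    st.write("Chatbot response:", response)
    st.write("Context used:", context)

    del response  # drop the reference before forcing a collection pass
    gc.collect()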