Update app.py
app.py CHANGED
@@ -10,6 +10,13 @@ import requests
 import time
 from tqdm import tqdm
 from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
+from vllm import LLM, SamplingParams
+
+@st.cache_resource
+def load_model():
+    return LLM(model="deepseek-ai/DeepSeek-R1")
+
+llm = load_model()
 
 def sorting(df):
     df.index = list(map(float, df.index))
@@ -1017,11 +1024,15 @@ try:
     elif main_option == "AI Chat":
         prompt_user = st.text_input("Write a prompt.")
 
-
-
-
-
-
+        if st.button("Send"):
+            if prompt_user:
+                sampling_params = SamplingParams(temperature=0.7, max_tokens=100)
+                output = llm.generate([prompt_user], sampling_params)
+                response = output[0].outputs[0].text
+
+                st.text_area("Answer:", response, height=200)
+            else:
+                st.warning("Enter a prompt.")
 
 except Exception as e:
     st.error(f"❌ Error: {e}")
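The generation pattern introduced in this commit can be sanity-checked outside Streamlit. A minimal sketch, assuming vllm is installed and the machine can actually host the model; the prompt string is illustrative and not part of the commit:

from vllm import LLM, SamplingParams

# A plain script constructs the engine exactly once, so the
# st.cache_resource wrapper from the diff is not needed here.
llm = LLM(model="deepseek-ai/DeepSeek-R1")
sampling_params = SamplingParams(temperature=0.7, max_tokens=100)

# generate() takes a list of prompts and returns one RequestOutput per
# prompt; each holds its candidate completions under .outputs.
outputs = llm.generate(["What is vLLM?"], sampling_params)
print(outputs[0].outputs[0].text)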