AMKhakbaz committed · verified
Commit b8736b9 · 1 Parent(s): 31b48c9

Update app.py

Files changed (1): app.py (+24 -12)
app.py CHANGED
@@ -7,12 +7,13 @@ from scipy.stats import norm, t
 from scipy.cluster.hierarchy import linkage, dendrogram, fcluster
 import plotly.figure_factory as ff
 from vllm import LLM, SamplingParams
+from huggingface_hub import InferenceClient
 
-@st.cache_resource
+"""@st.cache_resource
 def load_model():
     return LLM(model="deepseek-ai/DeepSeek-R1", device="cpu", trust_remote_code=True)
 
-llm = load_model()
+llm = load_model()"""
 
 def sorting(df):
     df.index = list(map(float, df.index))
@@ -1021,17 +1022,28 @@ try:
         st.info("This section of the program is under development.")
 
     elif main_option == "AI Chat":
-        prompt_user = st.text_input("Write a prompt.")
 
-        if st.button("Send"):
-            if user_input:
-                sampling_params = SamplingParams(temperature=0.7, max_tokens=100)
-                output = llm.generate([prompt_user], sampling_params)
-                response = output[0].outputs[0].text
-
-                st.text_area("Answer:", response, height=200)
-            else:
-                st.warning("Enter a prompt:")
+        client = InferenceClient(
+            provider="together",
+            api_key="hf_xxxxxxxxxxxxxxxxxxxxxxxx"
+        )
+
+        messages = [
+            {
+                "role": "user",
+                "content": "What is the capital of France?"
+            }
+        ]
+
+        stream = client.chat.completions.create(
+            model="deepseek-ai/DeepSeek-R1",
+            messages=messages,
+            max_tokens=500,
+            stream=True
+        )
+
+        for chunk in stream:
+            st.warning(chunk.choices[0].delta.content, end="")
 
 except Exception as e:
     st.error(f"❌ Error: {e}")
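
Note on the client setup: as committed, the app hard-codes a placeholder API key ("hf_xxxxxxxxxxxxxxxxxxxxxxxx") and rebuilds the InferenceClient on every Streamlit rerun; it also only comments out the vLLM load with a docstring, so `from vllm import LLM, SamplingParams` still executes at import time and will fail if vLLM is not installed in the Space. A minimal sketch of a safer setup, assuming a recent huggingface_hub release (where InferenceClient accepts a provider argument) and a token stored under the hypothetical secret name HF_TOKEN in .streamlit/secrets.toml:

import streamlit as st
from huggingface_hub import InferenceClient

@st.cache_resource
def load_client():
    # Reuses the @st.cache_resource pattern this commit comments out,
    # so the client is built once and shared across reruns.
    return InferenceClient(
        provider="together",
        api_key=st.secrets["HF_TOKEN"],  # hypothetical secret name
    )

client = load_client()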
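
Note on the chat branch: as committed, it drops the old text input and always sends the hard-coded "What is the capital of France?" prompt, and `st.warning(chunk.choices[0].delta.content, end="")` raises a TypeError, since st.warning is not print() and takes no `end` keyword (the removed code had its own bug, testing an undefined `user_input` instead of `prompt_user`). A minimal sketch wiring the user's prompt back in and streaming the reply, assuming Streamlit >= 1.31 for st.write_stream and the cached `client` from the sketch above:

# Inside the `elif main_option == "AI Chat":` branch.
prompt_user = st.text_input("Write a prompt.")

if st.button("Send"):
    if prompt_user:
        stream = client.chat.completions.create(
            model="deepseek-ai/DeepSeek-R1",
            messages=[{"role": "user", "content": prompt_user}],
            max_tokens=500,
            stream=True,
        )
        # st.write_stream renders tokens as they arrive; delta.content
        # can be None on some chunks, so skip those.
        st.write_stream(
            chunk.choices[0].delta.content
            for chunk in stream
            if chunk.choices[0].delta.content
        )
    else:
        st.warning("Enter a prompt.")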