Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -4,9 +4,9 @@ import markdowm as md
|
|
4 |
import gradio as gr
|
5 |
import base64
|
6 |
import datetime
|
7 |
-
import os
|
8 |
|
9 |
-
#
|
10 |
client = InferenceClient(
|
11 |
model="mistralai/Mixtral-8x7B-Instruct-v0.1",
|
12 |
token=os.getenv("token")
|
@@ -20,74 +20,84 @@ def respond(
|
|
20 |
temperature=0.4,
|
21 |
top_p=0.95,
|
22 |
):
|
23 |
-
# System message defining assistant behavior
|
24 |
system_message = {
|
25 |
"role": "system",
|
26 |
-
"content":
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
|
|
31 |
}
|
32 |
|
33 |
messages = [system_message]
|
34 |
|
35 |
-
#
|
36 |
for val in history:
|
37 |
if val[0]:
|
38 |
messages.append({"role": "user", "content": val[0]})
|
39 |
if val[1]:
|
40 |
messages.append({"role": "assistant", "content": val[1]})
|
41 |
-
|
42 |
-
#
|
43 |
-
|
44 |
-
# Adding the current user input
|
45 |
messages.append({"role": "user", "content": message})
|
46 |
-
|
47 |
response = ""
|
48 |
|
49 |
-
# Streaming
|
50 |
-
for
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
):
|
57 |
-
|
|
|
|
|
|
|
|
|
|
|
58 |
response += token
|
59 |
yield response
|
60 |
-
|
|
|
|
|
|
|
61 |
print(f"{datetime.datetime.now()}::{messages[-1]['content']}->{response}\n")
|
62 |
-
|
|
|
|
|
63 |
def encode_image(image_path):
    """Read the file at *image_path* and return its bytes as a base64 UTF-8 string."""
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode('utf-8')
|
66 |
|
67 |
-
|
|
|
68 |
github_logo_encoded = encode_image("Images/github-logo.png")
|
69 |
linkedin_logo_encoded = encode_image("Images/linkedin-logo.png")
|
70 |
website_logo_encoded = encode_image("Images/ai-logo.png")
|
71 |
|
72 |
-
# Gradio interface
|
73 |
with gr.Blocks(theme=gr.themes.Ocean(font=[gr.themes.GoogleFont("Roboto Mono")]), css='footer {visibility: hidden}') as main:
|
74 |
gr.Markdown(md.title)
|
75 |
with gr.Tabs():
|
76 |
with gr.TabItem("My2.0", visible=True, interactive=True):
|
77 |
-
gr.ChatInterface(
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
|
|
|
|
85 |
gr.Markdown(md.description)
|
86 |
|
87 |
with gr.TabItem("Resume", visible=True, interactive=True):
|
88 |
gr.Markdown(data)
|
89 |
-
|
90 |
gr.HTML(md.footer.format(github_logo_encoded, linkedin_logo_encoded, website_logo_encoded))
|
91 |
|
92 |
if __name__ == "__main__":
|
93 |
-
main.launch(share=True)
|
|
|
4 |
import gradio as gr
|
5 |
import base64
|
6 |
import datetime
|
7 |
+
import os
|
8 |
|
9 |
+
# Initialize the model client
|
10 |
client = InferenceClient(
|
11 |
model="mistralai/Mixtral-8x7B-Instruct-v0.1",
|
12 |
token=os.getenv("token")
|
|
|
20 |
temperature=0.4,
|
21 |
top_p=0.95,
|
22 |
):
|
|
|
23 |
system_message = {
|
24 |
"role": "system",
|
25 |
+
"content": (
|
26 |
+
f"Act as SARATH and respond to the user's questions professionally. SARATH is a dedicated BTech final-year student actively seeking a job. Your name is SARATH. "
|
27 |
+
f"Here is SARATH’s background:```{data}```. Only answer questions using the information provided here, and strictly use only the links found in this data. "
|
28 |
+
f"If an answer isn’t available within this information, notify the user politely and suggest they reach out via LinkedIn for further assistance. "
|
29 |
+
f"Responses should be clear, professional, and strictly in English. Avoid giving random or empty responses at all times."
|
30 |
+
)
|
31 |
}
|
32 |
|
33 |
messages = [system_message]
|
34 |
|
35 |
+
# Add chat history
|
36 |
for val in history:
|
37 |
if val[0]:
|
38 |
messages.append({"role": "user", "content": val[0]})
|
39 |
if val[1]:
|
40 |
messages.append({"role": "assistant", "content": val[1]})
|
41 |
+
|
42 |
+
# Add current message
|
|
|
|
|
43 |
messages.append({"role": "user", "content": message})
|
|
|
44 |
response = ""
|
45 |
|
46 |
+
# Streaming safe decoding
|
47 |
+
for message_chunk in client.chat_completion(
|
48 |
+
messages,
|
49 |
+
max_tokens=max_tokens,
|
50 |
+
stream=True,
|
51 |
+
temperature=temperature,
|
52 |
+
top_p=top_p,
|
53 |
):
|
54 |
+
if not hasattr(message_chunk, "choices") or not message_chunk.choices:
|
55 |
+
continue
|
56 |
+
delta = message_chunk.choices[0].delta
|
57 |
+
if not delta or not hasattr(delta, "content"):
|
58 |
+
continue
|
59 |
+
token = delta.get("content", "")
|
60 |
response += token
|
61 |
yield response
|
62 |
+
|
63 |
+
if not response.strip():
|
64 |
+
yield "I'm sorry, I couldn't generate a response based on the current data."
|
65 |
+
|
66 |
print(f"{datetime.datetime.now()}::{messages[-1]['content']}->{response}\n")
|
67 |
+
|
68 |
+
|
69 |
+
# Encode image to base64
def encode_image(image_path):
    """Base64-encode the binary contents of *image_path* and return UTF-8 text."""
    with open(image_path, "rb") as source:
        encoded = base64.b64encode(source.read())
    return encoded.decode('utf-8')
|
73 |
|
74 |
+
|
75 |
+
# Load and encode logos
|
76 |
github_logo_encoded = encode_image("Images/github-logo.png")
|
77 |
linkedin_logo_encoded = encode_image("Images/linkedin-logo.png")
|
78 |
website_logo_encoded = encode_image("Images/ai-logo.png")
|
79 |
|
80 |
# Gradio interface: two tabs (chat + resume) with a branded footer.
with gr.Blocks(theme=gr.themes.Ocean(font=[gr.themes.GoogleFont("Roboto Mono")]), css='footer {visibility: hidden}') as main:
    gr.Markdown(md.title)

    with gr.Tabs():
        # Chat tab backed by the streaming `respond` generator.
        with gr.TabItem("My2.0", visible=True, interactive=True):
            gr.ChatInterface(
                respond,
                chatbot=gr.Chatbot(height=500),
                examples=[
                    "Tell me about yourself",
                    'Can you walk me through some of your recent projects and explain the role you played in each?',
                    "What specific skills do you bring to the table that would benefit our company's AI/ML initiatives?",
                    "How do you stay updated with the latest trends and advancements in AI and Machine Learning?",
                ]
            )
            gr.Markdown(md.description)

        # Static resume tab rendered from the markdown in `data`.
        with gr.TabItem("Resume", visible=True, interactive=True):
            gr.Markdown(data)

    gr.HTML(md.footer.format(github_logo_encoded, linkedin_logo_encoded, website_logo_encoded))

if __name__ == "__main__":
    main.launch(share=True)
|