# Standard library imports
import base64
import datetime
import os

# Third-party imports
import gradio as gr
from huggingface_hub import InferenceClient

# Local modules: resume data and page copy (title, description, footer)
import markdowm as md  # module name as it appears in the repo
from resume import data
# Initialize the Hugging Face Inference client
client = InferenceClient(
    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
    token=os.getenv("token"),
)
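
# A minimal sanity check (a sketch, not part of the app): uncomment to verify
# that the token and model are reachable before launching the interface.
# reply = client.chat_completion(
#     [{"role": "user", "content": "ping"}],
#     max_tokens=8,
# )
# print(reply.choices[0].message.content)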
# Chatbot response function with an integrated system message
def respond(
    message,
    history: list[tuple[str, str]],
    max_tokens=1024,
    temperature=0.4,
    top_p=0.95,
):
    system_message = {
        "role": "system",
        "content": (
            "Act as SARATH and answer the user's questions professionally. SARATH is a dedicated BTech graduate, "
            "passionate about AI roles, and actively seeking a job. Your name is SARATH. "
            f"Here is SARATH's background: ```{data}```. Answer only from the information provided here, and "
            "strictly use only the links found in this data. "
            "If an answer isn't available in this information, notify the user politely and suggest they reach "
            "out via LinkedIn for further assistance. "
            "Responses should be clear, professional, and strictly in English. Never give random or empty responses."
        ),
    }
    messages = [system_message]

    # Replay prior turns so the model sees the full conversation
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    # Append the current user message
    messages.append({"role": "user", "content": message})
    response = ""

    # Stream tokens defensively: skip malformed chunks rather than crashing mid-stream
    for message_chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        if not getattr(message_chunk, "choices", None):
            continue
        # The streamed delta is an object, not a dict, so read .content as an
        # attribute (calling delta.get(...) would raise AttributeError)
        delta = message_chunk.choices[0].delta
        token = getattr(delta, "content", None) or ""
        response += token
        yield response

    # Fall back to an apology if the model produced nothing usable
    if not response.strip():
        yield "I'm sorry, I couldn't generate a response based on the current data."

    # Log the exchange with a timestamp for debugging
    print(f"{datetime.datetime.now()}::{messages[-1]['content']}->{response}\n")
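
# Usage note (a sketch): respond() is a generator that yields the growing
# response string, which matches the streaming contract gr.ChatInterface
# expects. It can also be driven manually, e.g.:
# for partial in respond("Tell me about yourself", history=[]):
#     print(partial)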
# Encode an image file to a base64 string for inline HTML embedding
def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")
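
# The base64 strings are meant for inline embedding in HTML served by Gradio;
# md.footer presumably interpolates them into data URIs along these lines
# (an assumption about md.footer, shown only as an illustration):
# '<img src="data:image/png;base64,{}" alt="logo">'.format(github_logo_encoded)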
# Load and encode logos
github_logo_encoded = encode_image("Images/github-logo.png")
linkedin_logo_encoded = encode_image("Images/linkedin-logo.png")
website_logo_encoded = encode_image("Images/ai-logo.png")
# Gradio interface
with gr.Blocks(theme=gr.themes.Ocean(font=[gr.themes.GoogleFont("Roboto Mono")]), css='footer {visibility: hidden}') as main:
gr.Markdown(md.title)
with gr.Tabs():
with gr.TabItem("My2.0", visible=True, interactive=True):
gr.ChatInterface(
respond,
chatbot=gr.Chatbot(height=500),
examples=[
"Tell me about yourself",
'Can you walk me through some of your recent projects and explain the role you played in each?',
"What specific skills do you bring to the table that would benefit our company's AI/ML initiatives?",
"How do you stay updated with the latest trends and advancements in AI and Machine Learning?",
]
)
gr.Markdown(md.description)
with gr.TabItem("Resume", visible=True, interactive=True):
gr.Markdown(data)
gr.HTML(md.footer.format(github_logo_encoded, linkedin_logo_encoded, website_logo_encoded))
if __name__ == "__main__":
main.launch(share=True) |