from huggingface_hub import InferenceClient
from resume import data  # local module exposing the resume text as `data`
import markdowm as md    # local module (filename spelled "markdowm" in this repo) with page markdown
import gradio as gr
import base64
import datetime
import os
# Initialize the model client
client = InferenceClient(
    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
    token=os.getenv("token"),
)
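# Note: the access token is read from an environment variable / Space secret
# named "token". If it is unset, InferenceClient falls back to any locally
# saved credentials or anonymous access, which may be rate-limited.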
# Chatbot response function with an integrated system message
def respond(
    message,
    history: list[tuple[str, str]],
    max_tokens=1024,
    temperature=0.4,
    top_p=0.95,
):
    system_message = {
        "role": "system",
        "content": (
            "Act as SARATH and respond to the user's questions professionally. SARATH is a dedicated BTech final-year student actively seeking a job. Your name is SARATH. "
            f"Here is SARATH's background: ```{data}```. Answer only with the information provided here, and use only the links found in this data. "
            "If an answer isn't available in this information, notify the user politely and suggest they reach out via LinkedIn for further assistance. "
            "Responses should be clear, professional, and strictly in English. Never give random or empty responses."
        ),
    }
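    # Note (assumption): fencing the resume in triple backticks is meant to make
    # the model treat it as quoted reference material rather than instructions.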
    messages = [system_message]

    # Add prior turns from the chat history
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    # Add the current user message
    messages.append({"role": "user", "content": message})
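    # `messages` is now a standard chat-completion payload:
    #   [system, user, assistant, user, assistant, ..., current user message]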
response = "" | |
# Streaming safe decoding | |
for message_chunk in client.chat_completion( | |
messages, | |
max_tokens=max_tokens, | |
stream=True, | |
temperature=temperature, | |
top_p=top_p, | |
): | |
if not hasattr(message_chunk, "choices") or not message_chunk.choices: | |
continue | |
delta = message_chunk.choices[0].delta | |
if not delta or not hasattr(delta, "content"): | |
continue | |
token = delta.get("content", "") | |
response += token | |
yield response | |
if not response.strip(): | |
yield "I'm sorry, I couldn't generate a response based on the current data." | |
print(f"{datetime.datetime.now()}::{messages[-1]['content']}->{response}\n") | |
# Encode an image file to a base64 string for inline HTML embedding
def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")
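
# The base64 strings are intended for inline HTML embedding, e.g.
#   <img src="data:image/png;base64,{encoded}">
# which is presumably how the md.footer template consumes them below.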
# Load and encode logos
github_logo_encoded = encode_image("Images/github-logo.png")
linkedin_logo_encoded = encode_image("Images/linkedin-logo.png")
website_logo_encoded = encode_image("Images/ai-logo.png")
# Gradio interface | |
with gr.Blocks(theme=gr.themes.Ocean(font=[gr.themes.GoogleFont("Roboto Mono")]), css='footer {visibility: hidden}') as main: | |
gr.Markdown(md.title) | |
with gr.Tabs(): | |
with gr.TabItem("My2.0", visible=True, interactive=True): | |
gr.ChatInterface( | |
respond, | |
chatbot=gr.Chatbot(height=500), | |
examples=[ | |
"Tell me about yourself", | |
'Can you walk me through some of your recent projects and explain the role you played in each?', | |
"What specific skills do you bring to the table that would benefit our company's AI/ML initiatives?", | |
"How do you stay updated with the latest trends and advancements in AI and Machine Learning?", | |
] | |
) | |
gr.Markdown(md.description) | |
with gr.TabItem("Resume", visible=True, interactive=True): | |
gr.Markdown(data) | |
gr.HTML(md.footer.format(github_logo_encoded, linkedin_logo_encoded, website_logo_encoded)) | |
if __name__ == "__main__":
    main.launch(share=True)
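
# Note: share=True asks Gradio for a public *.gradio.live tunnel; on Hugging
# Face Spaces the app is already served publicly, so the flag is typically
# unnecessary there.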