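"""Hugging Face Space app: a Gradio chatbot that role-plays SARATH, answering
questions from the resume data in resume.py and streaming replies from a
hosted Llama-3 model via the Inference API."""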
from huggingface_hub import InferenceClient
from resume import data
import markdowm as md  # local module (filename is spelled "markdowm") providing the page title, description, and footer markdown
import gradio as gr
import base64
import datetime
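# Hosted chat model served through the Hugging Face Inference API;
# the commented-out line below keeps a Mixtral alternative to swap in.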
client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
# client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
# Chatbot response function with integrated system message
def respond(
    message,
    history: list[tuple[str, str]],
    max_tokens=1024,
    temperature=0.4,
    top_p=0.95,
):
    # System message defining assistant behavior
    system_message = {
        "role": "system",
        "content": "Act as SARATH and respond to the user's questions professionally. SARATH is a dedicated BTech final-year student actively seeking a job. Your name is SARATH. "
        f"Here is SARATH’s background: ```{data}```. Only answer questions using the information provided here, and strictly use only the links found in this data. If an answer isn’t available within this information, notify the user politely and suggest they reach out via LinkedIn for further assistance. "
        "Responses should be clear, professional, and strictly in English. Avoid giving random or empty responses at all times.",
    }
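    # chat_completion expects an OpenAI-style list of {"role", "content"} dicts.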
    messages = [system_message]
    # Append the prior conversation turns
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
        # print(f"{datetime.datetime.now()}::{{'role': 'user', 'content': user_msg}}->{{'role': 'assistant', 'content': assistant_msg}}")
    # Append the current user input
    messages.append({"role": "user", "content": message})
response = ""
# Streaming the response from the API
for message in client.chat_completion(
messages,
max_tokens=max_tokens,
stream=True,
temperature=temperature,
top_p=top_p,
):
token = message.choices[0].delta.content
response += token
yield response
print(f"{datetime.datetime.now()}::{messages[-1]['content']}->{response}\n")
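# Note: `respond` is a generator; gr.ChatInterface consumes it to stream
# partial responses. A manual smoke test (hypothetical, outside the UI):
#   for partial in respond("Tell me about yourself", history=[]):
#       print(partial)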
def encode_image(image_path):
    # Read an image file and return its contents as a base64 string
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')
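# The base64 strings below are interpolated into md.footer so the logos render
# as inline data URIs rather than served files (assumed from the .format()
# call at the bottom of the script).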
# Encode the images
github_logo_encoded = encode_image("Images/github-logo.png")
linkedin_logo_encoded = encode_image("Images/linkedin-logo.png")
website_logo_encoded = encode_image("Images/ai-logo.png")
# Gradio UI: a chat tab plus a resume tab
with gr.Blocks(theme=gr.themes.Ocean(font=[gr.themes.GoogleFont("Roboto Mono")]), css='footer {visibility: hidden}') as main:
    gr.Markdown(md.title)
    with gr.Tabs():
        with gr.TabItem("My2.0", visible=True, interactive=True):
            gr.ChatInterface(
                respond,
                chatbot=gr.Chatbot(height=500),
                examples=[
                    "Tell me about yourself",
                    "Can you walk me through some of your recent projects and explain the role you played in each?",
                    "What specific skills do you bring to the table that would benefit our company's AI/ML initiatives?",
                    "How do you stay updated with the latest trends and advancements in AI and Machine Learning?",
                ],
            )
            gr.Markdown(md.description)
        with gr.TabItem("Resume", visible=True, interactive=True):
            gr.Markdown(data)
    gr.HTML(md.footer.format(github_logo_encoded, linkedin_logo_encoded, website_logo_encoded))

if __name__ == "__main__":
    main.launch(share=True)
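    # share=True creates a temporary public share link (gradio.live); on a
    # hosted Space the app is reachable without it.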