File size: 3,105 Bytes
e3782c8
eac607d
 
 
46b4934
e3782c8
f0199d7
e3782c8
4474f90
e3782c8
c44b042
 
25a9047
754bdcf
c44b042
e3782c8
4474f90
 
e70536d
a07040e
 
a8a8ca2
4474f90
 
 
e3782c8
4474f90
e3782c8
 
 
 
 
 
4474f90
e70536d
e3782c8
b50dccc
e3782c8
4474f90
e3782c8
c44b042
 
 
 
 
e3782c8
 
158f8e3
e3782c8
 
4aa5c7e
 
 
 
 
 
 
 
e3782c8
4474f90
eac607d
 
 
ba19394
eac607d
e3782c8
eac607d
47dbbed
 
 
 
 
 
 
ba19394
3b459bf
 
4474f90
c44b042
eac607d
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
from huggingface_hub import InferenceClient
from resume import data  # resume text used both for display and for grounding the chatbot
import markdowm as md  # NOTE(review): "markdowm" looks like a typo of "markdown" — presumably a local markdowm.py module; confirm before renaming
import gradio as gr
import base64

# Shared client for the Zephyr-7B chat model served via the HF Inference API.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Chatbot response function with integrated system message
def respond(
        message,
        history: list[tuple[str, str]],
        max_tokens=1024,
        temperature=0.7,
        top_p=0.95,
):
    """Stream a chat reply that answers as SARATH, grounded in the resume data.

    Args:
        message: The latest user message.
        history: Prior (user, assistant) message pairs from the chat widget.
        max_tokens: Upper bound on the number of generated tokens.
        temperature: Sampling temperature passed to the model.
        top_p: Nucleus-sampling cutoff passed to the model.

    Yields:
        The accumulated response text after each streamed chunk.
    """
    # System message defining assistant behavior: persona + grounding data.
    system_message = {
        "role": "system",
        "content": f"Act and chat as SARATH to the user question. SARATH is a professional fresher seeking a job and pursuing Btech-4-1. your name is SARATH."
                   f"Here is about SARATH:```{data}```. You should answer questions based on this information only and strictly ignore any other context."
                   f"strictly prohibit random responses or output and speak in English"
    }

    messages = [system_message]

    # Replay the conversation history in the role/content format the API expects.
    for user_msg, bot_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})

    # Add the current user input last.
    messages.append({"role": "user", "content": message})

    response = ""

    # Stream the response from the API. The loop variable is named `chunk`
    # (not `message`) so the function parameter is not shadowed, and the
    # delta content is guarded because streaming chunks can carry None.
    for chunk in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token
        yield response

def encode_image(image_path):
    """Return the base64-encoded contents of the file at *image_path* as UTF-8 text."""
    with open(image_path, "rb") as handle:
        raw_bytes = handle.read()
    return base64.b64encode(raw_bytes).decode("utf-8")

# Encode the link logos once at import time so they can be embedded
# (e.g. as data URIs) into the footer markdown template below.
# NOTE(review): these paths are relative to the working directory — the app
# must be launched from the repo root for the images to be found.
github_logo_encoded = encode_image("Images/github-logo.png")
linkedin_logo_encoded = encode_image("Images/linkedin-logo.png")
website_logo_encoded = encode_image("Images/ai-logo.png")

# Gradio interface: two tabs (static resume view + streaming chatbot),
# a markdown title above and a footer with the base64-embedded logos below.
with gr.Blocks(theme=gr.themes.Ocean(font=[gr.themes.GoogleFont("Roboto Mono")])) as main:
    gr.Markdown(md.title)
    with gr.Tabs():
        # Tab 1: render the raw resume data as markdown.
        with gr.TabItem("Resume"):
            gr.Markdown(data)

        # Tab 2: chat interface backed by the streaming `respond` generator.
        with gr.TabItem("My2.0"):
            gr.ChatInterface(respond,
                             chatbot=gr.Chatbot(height=500),
                             examples=["Tell me about yourself sarath",
                                       'Can you walk me through some of your recent projects and explain the role you played in each?',
                                       "What specific skills do you bring to the table that would benefit our company's AI/ML initiatives?",
                                       "How do you stay updated with the latest trends and advancements in AI and Machine Learning?"                                       ],
                             )
            gr.Markdown(md.description)
    
    # Footer template takes the three encoded logos as positional format args.
    gr.Markdown(md.footer.format(github_logo_encoded, linkedin_logo_encoded, website_logo_encoded))

# Launch the app with a public share link when run as a script
# (no-op when the module is imported, e.g. by a Spaces runner).
if __name__ == "__main__":
    main.launch(share=True)