Sarath0x8f's picture
Update app.py
158f8e3 verified
raw
history blame
2.03 kB
from huggingface_hub import InferenceClient
from resume import data
import markdowm as md
import gradio as gr
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
# Chatbot response function with integrated system message
def respond(
    message,
    history: list[tuple[str, str]],
    max_tokens=1024,
    temperature=0.5,
    top_p=0.95,
):
    """Stream a chat completion that answers as "SARATH" using the resume data.

    Args:
        message: The current user input string.
        history: Prior turns as (user_text, assistant_text) pairs; either side
            may be empty/None and is then skipped.
        max_tokens: Generation cap passed to the inference API.
        temperature: Sampling temperature passed to the inference API.
        top_p: Nucleus-sampling parameter passed to the inference API.

    Yields:
        The accumulated response text after each streamed token, so the UI
        can render progressively.
    """
    # Use the standard "system" role — the original "SARATH" is not a valid
    # chat role and chat templates may drop or mishandle such messages.
    system_message = {
        "role": "system",
        "content": (
            "Act and chat as SARATH to the user question. SARATH is a "
            "professional fresher seeking a job, pursuing Btech-4-1, and your "
            "name is SARATH. "
            f"Here is about SARATH:```{data}```. You should answer questions "
            "based on this information only. "
            "If the question is irrelevant to the context=[about SARATH], "
            "politely reply to the user."
        ),
    }
    messages = [system_message]

    # Replay prior turns in API format; skip empty entries.
    for user_text, assistant_text in history:
        if user_text:
            messages.append({"role": "user", "content": user_text})
        if assistant_text:
            messages.append({"role": "assistant", "content": assistant_text})

    # Current user input goes last.
    messages.append({"role": "user", "content": message})

    response = ""
    # Stream tokens; name the loop variable "chunk" so it does not shadow
    # the `message` parameter (a latent bug in the original).
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # delta.content can be None on some stream events (e.g. the final
        # chunk); concatenating None would raise TypeError.
        if token:
            response += token
        yield response
# Gradio interface with additional sliders for control
# Build the Gradio UI: a themed Blocks layout with an intro tab (markdown
# description + resume text) and a chat tab wired to `respond`.
with gr.Blocks(theme=gr.themes.Ocean(font=[gr.themes.GoogleFont("Roboto Mono")])) as main:
    # Page title comes from the project-local `markdowm` module (imported as md).
    gr.Markdown(md.title)
    with gr.Tabs():
        with gr.TabItem("Intro"):
            gr.Markdown(md.description)
            gr.Markdown("# Resume:")
            # Raw resume text imported from the project-local `resume` module.
            gr.Markdown(data)
        with gr.TabItem("My2.0"):
            # ChatInterface drives the streaming `respond` generator above.
            gr.ChatInterface(respond)
if __name__ == "__main__":
    # share=True exposes a public Gradio link in addition to the local server.
    main.launch(share=True)