# Gradio + Hugging Face chatbot demo (hotel-booking fine-tune of Llama-2-7b).
from threading import Thread

import gradio as gr
import spaces
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
# Pull the fine-tuned hotel-booking Llama-2 checkpoint (weights + tokenizer)
# from the Hugging Face Hub once, at import time.
model_name = "KvrParaskevi/Llama-2-7b-Hotel-Booking-Model"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Inference function: turn the conversation into a prompt and generate a reply.
@spaces.GPU
def chatbot(message, history):
    """Generate a model reply to *message* given the prior conversation.

    Args:
        message: The new user message (str).
        history: List of (user, assistant) string pairs from earlier turns,
            as supplied by ``gr.Chatbot``; may be empty or None.

    Returns:
        The full decoded assistant response as a string.
    """
    # NOTE(review): the original called HumanMessagePromptTemplate.from_message
    # (no such method) and TextIteratorStreamer(model, tokenizer, ...) with a
    # wrong signature, then streamer.generate() which does not exist. Rebuilt
    # with the documented transformers streaming pattern.
    #
    # Flatten prior turns ahead of the new message so the model sees context.
    # Assumes a plain "User:/Assistant:" chat format — TODO confirm this
    # matches the fine-tune's training template.
    turns = []
    for user_turn, bot_turn in history or []:
        turns.append(f"User: {user_turn}\nAssistant: {bot_turn}")
    turns.append(f"User: {message}\nAssistant:")
    prompt = "\n".join(turns)

    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # TextIteratorStreamer wraps the *tokenizer*; generation must run in a
    # background thread so this function can consume the stream.
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    generation_thread = Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=256),
    )
    generation_thread.start()
    response = "".join(streamer)
    generation_thread.join()
    return response
# Build the Gradio chat UI and wire the widgets to the inference function.
# NOTE(review): the original never connected the widgets to `chatbot`, and
# assigned an orphaned gr.Interface to an attribute inside the Blocks context;
# it also ended with a stray "|" that was a syntax error.
with gr.Blocks() as demo:
    chatbot_interface = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")

    def respond(message, chat_history):
        """Run the model on *message* and append the turn to the history.

        Returns ("", updated_history): empty string clears the textbox.
        """
        reply = chatbot(message, chat_history)
        chat_history = chat_history + [(message, reply)]
        return "", chat_history

    # Enter in the textbox sends the message; Clear wipes the conversation.
    msg.submit(respond, inputs=[msg, chatbot_interface],
               outputs=[msg, chatbot_interface])
    clear.click(lambda: [], inputs=None, outputs=chatbot_interface, queue=False)

# Launch the Gradio app
demo.launch()