# Hugging Face Spaces demo: Gradio chatbot over a GPT-2 model fine-tuned for travel (Umrah package) Q&A.
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import gradio as gr
# Load the fine-tuned GPT-2 travel Q&A model and its tokenizer from the
# Hugging Face Hub, then place the model on the best available device.
model_name = "Rehman1603/Travel_fine_tuned_gpt2_model_final"  # Hugging Face model path

# Pick the compute device up front so the model can be moved as soon as it loads.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name).to(device)
# Function to generate responses
def generate_response(question, max_length=1024, temperature=0.5, top_k=50, top_p=0.95):
    """Generate an answer for *question* with the fine-tuned GPT-2 model.

    Args:
        question: User question to answer.
        max_length: Upper bound on the total sequence length (prompt + answer) in tokens.
        temperature: Sampling temperature; lower values are more deterministic.
        top_k: Keep only the k most likely tokens at each sampling step.
        top_p: Nucleus-sampling probability mass.

    Returns:
        The generated answer text with the "Question: ... Answer:" scaffold stripped.
    """
    # The model was fine-tuned on this "Question: ... Answer:" prompt format.
    input_text = f"Question: {question} Answer:"
    input_ids = tokenizer.encode(input_text, return_tensors="pt").to(device)

    # Inference only: disable autograd so generate() does not build a gradient
    # graph (saves memory and time).
    with torch.no_grad():
        output = model.generate(
            input_ids,
            max_length=max_length,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            do_sample=True,
            # GPT-2 has no dedicated pad token; reuse EOS to silence the warning.
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode, then keep only the text after the last "Answer:" marker.
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return response.split("Answer:")[-1].strip()
# Gradio Interface
def chat_interface(question, history):
    """Gradio callback: answer *question* and record the turn in the chat history.

    Returns the updated history twice — once for the Chatbot display and once
    for the State component that carries it to the next turn.
    """
    history = history or []
    history.append((question, generate_response(question)))
    return history, history
# Sample questions shown as clickable examples in the Gradio UI; they also
# double as quick manual smoke tests for the fine-tuned model.
test_questions = [
    "Hi",
    "What is the duration of the Economy Umrah Package?",
    "Good morning",
    "What is the distance of hotels from Haram in Package 4?",
    "What is the price of a 14-night Umrah package with air tickets in Package 1",
    "What is the price of a 20-night Umrah package without air tickets?",
]
# Create the Gradio interface.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    state = gr.State()  # running (question, answer) history carried across turns
    with gr.Row():
        txt = gr.Textbox(show_label=False, placeholder="Enter your question here")
    with gr.Row():
        submit_btn = gr.Button("Submit")
        clear_btn = gr.Button("Clear")
    # Clickable example questions that pre-fill the textbox.
    examples = gr.Examples(examples=test_questions, inputs=txt)
    # Event handling.
    submit_btn.click(chat_interface, [txt, state], [chatbot, state])
    # BUG FIX: Clear must reset the State as well as the Chatbot display;
    # otherwise the next submit re-appends to the stale history and the old
    # conversation reappears. (Also removed a stray " |" scrape artifact that
    # was fused onto the launch line and broke the syntax.)
    clear_btn.click(lambda: (None, None), None, [chatbot, state], queue=False)

# Launch the interface (debug=True surfaces errors in the console/UI).
demo.launch(debug=True)