# Source: Hugging Face Space by kenjaAI — app.py, commit 2e9f49b (verified), 1.73 kB.
import os

import gradio as gr
from transformers import pipeline, set_seed
# Initialize the text-generation pipeline.
# NOTE: 'gpt-3.5-turbo' is an OpenAI API model, not a Hugging Face checkpoint,
# so transformers.pipeline cannot load it — use an open causal LM instead.
# The access token (only needed for gated/private models) is read from the
# environment instead of being hard-coded into source control; `use_auth_token`
# is deprecated in favor of `token`.
chat = pipeline(
    'text-generation',
    model='gpt2',
    token=os.environ.get('HF_TOKEN'),  # None is fine for public models
)
def chat_with_chatgpt(user_message, system_message, chat_history):
    """Generate one model reply and append the exchange to the transcript.

    Parameters
    ----------
    user_message : str
        The user's latest message.
    system_message : str
        Instruction text, prepended once at the start of the conversation.
    chat_history : str or None
        Running transcript. Gradio's ``gr.State`` passes ``None`` on the
        very first call.

    Returns
    -------
    tuple[str, str]
        The updated transcript twice: once for the display textbox and once
        for the state component.
    """
    set_seed(42)  # Optional: for reproducible sampling across calls
    # gr.State starts as None before the first turn — normalise it here,
    # otherwise the `in` test below raises TypeError and the literal string
    # "None" leaks into the prompt.
    chat_history = chat_history or ""
    # Include the system message only once, at the beginning of the chat.
    if system_message and system_message not in chat_history:
        input_text = f"{system_message}\n{chat_history} You: {user_message}"
    else:
        input_text = f"{chat_history} You: {user_message}"
    # Generate a continuation; max_length bounds prompt + completion tokens.
    response = chat(input_text, max_length=1000)
    generated_text = response[0]['generated_text']
    # The text-generation pipeline echoes the prompt, so strip it off to
    # keep only the model's new reply.
    new_response = generated_text[len(input_text):].strip()
    # Append this turn to the transcript.
    new_chat_history = f"{chat_history} You: {user_message}\nChatGPT: {new_response}\n"
    return new_chat_history, new_chat_history  # display value and state value
# Create the Gradio interface.
# The gr.inputs / gr.outputs namespaces were deprecated in Gradio 3.x and
# removed in 4.x — components now live directly under `gr`. gr.State also
# takes no `label` argument, and a State input must be paired with a State
# output so the transcript actually round-trips between calls (the original
# hidden Textbox output never updated the state).
iface = gr.Interface(
    fn=chat_with_chatgpt,
    inputs=[
        gr.Textbox(label="Your Message"),
        gr.Textbox(label="System Message (Enter only before starting the chat)", lines=2),
        gr.State(),  # running chat transcript, threaded through each call
    ],
    outputs=[
        gr.Textbox(label="Chat History"),
        gr.State(),  # updated transcript, fed back in as the State input
    ],
    title="Chat with ChatGPT 3.5",
    description="Start with a system message and then continue chatting like in ChatGPT.",
)

if __name__ == "__main__":
    iface.launch()