import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Load the OpenBuddy model and tokenizer
model_name = "OpenBuddy/openbuddy-7b-v1.3-q4_0-enc"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)  # model config is loaded from the checkpoint
# Create a function to generate responses from user inputs
def generate_response(input_text):
    # Tokenize the user input
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    # Generate a reply (greedy decoding, capped at 100 tokens)
    with torch.no_grad():
        output = model.generate(input_ids, max_length=100, pad_token_id=tokenizer.eos_token_id)
    # Decode the generated token IDs back into text
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return response
# Define the Gradio interface
iface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="OpenBuddy Chat",
    description="Enter your message to chat with OpenBuddy."
)
# Launch the interface
iface.launch()