# Burmese ChatGPT — Hugging Face Spaces app (file size 1,311 bytes, commit 3c4ebcd).
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
# Define model name: Hugging Face Hub ID of the Burmese conversational GPT model.
MODEL_NAME = "jojo-ai-mst/MyanmarGPT-Chat"
# Load the tokenizer and model once at import time so every request reuses them.
# NOTE: this downloads weights from the Hub on first run.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype="float32",  # Optimized for CPU usage (no half-precision needed)
    low_cpu_mem_usage=True  # Helps with limited memory while loading weights
)
# Chatbot function
def chatbot(prompt):
    """Generate a Burmese chat response to *prompt* with MyanmarGPT-Chat.

    Args:
        prompt: User message text (expected to be Burmese).

    Returns:
        The decoded generation as a string, special tokens removed.
        Note: the output includes the echoed prompt, since the full
        sequence (prompt + continuation) is decoded.
    """
    inputs = tokenizer(prompt, return_tensors="pt")  # Tokenize the input text
    outputs = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,  # be explicit; avoids mask-inference warnings
        max_new_tokens=150,   # Limit response length
        do_sample=True,       # FIX: without sampling, temperature/top_p below are ignored
        temperature=0.7,      # Control randomness
        top_p=0.9,            # Nucleus sampling
        pad_token_id=tokenizer.eos_token_id,  # GPT-style models have no pad token by default
    )
    # Decode and return the generated text
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
# Gradio interface: one multi-line text box in, one text box out.
_prompt_box = gr.Textbox(
    label="Chat with Burmese ChatGPT",
    placeholder="Type your message here in Burmese...",
    lines=5,
)
_reply_box = gr.Textbox(label="Response")

interface = gr.Interface(
    fn=chatbot,
    inputs=_prompt_box,
    outputs=_reply_box,
    title="Burmese ChatGPT",
    description="A chatbot powered by MyanmarGPT-Chat for Burmese conversations.",
)
# Launch the interface
# Start the Gradio web server only when executed as a script (not on import).
if __name__ == "__main__":
    interface.launch()