# LAHJA-AI — app.py
# Hugging Face Space serving wasmdashai/Seed-Coder-8B-Instruct-V1 via a Gradio chat UI.
from transformers import pipeline
import gradio as gr
import torch
# Load the text-generation model, on GPU when one is available.
# BUG FIX: transformers pipelines have no .cuda() method — calling it raised
# AttributeError at startup. Device placement is done via the `device` argument
# instead (0 = first CUDA device, -1 = CPU fallback).
pipe = pipeline(
    "text-generation",
    model="wasmdashai/Seed-Coder-8B-Instruct-V1",
    device=0 if torch.cuda.is_available() else -1,
)
# Reply function used by the Gradio ChatInterface.
def respond(message, chat_history):
    """Generate the model's reply to *message* given the conversation so far.

    Args:
        message: The new user message (str).
        chat_history: Gradio history as a list of (user_text, bot_text) pairs.

    Returns:
        The assistant's reply text (str).
    """
    # BUG FIX: the original replayed only m[0] and tagged every turn "user",
    # so assistant replies were dropped and the model never saw its own answers.
    messages = []
    for user_text, bot_text in chat_history:
        messages.append({"role": "user", "content": user_text})
        if bot_text:
            messages.append({"role": "assistant", "content": bot_text})
    messages.append({"role": "user", "content": message})

    output = pipe(messages, max_new_tokens=200, do_sample=True)

    # BUG FIX: with chat-format (list-of-messages) input, `generated_text` is
    # the full message list, not a plain string — the new assistant reply is
    # the last entry. Keep the string path as a fallback for plain-text mode.
    generated = output[0]["generated_text"]
    if isinstance(generated, list):
        return generated[-1]["content"]
    return generated
# Gradio chat UI wired to respond().
# NOTE(review): `retry_btn`/`clear_btn` kwargs exist in Gradio 4.x but were
# removed in Gradio 5 — confirm the pinned gradio version if this errors.
_chat_display = gr.Chatbot(height=400)
_chat_input = gr.Textbox(
    placeholder="اكتب رسالتك هنا...",
    container=False,
    scale=7,
)

chat = gr.ChatInterface(
    fn=respond,
    title="Seed-Coder Chatbot",
    description="دردشة مع نموذج Seed-Coder-8B-Instruct باستخدام GPU",
    chatbot=_chat_display,
    textbox=_chat_input,
    retry_btn="🔁 إعادة",
    clear_btn="🗑️ مسح",
)

# Start the app server.
chat.launch()