import os

import requests
from telegram import Update  # fixed: `Update` lives in `telegram`, not `telegram_gemini`
from telegram.ext import ApplicationBuilder, ContextTypes, MessageHandler, filters

# Bot token is read from the environment so it is never committed to source
# control; falls back to the previous empty-string default.
BOT_TOKEN = os.environ.get("BOT_TOKEN", "")
# Local llama.cpp HTTP server's completion endpoint.
LLAMA_API_URL = "http://127.0.0.1:8080/completion"
# Fetch a completion from the local llama.cpp server
def get_llama_response(prompt):
    """Query the local llama.cpp completion endpoint and return its text.

    Parameters:
        prompt: User text sent as the completion prompt.

    Returns:
        The model's completion text (whitespace-stripped), or the
        user-facing error message when the server is unreachable,
        times out, or responds with a non-2xx status.
    """
    payload = {
        "prompt": prompt,
        "max_tokens": 256,
        "temperature": 0.7,
        # Stop at end-of-sequence or when the model starts a new user turn.
        "stop": ["</s>", "User:"],
    }
    try:
        # Timeout prevents the bot from hanging forever if the llama.cpp
        # server is up but unresponsive (original call had no timeout).
        response = requests.post(LLAMA_API_URL, json=payload, timeout=120)
    except requests.RequestException:
        # Connection refused / timeout / DNS failure: report the same
        # error message instead of raising inside the message handler.
        return "خطا در ارتباط با مدل زبان."
    if response.ok:
        # llama.cpp's /completion endpoint returns the text under "content";
        # .get guards against an unexpected payload shape.
        return response.json().get("content", "").strip()
    return "خطا در ارتباط با مدل زبان."
# Handler for incoming Telegram text messages
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Answer an incoming text message with the language model's reply."""
    # Forward the raw message text to the model and send its answer back
    # into the same chat.
    await update.message.reply_text(get_llama_response(update.message.text))
# Bot startup
def main():
    """Build the Telegram application and start long polling."""
    app = ApplicationBuilder().token(BOT_TOKEN).build()
    # Respond to plain text messages only; ignore /commands.
    app.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_message))
    # Blocks until the process is interrupted.
    app.run_polling()


# Guard the entry point so importing this module does not start polling.
if __name__ == "__main__":
    main()