|
import asyncio
import logging
import os
import re

import aiohttp
from telegram import Update
from telegram.ext import ApplicationBuilder, ContextTypes, MessageHandler, filters
|
|
|
# Telegram bot token. Read from the BOT_TOKEN environment variable so the
# secret is never committed to source control; falls back to the previous
# hard-coded empty string so existing behavior is unchanged when unset.
BOT_TOKEN = os.environ.get("BOT_TOKEN", "")

# llama.cpp HTTP server "completion" endpoint (default local port 8080).
LLAMA_API_URL = "http://127.0.0.1:8080/completion"

# One-time root logger setup: timestamped, leveled log lines for the whole bot.
logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
|
|
|
|
|
async def get_llama_response(prompt: str) -> str:
    """Send *prompt* to the local llama.cpp server and return the reply text.

    Wraps the prompt in a minimal ``User:``/``Assistant:`` chat template,
    POSTs it as JSON to ``LLAMA_API_URL``, and extracts the ``content``
    field of the response. Every failure mode is converted into a
    user-facing (Persian) error string instead of raising, so callers can
    forward the return value to the chat directly.
    """
    # "User:" is also listed as a stop token below so the model does not
    # hallucinate the next user turn.
    full_prompt = f"User: {prompt}\nAssistant:"
    payload = {
        "prompt": full_prompt,
        # NOTE(review): llama.cpp's native /completion endpoint expects
        # "n_predict"; "max_tokens" is honored only by its OpenAI-compatible
        # endpoints — verify which server version is deployed.
        "max_tokens": 60,
        "temperature": 0.5,
        "stop": ["</s>", "User:"],
    }
    try:
        # Local models can be slow; allow up to 120 seconds for the whole request.
        timeout = aiohttp.ClientTimeout(total=120)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.post(LLAMA_API_URL, json=payload) as resp:
                if resp.status == 200:
                    data = await resp.json()
                    # Fall back to a "model gave no answer" notice on empty content.
                    return data.get("content", "").strip() or "❔ مدل پاسخی نداد."
                text = await resp.text()
                # Lazy %-style args: the message is only formatted if emitted.
                logging.error("خطای مدل: %s - %s", resp.status, text)
                return f"❌ خطا از مدل ({resp.status}):\n{text}"
    except asyncio.TimeoutError:
        # Bug fix: the old message claimed a 30-second limit, but the actual
        # ClientTimeout above is 120 seconds.
        return "⏱️ مدل دیر پاسخ داد (بیش از ۱۲۰ ثانیه)."
    except aiohttp.ClientConnectionError:
        return "🔌 اتصال به مدل برقرار نشد."
    except Exception as e:
        logging.exception("خطای کلی:")
        return f"⚠️ خطای غیرمنتظره: {e}"
|
|
|
async def handle_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Reply to messages containing the /gemma command.

    Extracts the text after ``/gemma``, shows a "typing" chat action,
    queries the local model via :func:`get_llama_response`, and replies
    with the result. Messages without text or without the command are
    ignored silently.
    """
    message = update.message
    # Guard clauses: only act on text messages that mention /gemma.
    if not (message and message.text):
        return
    if "/gemma" not in message.text.lower():
        return
    # Bug fix: the command check above is case-insensitive, but the old
    # str.replace() strip was not, so "/GEMMA hi" kept the command in the
    # prompt. Strip it case-insensitively instead.
    prompt = re.sub(r"/gemma", "", message.text, flags=re.IGNORECASE).strip()
    if not prompt:
        # Robustness: don't forward an empty prompt to the model.
        await message.reply_text("❔ لطفاً بعد از /gemma متن سؤال خود را بنویسید.")
        return
    await message.chat.send_action("typing")
    response = await get_llama_response(prompt)
    await message.reply_text(response)
|
|
|
def main():
    """Build the bot application and run long polling (blocks until stopped)."""
    application = ApplicationBuilder().token(BOT_TOKEN).build()
    # Route every command-style message here; handle_command itself decides
    # whether the message actually contains /gemma.
    gemma_handler = MessageHandler(filters.COMMAND, handle_command)
    application.add_handler(gemma_handler)
    application.run_polling()


if __name__ == "__main__":
    main()
|
|