ysn-rfd commited on
Commit
d2398bb
·
verified ·
1 Parent(s): 5d5ac92

Update runs/python/telegram_gemini3.py

Browse files
Files changed (1) hide show
  1. runs/python/telegram_gemini3.py +56 -56
runs/python/telegram_gemini3.py CHANGED
@@ -1,56 +1,56 @@
1
- import asyncio
2
- import aiohttp
3
- from telegram import Update
4
- from telegram.ext import ApplicationBuilder, MessageHandler, ContextTypes, filters
5
- import logging
6
-
7
- BOT_TOKEN = "<REDACTED — bot token was committed here and is compromised; revoke it via @BotFather>"
8
- LLAMA_API_URL = "http://127.0.0.1:8080/completion"
9
-
10
- logging.basicConfig(
11
- format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
12
- )
13
-
14
- # گرفتن پاسخ از LLaMA با timeout بلند و مدیریت خطا
15
- async def get_llama_response(prompt: str) -> str:
16
- system_prompt = f"User: {prompt}\nAssistant:"
17
- payload = {
18
- "prompt": system_prompt,
19
- "max_tokens": 60,
20
- "temperature": 0.5,
21
- "stop": ["</s>", "User:"]
22
- }
23
- try:
24
- timeout = aiohttp.ClientTimeout(total=120) # افزایش timeout به 30 ثانیه
25
- async with aiohttp.ClientSession(timeout=timeout) as session:
26
- async with session.post(LLAMA_API_URL, json=payload) as resp:
27
- if resp.status == 200:
28
- data = await resp.json()
29
- return data.get("content", "").strip() or "❔ مدل پاسخی نداد."
30
- else:
31
- text = await resp.text()
32
- logging.error(f"خطای مدل: {resp.status} - {text}")
33
- return f"❌ خطا از مدل ({resp.status}):\n{text}"
34
- except asyncio.TimeoutError:
35
- return "⏱️ مدل دیر پاسخ داد (بیش از ۳۰ ثانیه)."
36
- except aiohttp.ClientConnectionError:
37
- return "🔌 اتصال به مدل برقرار نشد."
38
- except Exception as e:
39
- logging.exception("خطای کلی:")
40
- return f"⚠️ خطای غیرمنتظره: {str(e)}"
41
-
42
- async def handle_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
43
- message = update.message
44
- if message and message.text and "/gemma" in message.text.lower():
45
- prompt = message.text.replace("/gemma", "").strip()
46
- await message.chat.send_action("typing")
47
- response = await get_llama_response(prompt)
48
- await message.reply_text(response)
49
-
50
- def main():
51
- app = ApplicationBuilder().token(BOT_TOKEN).build()
52
- app.add_handler(MessageHandler(filters.COMMAND, handle_command))
53
- app.run_polling()
54
-
55
- if __name__ == "__main__":
56
- main()
 
1
import asyncio
import aiohttp
from telegram import Update
from telegram.ext import ApplicationBuilder, MessageHandler, ContextTypes, filters
import logging

# Telegram bot token. Intentionally empty in this revision (the previous,
# committed token was leaked and must be revoked); supply a real token
# before running — NOTE(review): prefer loading it from an environment
# variable instead of hard-coding it here.
BOT_TOKEN = ""
# llama.cpp-style HTTP completion endpoint served on the local machine.
LLAMA_API_URL = "http://127.0.0.1:8080/completion"

# Root logger configuration: timestamped INFO-level output for the whole bot.
logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
13
+
14
# Request timeout for the model backend, in seconds. Single source of truth:
# both the aiohttp timeout and the user-facing timeout message derive from it.
LLAMA_TIMEOUT_SECONDS = 120


async def get_llama_response(prompt: str) -> str:
    """Query the local LLaMA completion endpoint and return its reply.

    Args:
        prompt: The user's message; it is wrapped in a "User: .../Assistant:"
            template before being sent to the model.

    Returns:
        The model's completion text, or a human-readable (Persian) error
        message when the request times out, the connection fails, the server
        answers with a non-200 status, or an unexpected error occurs. This
        function never raises — all failures are folded into the return value.
    """
    system_prompt = f"User: {prompt}\nAssistant:"
    payload = {
        "prompt": system_prompt,
        "max_tokens": 60,
        "temperature": 0.5,
        # Stop at end-of-sequence or when the model starts a new "User:" turn.
        "stop": ["</s>", "User:"],
    }
    try:
        timeout = aiohttp.ClientTimeout(total=LLAMA_TIMEOUT_SECONDS)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.post(LLAMA_API_URL, json=payload) as resp:
                if resp.status == 200:
                    data = await resp.json()
                    # Empty or whitespace-only completion -> friendly fallback.
                    return data.get("content", "").strip() or "❔ مدل پاسخی نداد."
                text = await resp.text()
                # Lazy %-args so formatting only happens when the record is emitted.
                logging.error("خطای مدل: %s - %s", resp.status, text)
                return f"❌ خطا از مدل ({resp.status}):\n{text}"
    except asyncio.TimeoutError:
        # BUG FIX: the previous message (and the inline comment) claimed a
        # 30-second limit while the configured timeout was 120 seconds.
        # Report the actual configured value.
        return f"⏱️ مدل دیر پاسخ داد (بیش از {LLAMA_TIMEOUT_SECONDS} ثانیه)."
    except aiohttp.ClientConnectionError:
        return "🔌 اتصال به مدل برقرار نشد."
    except Exception as e:
        # Last-resort boundary handler: log the traceback, return a message.
        logging.exception("خطای کلی:")
        return f"⚠️ خطای غیرمنتظره: {str(e)}"
41
+
42
async def handle_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle a /gemma command: forward the prompt to the model and reply.

    Triggers only when the message text *starts with* "/gemma" (any case,
    optionally followed by the "@BotName" suffix Telegram appends in group
    chats), sends a "typing" chat action, queries the model, and replies
    with the result.
    """
    message = update.message
    if not (message and message.text):
        return
    text = message.text.strip()
    # BUG FIX: the old check used `"/gemma" in text.lower()` together with
    # `text.replace("/gemma", "")`, which (a) fired on any message merely
    # containing the token anywhere and (b) deleted *every* occurrence of
    # "/gemma" from the prompt, mangling prompts that mention the command.
    # Match the leading command only and strip it exactly once.
    if not text.lower().startswith("/gemma"):
        return
    remainder = text[len("/gemma"):]
    # In groups Telegram may deliver "/gemma@BotName ..."; drop that suffix.
    if remainder.startswith("@"):
        _, _, remainder = remainder.partition(" ")
    prompt = remainder.strip()
    await message.chat.send_action("typing")
    response = await get_llama_response(prompt)
    await message.reply_text(response)
49
+
50
def main():
    """Assemble the Telegram application, wire up the handler, start polling."""
    application = ApplicationBuilder().token(BOT_TOKEN).build()
    command_handler = MessageHandler(filters.COMMAND, handle_command)
    application.add_handler(command_handler)
    # Blocks until the process is interrupted; manages its own event loop.
    application.run_polling()


if __name__ == "__main__":
    main()