ysn-rfd committed
Commit 07b3f15 · verified · 1 parent: d2398bb

Update runs/python/telegram_gemini4.py

Files changed (1)
  1. runs/python/telegram_gemini4.py +55 -55
runs/python/telegram_gemini4.py CHANGED
@@ -1,55 +1,55 @@
- import asyncio
- import aiohttp
- from telegram import Update
- from telegram.ext import ApplicationBuilder, MessageHandler, ContextTypes, filters
- import logging
-
- BOT_TOKEN = "7490823724:AAEcskSIKg9t63nBME3Igkxw_QE4dl2Ql_U"
- LLAMA_API_URL = "http://127.0.0.1:8080/completion"
-
- logging.basicConfig(
-     format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
- )
-
- async def get_llama_response(prompt: str) -> str:
-     system_prompt = f"User: {prompt}\nAssistant:"
-     payload = {
-         "prompt": system_prompt,
-         "max_tokens": 100,
-         "temperature": 0.7,
-         "stop": ["</s>", "User:"]
-     }
-     try:
-         timeout = aiohttp.ClientTimeout(total=60)
-         async with aiohttp.ClientSession(timeout=timeout) as session:
-             async with session.post(LLAMA_API_URL, json=payload) as resp:
-                 if resp.status == 200:
-                     data = await resp.json()
-                     return data.get("content", "").strip() or "❔ The model returned no response."
-                 else:
-                     text = await resp.text()
-                     logging.error(f"Model error: {resp.status} - {text}")
-                     return f"❌ Error from the model ({resp.status}):\n{text}"
-     except asyncio.TimeoutError:
-         return "⏱️ The model took too long to respond."
-     except aiohttp.ClientConnectionError:
-         return "🔌 Could not connect to the model."
-     except Exception as e:
-         logging.exception("General error:")
-         return f"⚠️ Unexpected error: {str(e)}"
-
- async def handle_gemma(update: Update, context: ContextTypes.DEFAULT_TYPE):
-     message = update.message
-     if message and message.text and "/gemma" in message.text.lower():
-         prompt = message.text.replace("/gemma", "").strip()
-         await message.chat.send_action("typing")
-         response = await get_llama_response(prompt)
-         await message.reply_text(response)
-
- def main():
-     app = ApplicationBuilder().token(BOT_TOKEN).build()
-     app.add_handler(MessageHandler(filters.TEXT & (~filters.COMMAND), handle_gemma))
-     app.run_polling()
-
- if __name__ == "__main__":
-     main()

+ import asyncio
+ import aiohttp
+ from telegram import Update
+ from telegram.ext import ApplicationBuilder, MessageHandler, ContextTypes, filters
+ import logging
+
+ BOT_TOKEN = ""
+ LLAMA_API_URL = "http://127.0.0.1:8080/completion"
+
+ logging.basicConfig(
+     format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
+ )
+
+ async def get_llama_response(prompt: str) -> str:
+     system_prompt = f"User: {prompt}\nAssistant:"
+     payload = {
+         "prompt": system_prompt,
+         "max_tokens": 100,
+         "temperature": 0.7,
+         "stop": ["</s>", "User:"]
+     }
+     try:
+         timeout = aiohttp.ClientTimeout(total=60)
+         async with aiohttp.ClientSession(timeout=timeout) as session:
+             async with session.post(LLAMA_API_URL, json=payload) as resp:
+                 if resp.status == 200:
+                     data = await resp.json()
+                     return data.get("content", "").strip() or "❔ The model returned no response."
+                 else:
+                     text = await resp.text()
+                     logging.error(f"Model error: {resp.status} - {text}")
+                     return f"❌ Error from the model ({resp.status}):\n{text}"
+     except asyncio.TimeoutError:
+         return "⏱️ The model took too long to respond."
+     except aiohttp.ClientConnectionError:
+         return "🔌 Could not connect to the model."
+     except Exception as e:
+         logging.exception("General error:")
+         return f"⚠️ Unexpected error: {str(e)}"
+
+ async def handle_gemma(update: Update, context: ContextTypes.DEFAULT_TYPE):
+     message = update.message
+     if message and message.text and "/gemma" in message.text.lower():
+         prompt = message.text.replace("/gemma", "").strip()
+         await message.chat.send_action("typing")
+         response = await get_llama_response(prompt)
+         await message.reply_text(response)
+
+ def main():
+     app = ApplicationBuilder().token(BOT_TOKEN).build()
+     app.add_handler(MessageHandler(filters.TEXT & (~filters.COMMAND), handle_gemma))
+     app.run_polling()
+
+ if __name__ == "__main__":
+     main()
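
The only functional change in this commit is blanking the hardcoded BOT_TOKEN that had been committed to the repository. A minimal follow-up sketch, reading the token from an environment variable instead so it never lands in version control; the variable name TELEGRAM_BOT_TOKEN and the fail-fast behaviour are assumptions, not part of this file:

import os

# Assumption: the token is supplied via the TELEGRAM_BOT_TOKEN environment variable.
BOT_TOKEN = os.environ.get("TELEGRAM_BOT_TOKEN", "")
if not BOT_TOKEN:
    # Refuse to start rather than polling Telegram with an empty token.
    raise RuntimeError("Set TELEGRAM_BOT_TOKEN before running the bot.")

With this in place, main() can keep calling ApplicationBuilder().token(BOT_TOKEN).build() unchanged.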