ysn-rfd committed on
Commit
9905550
·
verified ·
1 Parent(s): 9fd7dd5

Upload 11 files

Browse files
runs/python/__pycache__/config.cpython-312.pyc ADDED
Binary file (406 Bytes). View file
 
runs/python/__pycache__/telegram.cpython-312.pyc ADDED
Binary file (1.79 kB). View file
 
runs/python/__pycache__/telegram_gemini.cpython-312.pyc ADDED
Binary file (1.8 kB). View file
 
runs/python/bot.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# bot.py
#
# Minimal Telegram bot: forwards every incoming text message to the OpenAI
# chat-completions API and replies with the model's answer.
from telegram import Update
from telegram.ext import ApplicationBuilder, MessageHandler, filters, ContextTypes
from openai import OpenAI

import config

# Shared API client; credentials come from config.py.
# (Fix: the redundant `import openai` was removed — only the OpenAI class is used.)
client = OpenAI(api_key=config.OPENAI_API_KEY)
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Reply to an incoming text message with a GPT-3.5 completion.

    Fix: guards against updates that carry no text message (edited
    messages, media, channel posts), which previously raised
    AttributeError on `update.message.text`.
    """
    message = update.message
    if message is None or not message.text:
        return

    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "user", "content": message.text}
        ]
    )

    # First choice is the bot's answer.
    reply = response.choices[0].message.content
    await message.reply_text(reply)
# Start the bot: answer every plain text message (commands excluded)
# and poll Telegram for updates until interrupted.
app = ApplicationBuilder().token(config.TELEGRAM_BOT_TOKEN).build()
app.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_message))
app.run_polling()
runs/python/config.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
# config.py
#
# SECURITY: real API credentials were previously committed in this file.
# They must be treated as compromised — revoke/rotate both keys — and are
# now read from environment variables instead of being hard-coded.
import os

OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")
TELEGRAM_BOT_TOKEN = os.environ.get("TELEGRAM_BOT_TOKEN", "")
runs/python/gemini_telegram.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import requests
# Fix: `Update` comes from the python-telegram-bot package `telegram`,
# not from the sibling script `telegram_gemini` (which itself only
# re-imported it — a fragile, wrong dependency).
from telegram import Update
from telegram.ext import ApplicationBuilder, MessageHandler, ContextTypes, filters

# SECURITY: the bot token was previously hard-coded and committed; rotate
# it and supply the replacement via the environment.
BOT_TOKEN = os.environ.get("TELEGRAM_BOT_TOKEN", "")
# Local llama.cpp HTTP completion endpoint.
LLAMA_API_URL = "http://127.0.0.1:8080/completion"
+
8
+ # تابع گرفتن پاسخ از سرور llama.cpp
9
+ def get_llama_response(prompt):
10
+ payload = {
11
+ "prompt": prompt,
12
+ "max_tokens": 256,
13
+ "temperature": 0.7,
14
+ "stop": ["</s>", "User:"]
15
+ }
16
+ response = requests.post(LLAMA_API_URL, json=payload)
17
+ if response.ok:
18
+ return response.json()["content"].strip()
19
+ else:
20
+ return "خطا در ارتباط با مدل زبان."
21
+
22
+ # تابع هندل پیام تلگرام
23
+ async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
24
+ user_input = update.message.text
25
+ reply = get_llama_response(user_input)
26
+ await update.message.reply_text(reply)
27
+
28
+ # راه‌اندازی ربات
29
+ app = ApplicationBuilder().token(BOT_TOKEN).build()
30
+ app.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_message))
31
+ app.run_polling()
32
+
33
+
34
+
35
+
36
+
runs/python/radar.py ADDED
@@ -0,0 +1,263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from mpl_toolkits.mplot3d import Axes3D, art3d
import random

# Simulation parameters (distances in metres, angles in degrees).
WORLD_SIZE = 100000        # edge length of the square world
AIRCRAFT_COUNT = 100       # number of simulated aircraft
RADAR_RANGE = 70000        # maximum radar detection range
RADAR_ALTITUDE_LIMIT = 20000  # max altitude radar covers in meters
SCAN_SPEED = 2.0  # degrees per frame
BEAM_WIDTH = 5.0  # degrees width of radar beam
TRACK_LENGTH = 20  # length of tail/track for aircrafts
MAX_ACCELERATION = 5  # m/s^2 max change in velocity per frame
# NOTE(review): SCAN_SPEED and MAX_ACCELERATION are not referenced anywhere
# in this file — confirm whether they were meant to drive the sweep/physics.

# Aircraft types with properties:
# rcs_range = (min, max) radar cross-section; color/size = plot styling.
AIRCRAFT_TYPES = {
    'commercial': {'rcs_range': (10, 20), 'color': 'cyan', 'size': 30},
    'military': {'rcs_range': (5, 12), 'color': 'red', 'size': 40},
    'drone': {'rcs_range': (1, 4), 'color': 'yellow', 'size': 20},
    'unknown': {'rcs_range': (0.5, 2), 'color': 'magenta', 'size': 25}
}
# Event: a moving 3-D region (storm / no-fly zone / jamming / interference)
# that drifts through the world and occasionally toggles on/off.
class MovingEvent3D:
    def __init__(self, evt_type, center, radius, altitude, velocity):
        self.type = evt_type
        # Horizontal (x, y) centre of the event region.
        self.center = np.array(center, dtype=float)
        self.radius = radius
        self.altitude = altitude
        # 3-component velocity [vx, vy, vz]; vz moves the altitude.
        self.velocity = np.array(velocity, dtype=float)
        self.active = True

    def update(self):
        """Advance the event one frame and bounce off world limits.

        Fix: the original added the full 3-vector velocity to the 2-vector
        center (a NumPy shape mismatch that raises ValueError) and never
        moved the altitude. Now x/y advance by velocity[:2] and altitude
        by velocity[2], matching the bounce logic below.
        """
        self.center += self.velocity[:2]
        self.altitude += self.velocity[2]
        # Bounce inside world bounds for x, y
        for i in [0, 1]:
            if self.center[i] < 0 or self.center[i] > WORLD_SIZE:
                self.velocity[i] = -self.velocity[i]
                self.center[i] = np.clip(self.center[i], 0, WORLD_SIZE)
        # Bounce altitude inside radar altitude limit
        if self.altitude < 0 or self.altitude > RADAR_ALTITUDE_LIMIT:
            self.velocity[2] = -self.velocity[2]
            self.altitude = np.clip(self.altitude, 0, RADAR_ALTITUDE_LIMIT)
        # Rarely toggle the event on/off to simulate transient conditions
        if random.random() < 0.001:
            self.active = not self.active
# Build the initial set of four randomly-placed moving world events.
def generate_moving_events_3d():
    """Return four MovingEvent3D instances with type-specific radii."""
    type_radius = {
        'storm': 15000,
        'no-fly-zone': 10000,
        'jamming': 8000,
        'interference': 12000,
    }
    created = []
    for _ in range(4):
        kind = random.choice(['storm', 'no-fly-zone', 'jamming', 'interference'])
        xy = np.random.uniform(0, WORLD_SIZE, 2)
        alt = np.random.uniform(0, RADAR_ALTITUDE_LIMIT)
        vel = np.random.uniform(-50, 50, 3)
        created.append(MovingEvent3D(kind, xy, type_radius[kind], alt, vel))
    return created
# Module-level list of active world events, consumed by is_event_active_3d().
world_events = generate_moving_events_3d()
# Create the simulated aircraft population with per-type radar properties.
def generate_aircraft_3d():
    """Return a list of aircraft dicts (position, velocity, RCS, track, ...)."""
    fleet = []
    type_names = list(AIRCRAFT_TYPES.keys())
    for idx in range(AIRCRAFT_COUNT):
        kind = random.choices(type_names, weights=[0.5, 0.3, 0.15, 0.05])[0]
        rcs_lo, rcs_hi = AIRCRAFT_TYPES[kind]['rcs_range']
        xy = np.random.uniform(0, WORLD_SIZE, 2)
        alt = np.random.uniform(0, RADAR_ALTITUDE_LIMIT)
        fleet.append({
            'id': idx,
            'type': kind,
            'position': np.array([*xy, alt]),
            'velocity': np.random.uniform(-50, 50, 3),
            'rcs': random.uniform(rcs_lo, rcs_hi),
            'callsign': f"{kind[:2].upper()}{idx:03}",
            'emergency': random.random() < 0.03,
            'track': [],
            'acceleration': np.zeros(3),
        })
    return fleet
# Global simulation state.
aircrafts = generate_aircraft_3d()  # all simulated aircraft
radar_angle = [0]  # current sweep bearing in degrees (list so handlers can mutate it)
radar_pos = np.array([WORLD_SIZE/2, WORLD_SIZE/2, 0])  # radar at world centre, ground level
paused = [False]  # animation pause flag, toggled by the space bar
# Return the type of the first active event whose region contains `pos`,
# or None when the position is clear of all events.
def is_event_active_3d(pos):
    for evt in world_events:
        if not evt.active:
            continue
        horizontal = np.linalg.norm(pos[:2] - evt.center)
        vertical = abs(pos[2] - evt.altitude)
        # Inside the event volume: full radius horizontally, half vertically.
        if horizontal < evt.radius and vertical < evt.radius / 2:
            return evt.type
    return None
# Probabilistic radar detection model for a single aircraft.
def detect_3d(ac, radar_pos):
    """Return True when the radar beam currently detects `ac`.

    Combines geometric visibility (range, altitude, beam bearing) with an
    SNR-based detection probability degraded by active world events.
    """
    offset = ac['position'] - radar_pos
    distance = np.linalg.norm(offset)
    # Outside radar range or above the covered altitude: never detected.
    if distance > RADAR_RANGE or ac['position'][2] > RADAR_ALTITUDE_LIMIT:
        return False
    # Bearing test against the current sweep direction.
    bearing = (np.degrees(np.arctan2(offset[1], offset[0])) + 360) % 360
    angular_diff = abs((bearing - radar_angle[0] + 180) % 360 - 180)
    if angular_diff > BEAM_WIDTH / 2:
        return False
    # SNR model: log-range falloff plus radar cross-section, degraded by events.
    event = is_event_active_3d(ac['position'])
    snr_val = 20 - 20*np.log10(distance + 1) + ac['rcs']
    penalties = {'jamming': 50, 'storm': 15, 'interference': 25}
    if event in penalties:
        snr_val -= penalties[event]
    # Logistic detection probability with additive Gaussian noise.
    probability = 1 / (1 + np.exp(-(snr_val - 10)))
    jitter = np.random.normal(0, 0.1)
    return np.random.rand() < (probability + jitter)
# Setup plot: black 3-D scene spanning the world, with persistent artists
# that the animation callback mutates in place.
fig = plt.figure(figsize=(14, 10))
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim(0, WORLD_SIZE)
ax.set_ylim(0, WORLD_SIZE)
ax.set_zlim(0, RADAR_ALTITUDE_LIMIT)
ax.set_facecolor('black')

# Scatter artists: all aircraft (coloured by type), detected ones, emergencies.
all_scatter = ax.scatter([], [], [], c=[], s=[], label='Aircraft')
detected_scatter = ax.scatter([], [], [], c='lime', s=60, label='Detected')
emergency_scatter = ax.scatter([], [], [], c='orange', s=80, marker='^', label='Emergency')
radar_sweep_line, = ax.plot([], [], [], c='cyan', linewidth=3, label='Radar Sweep')

# One faint white polyline per aircraft for its movement trail.
track_lines = [ax.plot([], [], [], c='white', alpha=0.3, linewidth=1)[0] for _ in range(AIRCRAFT_COUNT)]

event_spheres = []
event_colors = {'storm':'blue', 'no-fly-zone':'yellow', 'jamming':'magenta', 'interference':'purple'}

def plot_sphere(center, radius, color):
    """Draw a translucent sphere of `radius` at 3-D `center` on the axes."""
    u = np.linspace(0, 2*np.pi, 20)
    v = np.linspace(0, np.pi, 20)
    x = center[0] + radius * np.outer(np.cos(u), np.sin(v))
    y = center[1] + radius * np.outer(np.sin(u), np.sin(v))
    z = center[2] + radius * np.outer(np.ones(np.size(u)), np.cos(v))
    return ax.plot_surface(x, y, z, color=color, alpha=0.15)

# One translucent sphere per world event, coloured by event type.
# NOTE(review): spheres are drawn once and never repositioned even though
# the events carry velocities — confirm whether they were meant to move.
for evt in world_events:
    sphere = plot_sphere(np.array([*evt.center, evt.altitude]), evt.radius, event_colors[evt.type])
    event_spheres.append(sphere)

# Radar range circle on ground (flattened onto the z=0 plane).
radar_circle = plt.Circle((radar_pos[0], radar_pos[1]), RADAR_RANGE, color='cyan', alpha=0.1)
ax.add_patch(radar_circle)
art3d.pathpatch_2d_to_3d(radar_circle, z=0, zdir="z")
def update(frame):
    """Per-frame animation callback: advance the sweep, move every aircraft,
    run detection, and refresh all plot artists in place."""
    if paused[0]:
        return

    # Advance the radar sweep angle (1 degree per frame;
    # NOTE(review): SCAN_SPEED is defined above but not used here — confirm).
    radar_angle[0] = (radar_angle[0] + 1) % 360

    all_pos = []
    all_colors = []
    all_sizes = []

    detected_pos = []
    emergency_pos = []

    for ac in aircrafts:
        # Clamp speed to a maximum
        v_mag = np.linalg.norm(ac['velocity'])
        max_speed = 250  # m/s
        if v_mag > max_speed:
            ac['velocity'] = (ac['velocity'] / v_mag) * max_speed

        # Update position
        ac['position'] += ac['velocity']

        # Bounce off the world boundaries (x, y)
        for i in [0, 1]:
            if ac['position'][i] < 0 or ac['position'][i] > WORLD_SIZE:
                ac['velocity'][i] = -ac['velocity'][i]
                ac['position'][i] = np.clip(ac['position'][i], 0, WORLD_SIZE)
        # Bounce off the ground / altitude ceiling
        if ac['position'][2] < 0 or ac['position'][2] > RADAR_ALTITUDE_LIMIT:
            ac['velocity'][2] = -ac['velocity'][2]
            ac['position'][2] = np.clip(ac['position'][2], 0, RADAR_ALTITUDE_LIMIT)

        # Record the movement trail (bounded list of recent positions)
        ac['track'].append(ac['position'].copy())
        if len(ac['track']) > TRACK_LENGTH:
            ac['track'].pop(0)

        all_pos.append(ac['position'])
        all_colors.append(AIRCRAFT_TYPES[ac['type']]['color'])
        all_sizes.append(AIRCRAFT_TYPES[ac['type']]['size'])

        if detect_3d(ac, radar_pos):
            detected_pos.append(ac['position'])
            if ac['emergency']:
                emergency_pos.append(ac['position'])

    # Convert to np.array for column slicing below
    all_pos = np.array(all_pos)
    detected_pos = np.array(detected_pos)
    emergency_pos = np.array(emergency_pos)

    # Refresh the scatter of all aircraft.
    # (_offsets3d is a private mpl_toolkits attribute — the usual way to
    # update a 3-D scatter in place.)
    if len(all_pos) > 0:
        all_scatter._offsets3d = (all_pos[:,0], all_pos[:,1], all_pos[:,2])
        all_scatter.set_color(all_colors)
        all_scatter.set_sizes(all_sizes)
    else:
        all_scatter._offsets3d = ([], [], [])
        all_scatter.set_color([])
        all_scatter.set_sizes([])

    # Refresh the scatter of currently-detected aircraft
    if len(detected_pos) > 0:
        detected_scatter._offsets3d = (detected_pos[:,0], detected_pos[:,1], detected_pos[:,2])
        detected_scatter.set_sizes([60]*len(detected_pos))
    else:
        detected_scatter._offsets3d = ([], [], [])
        detected_scatter.set_sizes([])

    # Refresh the scatter of detected aircraft flagged as emergencies
    if len(emergency_pos) > 0:
        emergency_scatter._offsets3d = (emergency_pos[:,0], emergency_pos[:,1], emergency_pos[:,2])
        emergency_scatter.set_sizes([80]*len(emergency_pos))
    else:
        emergency_scatter._offsets3d = ([], [], [])
        emergency_scatter.set_sizes([])

    # Refresh the movement-trail lines
    for i, ac in enumerate(aircrafts):
        if len(ac['track']) >= 2:
            track_arr = np.array(ac['track'])
            track_lines[i].set_data(track_arr[:,0], track_arr[:,1])
            track_lines[i].set_3d_properties(track_arr[:,2])
        else:
            track_lines[i].set_data([], [])
            track_lines[i].set_3d_properties([])

    # Refresh the radar sweep line (drawn on the ground plane, z = 0)
    angle_rad = np.radians(radar_angle[0])
    x = [radar_pos[0], radar_pos[0] + RADAR_RANGE * np.cos(angle_rad)]
    y = [radar_pos[1], radar_pos[1] + RADAR_RANGE * np.sin(angle_rad)]
    z = [0, 0]
    radar_sweep_line.set_data(x, y)
    radar_sweep_line.set_3d_properties(z)

    ax.set_title(f"3D Radar Simulation - Scan Angle: {radar_angle[0]:.1f}°")
    # NOTE(review): world_events are never update()d here, so event regions
    # stay static despite carrying velocities — confirm intended behaviour.
# Keyboard handler: the space bar toggles pause/resume of the animation.
def on_key(event):
    if event.key != ' ':
        return
    paused[0] = not paused[0]
# Wire the keyboard handler and start the animation loop (50 ms per frame).
fig.canvas.mpl_connect('key_press_event', on_key)

ani = FuncAnimation(fig, update, interval=50)
plt.legend(loc='upper right')
plt.show()
runs/python/telegram_gemini.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import requests
from telegram import Update
from telegram.ext import ApplicationBuilder, MessageHandler, ContextTypes, filters

# SECURITY NOTE(review): this bot token is committed in plain text — it must
# be treated as leaked; rotate it and load it from the environment instead.
BOT_TOKEN = "7490823724:AAEcskSIKg9t63nBME3Igkxw_QE4dl2Ql_U"
# Local llama.cpp HTTP completion endpoint.
LLAMA_API_URL = "http://127.0.0.1:8080/completion"
# Fetch a completion from the local llama.cpp server.
def get_llama_response(prompt):
    """Wrap `prompt` in a User/Assistant template and return the reply text."""
    request_body = {
        "prompt": f"User: {prompt}\nAssistant:",
        "max_tokens": 64,
        "temperature": 0.7,
        "stop": ["</s>", "User:"]
    }
    result = requests.post(LLAMA_API_URL, json=request_body)
    if not result.ok:
        return "خطا در ارتباط با مدل زبان."
    return result.json()["content"].strip()
# Handle messages that start with "/" (bot commands).
async def handle_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Strip the /gemma command prefix and reply with the model's answer.

    Fix: the original used text.lstrip('/gemma'), which strips any leading
    run of the characters '/', 'g', 'e', 'm', 'a' — not the literal prefix —
    leaving a leading space and potentially eating the start of the prompt.
    """
    message = update.message
    text = message.text
    if text.lower().startswith("/gemma"):
        user_input = text[len("/gemma"):].strip()
    else:
        # Other commands: drop the leading slash only.
        user_input = text.lstrip("/").strip()
    reply = get_llama_response(user_input)
    await message.reply_text(reply)
# Start the bot: route every command message ("/...") to handle_command.
app = ApplicationBuilder().token(BOT_TOKEN).build()
app.add_handler(MessageHandler(filters.COMMAND, handle_command))
app.run_polling()
runs/python/telegram_gemini2.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import asyncio
import aiohttp  # async HTTP client for the llama.cpp requests
from telegram import Update
from telegram.ext import ApplicationBuilder, MessageHandler, ContextTypes, filters
import logging

# Configuration.
# SECURITY NOTE(review): the bot token is committed in plain text — rotate
# it and load it from the environment instead.
BOT_TOKEN = "7490823724:AAEcskSIKg9t63nBME3Igkxw_QE4dl2Ql_U"
LLAMA_API_URL = "http://127.0.0.1:8080/completion"

# Logging setup.
logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
# Query the llama.cpp server asynchronously and return the completion text.
async def get_llama_response(prompt: str) -> str:
    """Send `prompt` to the llama.cpp server and return its reply.

    Fix: uses aiohttp.ClientTimeout on the session (as the sibling scripts
    do) instead of passing a bare integer to post(); failure modes map to
    user-facing Persian messages.
    """
    system_prompt = f"User: {prompt}\nAssistant:"
    payload = {
        "prompt": system_prompt,
        "max_tokens": 64,
        "temperature": 0.7,
        "stop": ["</s>", "User:"]
    }
    try:
        timeout = aiohttp.ClientTimeout(total=60)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.post(LLAMA_API_URL, json=payload) as resp:
                if resp.status == 200:
                    data = await resp.json()
                    return data.get("content", "").strip()
                else:
                    logging.error(f"LLaMA API Error: {resp.status}")
                    return "❌ خطا در دریافت پاسخ از مدل زبان."
    except asyncio.TimeoutError:
        return "⏱️ مدل دیر پاسخ داد. لطفاً دوباره تلاش کنید."
    except Exception:
        # `as e` binding removed — the exception is logged with traceback.
        logging.exception("خطا در ارتباط با مدل:")
        return "⚠️ خطا در پردازش درخواست شما."
# Handle command messages (those starting with "/").
async def handle_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Strip the leading slash, show "typing", query the model, and reply."""
    message = update.message
    if not (message and message.text):
        return
    prompt = message.text.lstrip('/')
    await message.chat.send_action("typing")
    answer = await get_llama_response(prompt)
    await message.reply_text(answer)
# Bot setup and entry point: route all command messages to handle_command.
def main():
    app = ApplicationBuilder().token(BOT_TOKEN).build()
    app.add_handler(MessageHandler(filters.COMMAND, handle_command))
    app.run_polling()

if __name__ == "__main__":
    main()
runs/python/telegram_gemini3.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import asyncio
import aiohttp
from telegram import Update
from telegram.ext import ApplicationBuilder, MessageHandler, ContextTypes, filters
import logging

# SECURITY NOTE(review): the bot token is committed in plain text — rotate
# it and load it from the environment instead.
BOT_TOKEN = "7490823724:AAEcskSIKg9t63nBME3Igkxw_QE4dl2Ql_U"
LLAMA_API_URL = "http://127.0.0.1:8080/completion"

logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
# Fetch a reply from the LLaMA server with a long timeout and error handling.
REQUEST_TIMEOUT_SECONDS = 120  # total time budget for one completion request

async def get_llama_response(prompt: str) -> str:
    """Send `prompt` to the llama.cpp server and return the completion.

    Fix: the old comment and the timeout error message both said
    "30 seconds" while the actual budget was 120 — the value is now a
    named constant used by both the request and the user-facing message.
    """
    system_prompt = f"User: {prompt}\nAssistant:"
    payload = {
        "prompt": system_prompt,
        "max_tokens": 60,
        "temperature": 0.5,
        "stop": ["</s>", "User:"]
    }
    try:
        timeout = aiohttp.ClientTimeout(total=REQUEST_TIMEOUT_SECONDS)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.post(LLAMA_API_URL, json=payload) as resp:
                if resp.status == 200:
                    data = await resp.json()
                    return data.get("content", "").strip() or "❔ مدل پاسخی نداد."
                else:
                    text = await resp.text()
                    logging.error(f"خطای مدل: {resp.status} - {text}")
                    return f"❌ خطا از مدل ({resp.status}):\n{text}"
    except asyncio.TimeoutError:
        return f"⏱️ مدل دیر پاسخ داد (بیش از {REQUEST_TIMEOUT_SECONDS} ثانیه)."
    except aiohttp.ClientConnectionError:
        return "🔌 اتصال به مدل برقرار نشد."
    except Exception as e:
        logging.exception("خطای کلی:")
        return f"⚠️ خطای غیرمنتظره: {str(e)}"
# Handle command messages; only "/gemma <prompt>" is answered.
async def handle_command(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Answer "/gemma <prompt>" commands in any capitalisation.

    Fix: the trigger check was case-insensitive but the prefix removal used
    a case-sensitive str.replace, so "/Gemma hi" was forwarded to the model
    with the command still embedded. The prefix is now removed
    case-insensitively.
    """
    message = update.message
    if message and message.text and "/gemma" in message.text.lower():
        text = message.text
        idx = text.lower().find("/gemma")
        prompt = (text[:idx] + text[idx + len("/gemma"):]).strip()
        await message.chat.send_action("typing")
        response = await get_llama_response(prompt)
        await message.reply_text(response)
# Bot setup and entry point: route all command messages to handle_command.
def main():
    app = ApplicationBuilder().token(BOT_TOKEN).build()
    app.add_handler(MessageHandler(filters.COMMAND, handle_command))
    app.run_polling()

if __name__ == "__main__":
    main()
runs/python/telegram_gemini4.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import asyncio
import aiohttp
from telegram import Update
from telegram.ext import ApplicationBuilder, MessageHandler, ContextTypes, filters
import logging

# SECURITY NOTE(review): the bot token is committed in plain text — rotate
# it and load it from the environment instead.
BOT_TOKEN = "7490823724:AAEcskSIKg9t63nBME3Igkxw_QE4dl2Ql_U"
LLAMA_API_URL = "http://127.0.0.1:8080/completion"

logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
# Async bridge to the local llama.cpp completion endpoint.
async def get_llama_response(prompt: str) -> str:
    """Return the model's completion for `prompt`, or a Persian error text."""
    request_payload = {
        "prompt": f"User: {prompt}\nAssistant:",
        "max_tokens": 100,
        "temperature": 0.7,
        "stop": ["</s>", "User:"]
    }
    try:
        budget = aiohttp.ClientTimeout(total=60)
        async with aiohttp.ClientSession(timeout=budget) as session:
            async with session.post(LLAMA_API_URL, json=request_payload) as resp:
                if resp.status != 200:
                    text = await resp.text()
                    logging.error(f"خطای مدل: {resp.status} - {text}")
                    return f"❌ خطا از مدل ({resp.status}):\n{text}"
                data = await resp.json()
                return data.get("content", "").strip() or "❔ مدل پاسخی نداد."
    except asyncio.TimeoutError:
        return "⏱️ مدل دیر پاسخ داد."
    except aiohttp.ClientConnectionError:
        return "🔌 اتصال به مدل برقرار نشد."
    except Exception as e:
        logging.exception("خطای کلی:")
        return f"⚠️ خطای غیرمنتظره: {str(e)}"
# Handle messages containing the "/gemma" trigger.
async def handle_gemma(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Answer "/gemma <prompt>" messages in any capitalisation.

    Fix: the trigger check was case-insensitive but the prefix removal used
    a case-sensitive str.replace, so "/Gemma hi" kept the command text in
    the prompt. The prefix is now removed case-insensitively.
    """
    message = update.message
    if message and message.text and "/gemma" in message.text.lower():
        text = message.text
        idx = text.lower().find("/gemma")
        prompt = (text[:idx] + text[idx + len("/gemma"):]).strip()
        await message.chat.send_action("typing")
        response = await get_llama_response(prompt)
        await message.reply_text(response)
# Bot entry point.
def main():
    """Build the application and poll for updates.

    Fix: the handler was registered with filters.TEXT & ~filters.COMMAND,
    which excludes every message starting with "/" — so the "/gemma"
    trigger that handle_gemma checks for could never arrive. Register on
    filters.COMMAND instead, matching the sibling scripts.
    """
    app = ApplicationBuilder().token(BOT_TOKEN).build()
    app.add_handler(MessageHandler(filters.COMMAND, handle_gemma))
    app.run_polling()

if __name__ == "__main__":
    main()