"""Tkinter GUI front-end for a simulated asyncio-based AI core (requires CUDA)."""
# Standard library.
import asyncio
import json
import logging
import tkinter as tk
from threading import Thread
from tkinter import messagebox
from typing import Any, Dict

# Third party.
import torch

# Configure root logging once at import time.
logging.basicConfig(level=logging.INFO)
class AICore:
    """Simulated AI backend: response generation, health reporting, shutdown.

    All "work" is stubbed with ``asyncio.sleep`` so the surrounding GUI can be
    exercised without a real model.
    """

    def __init__(self):
        """Bind the core to the CUDA device.

        Raises:
            RuntimeError: if no CUDA-capable GPU is available.
        """
        if not torch.cuda.is_available():
            raise RuntimeError("GPU not available. Ensure CUDA is installed and a compatible GPU is present.")
        # The guard above guarantees CUDA is present, so the device is
        # unconditionally 'cuda' (the previous 'cpu' ternary branch was dead code).
        self.device = torch.device('cuda')
        logging.info(f"Using device: {self.device}")

    async def generate_response(self, query: str) -> Dict[str, Any]:
        """Return a canned response payload for *query*.

        The 1-second sleep simulates model latency.
        """
        await asyncio.sleep(1)  # Simulate processing time
        return {
            "query": query,
            "response": f"AI response to: {query}",
            "insights": ["Insight 1", "Insight 2"],
            "security_level": 2,
            "safety_analysis": {"toxicity": 0.1, "bias": 0.05, "privacy": []}
        }

    async def check_health(self) -> Dict[str, Any]:
        """Return a snapshot of (simulated) system health metrics."""
        await asyncio.sleep(1)  # Simulate processing time
        return {
            "memory_usage": 30,   # Example memory usage percentage
            "cpu_load": 20,       # Example CPU load percentage
            "gpu_memory": self.get_gpu_memory(),  # Actual allocated GPU memory
            "response_time": 0.5  # Example response time in seconds
        }

    def get_gpu_memory(self) -> float:
        """Return GPU memory currently allocated by torch, in GB (0.0 without CUDA)."""
        if torch.cuda.is_available():
            return torch.cuda.memory_allocated() / 1e9  # Convert bytes to GB
        return 0.0

    async def shutdown(self):
        """Simulate releasing resources; logs on completion."""
        await asyncio.sleep(1)  # Simulate cleanup time
        logging.info("AI Core shutdown complete.")
class AIApp(tk.Tk):
    """Tkinter front-end for an :class:`AICore` instance.

    Async work never runs on the Tk main thread: each request gets its own
    worker thread with a private event loop, and results are marshalled back
    to the GUI via ``after(0, ...)``.
    """

    def __init__(self, ai_core):
        super().__init__()
        self.ai_core = ai_core
        self.title("AI System Interface")
        self.geometry("800x600")
        self._running = True
        # Stop background polling cleanly when the window is closed; the
        # original never cleared _running, so polling callbacks could fire
        # against a destroyed window.
        self.protocol("WM_DELETE_WINDOW", self._on_close)
        self.create_widgets()
        self._start_health_monitoring()

    def create_widgets(self):
        """Build the query entry, submit button, response area and status bar."""
        self.query_label = tk.Label(self, text="Enter your query:")
        self.query_label.pack(pady=10)
        self.query_entry = tk.Entry(self, width=100)
        self.query_entry.pack(pady=10)
        self.submit_button = tk.Button(self, text="Submit", command=self.submit_query)
        self.submit_button.pack(pady=10)
        self.response_area = tk.Text(self, height=20, width=100)
        self.response_area.pack(pady=10)
        self.status_bar = tk.Label(self, text="Ready", bd=1, relief=tk.SUNKEN, anchor=tk.W)
        self.status_bar.pack(side=tk.BOTTOM, fill=tk.X)

    def submit_query(self):
        """Dispatch the entered query to the AI core off the GUI thread."""
        query = self.query_entry.get()
        if not query:
            # Ignore empty submissions instead of sending a blank query.
            self.status_bar.config(text="Please enter a query")
            return
        self.status_bar.config(text="Processing...")
        # daemon=True so a pending request cannot keep the process alive
        # after the window is closed.
        Thread(
            target=self._run_async_task,
            args=(self.ai_core.generate_response(query),),
            daemon=True,
        ).start()

    def _run_async_task(self, coroutine):
        """Run *coroutine* to completion on a private event loop (worker thread)."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            result = loop.run_until_complete(coroutine)
            self.after(0, self._display_result, result)
        except Exception as e:
            self.after(0, self._show_error, str(e))
        finally:
            loop.close()

    def _display_result(self, result: Dict):
        """Append *result* (pretty-printed JSON) to the response area."""
        self.response_area.insert(tk.END, json.dumps(result, indent=2) + "\n\n")
        self.status_bar.config(text="Query processed successfully")

    def _show_error(self, message: str):
        """Show *message* in a dialog and mirror it in the status bar."""
        messagebox.showerror("Error", message)
        self.status_bar.config(text=f"Error: {message}")

    def _start_health_monitoring(self):
        """Poll system health every 5 s without blocking the Tk event loop.

        The original called ``asyncio.run`` directly on the GUI thread, which
        froze the interface for the full duration of each (1 s) health check;
        the check now runs in a daemon worker thread and posts its result
        back with ``after``.
        """
        def show(health):
            if self._running:
                self.status_bar.config(
                    text=f"System Health - Memory: {health['memory_usage']}% | "
                         f"CPU: {health['cpu_load']}% | GPU: {health['gpu_memory']}GB | "
                         f"Response Time: {health['response_time']:.2f}s"
                )

        def worker():
            try:
                health = asyncio.run(self.ai_core.check_health())
            except Exception as e:
                self.after(0, self._show_error, str(e))
            else:
                self.after(0, show, health)

        def poll():
            if not self._running:
                return
            Thread(target=worker, daemon=True).start()
            self.after(5000, poll)

        poll()

    def _on_close(self):
        """Stop health polling, then destroy the window."""
        self._running = False
        self.destroy()
async def main():
    """Initialize the AI core, run the GUI, then shut the core down.

    NOTE: ``app.mainloop()`` blocks this coroutine (and the asyncio loop)
    until the window closes; async work runs on worker threads inside AIApp.
    """
    print("🧠 Hybrid AI System Initializing (Local Models)")
    try:
        ai = AICore()  # Initialize the AI core
    except RuntimeError as e:
        # Surface the missing-GPU error gracefully instead of an
        # unhandled traceback.
        logging.error("Startup failed: %s", e)
        return
    try:
        app = AIApp(ai)
        app.mainloop()
    finally:
        # Run cleanup even if the GUI raises.
        await ai.shutdown()
if __name__ == "__main__":
    # Script entry point: asyncio.run creates the loop, drives main()
    # to completion, and closes the loop.
    asyncio.run(main())