|
|
|
import asyncio
import logging
import os
import threading

import discord
import gradio as gr
from gradio_client import Client
from huggingface_hub import InferenceClient
|
|
|
|
|
# Root logger: timestamped, level-tagged lines (visible in the Spaces log pane).
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] [%(levelname)s] %(message)s')


# Credentials are injected through environment variables (HF Spaces secrets).
TOKEN = os.getenv("DISCORD_TOKEN")  # Discord bot token

HF_TOKEN = os.getenv("HF_TOKEN")  # Hugging Face Inference API token


# Fail fast at import time if either secret is missing, rather than
# failing later with an opaque auth error from Discord or HF.
if not TOKEN or not HF_TOKEN:

    raise ValueError("DISCORD_TOKEN and HF_TOKEN must be set as environment variables in Spaces secrets")


# The privileged message_content intent is required to read message text
# (and must also be enabled for the bot in the Discord developer portal).
intents = discord.Intents.default()

intents.message_content = True


# Discord gateway client shared by the event handlers below.
client = discord.Client(intents=intents)


# Hugging Face inference client used for chat completions.
hf_client = InferenceClient(api_key=HF_TOKEN)
|
|
|
|
|
async def get_ai_response(message_content):
    """Return the model's reply to *message_content*, or an error string.

    The HF ``InferenceClient`` chat-completion call (and iterating its
    stream) is synchronous and blocking; running it directly inside this
    coroutine would freeze the Discord event loop (heartbeats, other
    events) for the whole generation. It is therefore pushed onto a
    worker thread with ``asyncio.to_thread``.

    Never raises: any failure is logged and returned as a user-facing
    "An error occurred: ..." string, matching the original contract.
    """
    try:
        return await asyncio.to_thread(_stream_completion, message_content)
    except Exception as e:
        logging.error(f"Error in get_ai_response: {e}")
        return f"An error occurred: {str(e)}"


def _stream_completion(message_content):
    """Blocking helper: stream the completion and accumulate the text.

    Runs on a worker thread (see get_ai_response). Malformed stream
    chunks are logged and skipped rather than aborting the reply.
    """
    messages = [{"role": "user", "content": message_content}]
    response = ""
    stream = hf_client.chat.completions.create(
        model="Qwen/Qwen2.5-72B-Instruct",
        messages=messages,
        temperature=0.5,
        max_tokens=2048,
        top_p=0.7,
        stream=True,
    )
    for chunk in stream:
        try:
            delta_content = chunk.choices[0].delta.content
            if delta_content is not None:
                response += delta_content
        except (AttributeError, IndexError) as e:
            logging.warning(f"Skipping invalid chunk: {e}")
            continue
    return response if response else "I couldn't generate a response."
|
|
|
@client.event

async def on_ready():
    """Gateway handshake complete — log the identity the bot connected as."""
    logging.info(f'We have logged in as {client.user}')
|
|
|
@client.event
async def on_message(message):
    """Reply to messages that mention the bot.

    Ignores the bot's own messages, strips the mention token from the
    text, asks the model for a reply, and sends it — splitting into
    2000-character pieces when it exceeds Discord's message length cap.
    """
    if message.author == client.user:
        return
    if client.user not in message.mentions:
        return
    # Discord delivers mentions as either <@id> or the nickname form
    # <@!id>; strip both so neither leaks into the model prompt.
    clean_message = (
        message.content
        .replace(f"<@{client.user.id}>", "")
        .replace(f"<@!{client.user.id}>", "")
        .strip()
    )
    if not clean_message:
        await message.channel.send("Please provide some text for me to respond to!")
        return
    processing_message = await message.channel.send("Processing your request...")
    response = await get_ai_response(clean_message)
    if len(response) > 2000:
        # Over the Discord per-message limit: replace the placeholder
        # with a sequence of <=2000-char messages.
        chunks = [response[i:i+2000] for i in range(0, len(response), 2000)]
        await processing_message.delete()
        for chunk in chunks:
            await message.channel.send(chunk)
    else:
        await processing_message.edit(content=response)
|
|
|
@client.event
async def on_error(event, *args, **kwargs):
    """Log unhandled exceptions raised inside event handlers.

    discord.py invokes on_error from within the failing handler's
    ``except`` block, so the active exception is still available;
    ``exc_info=True`` records its traceback instead of only the event
    name. A one-line marker is also appended to error.log.
    """
    logging.error(f"An error occurred: {event}", exc_info=True)
    with open('error.log', 'a', encoding='utf-8') as f:
        f.write(f"{event}\n")
|
|
|
|
|
def run_discord_bot():
    """Run the Discord client (blocking); log and record startup failures.

    Intended as a thread target: ``client.run`` does not return until the
    bot shuts down, and any exception it raises is logged and appended to
    error.log instead of propagating out of the thread.
    """
    logging.info("Starting the Discord bot...")
    try:
        client.run(TOKEN)
    except Exception as e:
        logging.error(f"Failed to start bot: {e}")
        with open('error.log', 'a') as log_file:
            log_file.write(f"Failed to start bot: {e}\n")
|
|
|
|
|
def create_interface():
    """Build the minimal Gradio page that advertises the bot's invite URL."""
    invite_text = "Add this bot to your server by following this URL: https://discord.com/oauth2/authorize?client_id=1347942347077582880&permissions=377957238784&integration_type=0&scope=bot"
    demo = gr.Blocks(title="Discord Bot Invite")
    with demo:
        gr.Markdown(f"# Discord Bot\n{invite_text}")
    return demo
|
|
|
if __name__ == "__main__":
    # Run the Discord client on a background daemon thread so the Gradio
    # server can own the main thread (daemon=True lets the process exit
    # when the web server stops).
    bot_worker = threading.Thread(target=run_discord_bot, daemon=True)
    bot_worker.start()

    # Serve the invite page; 0.0.0.0:7860 is the HF Spaces convention.
    create_interface().launch(server_name="0.0.0.0", server_port=7860)