File size: 3,273 Bytes
397b7de
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
# bot.py
import asyncio
import os
import traceback

import discord
from gradio_client import Client
from huggingface_hub import InferenceClient

# Credentials come from environment variables (set via the Hugging Face
# Spaces "secrets" UI); os.getenv returns None when a variable is unset.
TOKEN = os.getenv("DISCORD_TOKEN")
HF_TOKEN = os.getenv("HF_TOKEN")

# Fail fast at import time if either secret is missing, rather than
# failing later on the first API call with a confusing auth error.
if not TOKEN or not HF_TOKEN:
    raise ValueError("DISCORD_TOKEN and HF_TOKEN must be set as environment variables in Spaces secrets")

# message_content is a privileged intent: it must also be enabled for this
# bot in the Discord developer portal, or message.content arrives empty.
intents = discord.Intents.default()
intents.message_content = True

# Gateway client; event handlers below are registered via @client.event.
client = discord.Client(intents=intents)

# Hugging Face Inference API client used by get_ai_response().
hf_client = InferenceClient(api_key=HF_TOKEN)

# Function to process message and get response
# Function to process message and get response
async def get_ai_response(message_content):
    """Send the user's text to the chat model and return the full reply.

    Parameters
    ----------
    message_content : str
        The user's message text (bot mention already stripped by the caller).

    Returns
    -------
    str
        The assembled model response, a fallback notice if the model
        produced no text, or an error description if the request failed.
    """
    def _collect_stream():
        # Synchronous streaming request; chunks are concatenated into one
        # string. Runs on a worker thread (see below), never on the loop.
        messages = [
            {"role": "user", "content": message_content}
        ]
        stream = hf_client.chat.completions.create(
            model="Qwen/Qwen2.5-72B-Instruct",
            messages=messages,
            temperature=0.5,
            max_tokens=2048,
            top_p=0.7,
            stream=True
        )
        text = ""
        for chunk in stream:
            content = chunk.choices[0].delta.content
            if content:
                text += content
        return text

    try:
        # BUG FIX: the InferenceClient call is blocking. Iterating the
        # stream directly inside this coroutine froze the entire event
        # loop (gateway heartbeats, all other messages) for the whole
        # generation. asyncio.to_thread moves it onto a worker thread.
        response = await asyncio.to_thread(_collect_stream)
        return response if response else "I couldn't generate a response."

    except Exception as e:
        return f"An error occurred: {str(e)}"

@client.event
async def on_ready():
    """Log the bot's identity once the gateway connection is established."""
    print(f'We have logged in as {client.user}')

@client.event
async def on_message(message):
    """Reply when the bot is mentioned, relaying the text to the model.

    Ignores the bot's own messages, strips the mention token from the
    prompt, and splits replies that exceed Discord's 2000-character limit.
    """
    # Ignore messages from the bot itself to avoid reply loops.
    if message.author == client.user:
        return

    # Only react when the bot is explicitly mentioned.
    if client.user not in message.mentions:
        return

    # BUG FIX: Discord encodes mentions as either <@id> or <@!id> (the
    # nickname form). The original stripped only <@id>, so nickname
    # mentions left "!...>"-style residue in the prompt text.
    clean_message = (
        message.content
        .replace(f"<@!{client.user.id}>", "")
        .replace(f"<@{client.user.id}>", "")
        .strip()
    )

    if not clean_message:
        await message.channel.send("Please provide some text for me to respond to!")
        return

    # Placeholder so the user sees the bot is working on a reply.
    processing_message = await message.channel.send("Processing your request...")

    response = await get_ai_response(clean_message)

    # Discord rejects messages over 2000 characters; split when needed.
    if len(response) > 2000:
        chunks = [response[i:i + 2000] for i in range(0, len(response), 2000)]
        await processing_message.delete()
        for chunk in chunks:
            await message.channel.send(chunk)
    else:
        await processing_message.edit(content=response)

# Error handling for connection issues
@client.event
async def on_error(event, *args, **kwargs):
    """Log the active exception from a failed event handler.

    Parameters
    ----------
    event : str
        Name of the event whose handler raised.
    """
    # BUG FIX: discord.py calls on_error while the exception is being
    # handled, so traceback.format_exc() captures the real failure.
    # The original logged only the event name, discarding the exception
    # and traceback entirely.
    details = f"Unhandled exception in event '{event}':\n{traceback.format_exc()}"
    print(details)
    with open('error.log', 'a') as f:
        f.write(details + "\n")

# Entry point: start the bot and surface startup failures.
def main():
    """Run the Discord client; report and log any startup failure."""
    try:
        client.run(TOKEN)
    except Exception as exc:
        failure = f"Failed to start bot: {exc}"
        print(failure)
        with open('error.log', 'a') as log:
            log.write(failure + "\n")

# Standard entry guard: run the bot only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    main()