#!/usr/bin/env python3
"""
SmolAgents Authentication Fix
Resolves 401 "Invalid username or password" errors
"""
import os
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, InferenceClientModel
# Method 1: Explicit token (most reliable)
def create_agent_with_token():
    """Create agent with explicit HF token - most reliable method"""
    # Replace with your actual HuggingFace token
    hf_token = "hf_xxxxxxxxxxxxxxxxxxxxxxxxxx"

    model = HfApiModel(
        model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
        token=hf_token
    )

    agent = CodeAgent(
        tools=[DuckDuckGoSearchTool()],
        model=model
    )
    return agent

# Method 2: Environment variable (recommended for production)
def create_agent_with_env_var():
    """Create agent using the HF_TOKEN environment variable"""
    # Set the environment variable first:
    # export HF_TOKEN="hf_xxxxxxxxxxxxxxxxxxxxxxxxxx"

    # Verify the token is set
    if not os.getenv("HF_TOKEN"):
        raise ValueError("HF_TOKEN environment variable not set!")

    model = HfApiModel(
        model_id="Qwen/Qwen2.5-Coder-32B-Instruct"
        # token will be read from HF_TOKEN automatically
    )

    agent = CodeAgent(
        tools=[DuckDuckGoSearchTool()],
        model=model
    )
    return agent

# Method 3: InferenceClientModel (newest approach, 2025)
def create_agent_with_inference_client():
    """Create agent using the newer InferenceClientModel - better error handling"""
    hf_token = "hf_xxxxxxxxxxxxxxxxxxxxxxxxxx"

    model = InferenceClientModel(
        model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
        token=hf_token
    )

    agent = CodeAgent(
        tools=[DuckDuckGoSearchTool()],
        model=model
    )
    return agent

# Method 4: Login first (alternative approach)
def create_agent_with_login():
    """Create agent after logging in to HuggingFace Hub"""
    from huggingface_hub import login

    # Login to HF (will prompt for token if not provided)
    login(token="hf_xxxxxxxxxxxxxxxxxxxxxxxxxx")

    # Now HfApiModel should work without an explicit token
    model = HfApiModel(
        model_id="Qwen/Qwen2.5-Coder-32B-Instruct"
    )

    agent = CodeAgent(
        tools=[DuckDuckGoSearchTool()],
        model=model
    )
    return agent

def test_agent(agent):
    """Test the agent with a simple query"""
    try:
        result = agent.run("What is the current time?")
        print("βœ… Success! Agent is working properly.")
        print(f"Result: {result}")
        return True
    except Exception as e:
        print(f"❌ Error: {e}")
        return False

if __name__ == "__main__":
    print("Testing SmolAgents authentication fixes...\n")

    # Try Method 1 first (most common solution)
    print("Method 1: Explicit token")
    try:
        agent = create_agent_with_token()
        if test_agent(agent):
            print("βœ… Method 1 successful!\n")
        else:
            print("❌ Method 1 failed, trying next method...\n")
    except Exception as e:
        print(f"❌ Method 1 failed: {e}\n")

    # Try Method 3 (newest approach)
    print("Method 3: InferenceClientModel")
    try:
        agent = create_agent_with_inference_client()
        if test_agent(agent):
            print("βœ… Method 3 successful!\n")
    except Exception as e:
        print(f"❌ Method 3 failed: {e}\n")

# Token Requirements Checklist:
"""
Your HuggingFace token must have these permissions:
βœ… "Make calls to the serverless Inference API"
βœ… "Read access to contents of all public gated repos" (for gated models)

To get your token:
1. Go to: https://huggingface.co/settings/tokens
2. Click "New token"
3. Select "Write" permissions
4. Copy the token (starts with hf_)

Common issues:
- Token is expired or revoked
- Token lacks proper permissions
- Model is gated and requires special access
- Network/firewall blocking HF API calls
"""