import requests
import logging
from config import HEADERS, MODEL_OPTIONS, DEFAULT_MODEL
# Configure logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
CURRENT_MODEL = DEFAULT_MODEL
API_URL = f"https://api-inference.huggingface.co/models/{CURRENT_MODEL}"
# 📌 Call the Hugging Face Inference API for sentiment analysis
def analyze_sentiment(text, model_name=None):
    global CURRENT_MODEL, API_URL

    # Switch to the requested model if it differs from the current one
    if model_name and MODEL_OPTIONS[model_name] != CURRENT_MODEL:
        CURRENT_MODEL = MODEL_OPTIONS[model_name]
        API_URL = f"https://api-inference.huggingface.co/models/{CURRENT_MODEL}"
        logging.info(f"🔄 Switched model: {CURRENT_MODEL}")

    try:
        logging.info("🚀 Sending API request...")
        response = requests.post(API_URL, headers=HEADERS, json={"inputs": text})
        response.raise_for_status()

        result = response.json()
        logging.info(f"✅ API response: {result}")

        # Expect a list of {label, score} predictions; take the top one
        if isinstance(result, list) and len(result) > 0:
            sentiment = result[0]["label"]
            confidence = result[0]["score"]
            return f"**Sentiment**: {sentiment}\n**AI confidence**: {confidence*100:.2f}%", confidence
        else:
            return "⚠️ **Unable to analyze the text, please try again later**", 0.0

    except requests.exceptions.RequestException as e:
        logging.error(f"❌ API request error: {e}")
        return f"❌ **API request error**: {str(e)}", 0.0
    except ValueError as e:
        logging.error(f"❌ JSON decoding error: {e}")
        return f"❌ **JSON decoding error**: {str(e)}", 0.0
    except KeyError as e:
        logging.error(f"❌ Dictionary key error: {e}")
        return f"❌ **Dictionary key error**: {str(e)}", 0.0
    except Exception as e:
        logging.error(f"❌ Unknown error: {e}")
        return f"❌ **Unknown error**: {str(e)}", 0.0