import requests
import logging

from config import HEADERS, MODEL_OPTIONS, DEFAULT_MODEL

logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

CURRENT_MODEL = DEFAULT_MODEL
API_URL = f"https://api-inference.huggingface.co/models/{CURRENT_MODEL}"
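
# The HEADERS / MODEL_OPTIONS / DEFAULT_MODEL imported above come from config.py, which
# is not shown here. A minimal sketch of what it is assumed to contain follows; the token
# and the model ids are placeholders, not values taken from the original project:
#
#     HEADERS = {"Authorization": "Bearer hf_xxx"}   # Hugging Face API token
#     MODEL_OPTIONS = {
#         "Multilingual": "cardiffnlp/twitter-xlm-roberta-base-sentiment",
#         "English": "distilbert-base-uncased-finetuned-sst-2-english",
#     }
#     DEFAULT_MODEL = MODEL_OPTIONS["Multilingual"]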

# 🔄 Map the English sentiment label to a Taiwanese-Mandarin description
def translate_sentiment(label):
    label = label.lower()
    if "positive" in label:
        return "😀 **開心、正面**"        # happy / positive
    elif "neutral" in label:
        return "😐 **普通、沒特別感覺**"  # neutral / nothing in particular
    else:
        return "😡 **負面、沒那麼開心**"  # negative / not so happy

# 🔍 Call the Hugging Face Inference API to run sentiment analysis
def analyze_sentiment(text, model_name=None):
    global CURRENT_MODEL, API_URL

    # Switch to the requested model if it differs from the one currently in use
    if model_name and MODEL_OPTIONS[model_name] != CURRENT_MODEL:
        CURRENT_MODEL = MODEL_OPTIONS[model_name]
        API_URL = f"https://api-inference.huggingface.co/models/{CURRENT_MODEL}"
        logging.info(f"🔄 Switched model to: {CURRENT_MODEL}")

    try:
        logging.info("🚀 Sending API request...")
        print(f"🟢 [Debug] API URL: {API_URL}")
        print(f"🟢 [Debug] Input text: {text}")

        response = requests.post(API_URL, headers=HEADERS, json={"inputs": text})
        response.raise_for_status()
        result = response.json()
        print(f"🟢 [Debug] API response: {result}")

        # 🔧 Normalise the response format: the API may wrap the scores in a nested list
        if isinstance(result, list) and len(result) > 0 and isinstance(result[0], list):
            result = result[0]  # take the inner list

        if isinstance(result, list) and len(result) > 0:
            # Pick the sentiment class with the highest score
            best_sentiment = max(result, key=lambda x: x["score"])
            sentiment = translate_sentiment(best_sentiment["label"])  # ✅ convert to Taiwanese-Mandarin wording
            confidence = best_sentiment["score"]
            # "情緒分類" = sentiment class, "AI 信心度" = model confidence
            return f"**情緒分類**: {sentiment}\n**AI 信心度**: {confidence*100:.2f}%", confidence
        else:
            return "⚠️ **無法分析文本，請稍後再試**", 0.0  # "Could not analyse the text, please try again later"
    except requests.exceptions.RequestException as e:
        logging.error(f"❌ API request error: {e}")
        return f"❌ **API 請求錯誤**: {str(e)}", 0.0  # "API request error"