import requests
import logging

from config import HEADERS, MODEL_OPTIONS, DEFAULT_MODEL

logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

# Currently selected Hugging Face model and the inference endpoint built from it.
# Both are module-level state, mutated by analyze_sentiment() when the caller
# switches models.
CURRENT_MODEL = DEFAULT_MODEL
API_URL = f"https://api-inference.huggingface.co/models/{CURRENT_MODEL}"

# Seconds to wait for the inference API. The original call had no timeout,
# so a stalled request would block the caller indefinitely.
_REQUEST_TIMEOUT = 30


# Translate an English sentiment label into the Taiwanese-Mandarin display form.
def translate_sentiment(label):
    """Map a model sentiment label to a localized display string.

    Matching is case-insensitive and substring-based; any label that is
    neither "positive" nor "neutral" is treated as negative.
    """
    label = label.lower()
    if "positive" in label:
        return "๐Ÿ˜ƒ **้–‹ๅฟƒใ€ๆญฃ้ข**"
    elif "neutral" in label:
        return "๐Ÿ˜ **ๆ™ฎ้€šใ€ๆฒ’็‰นๅˆฅๆ„Ÿ่ฆบ**"
    else:
        return "๐Ÿ˜ก **่ฒ ้ขใ€ๆฒ’้‚ฃ้บผ้–‹ๅฟƒ**"


# Call the Hugging Face inference API to run sentiment analysis.
def analyze_sentiment(text, model_name=None):
    """Analyze the sentiment of *text* via the Hugging Face inference API.

    Parameters:
        text: the input text to classify.
        model_name: optional key into MODEL_OPTIONS; when given and different
            from the current model, the module-level endpoint is switched.
            Unknown names are ignored (the original raised KeyError here).

    Returns:
        A ``(message, confidence)`` tuple: a formatted result string and the
        winning label's score, or an error/warning string with confidence 0.0.
    """
    global CURRENT_MODEL, API_URL
    # Switch models only for a known option that differs from the current one.
    if model_name and model_name in MODEL_OPTIONS and MODEL_OPTIONS[model_name] != CURRENT_MODEL:
        CURRENT_MODEL = MODEL_OPTIONS[model_name]
        API_URL = f"https://api-inference.huggingface.co/models/{CURRENT_MODEL}"
        logging.info(f"๐Ÿ”„ ๅˆ‡ๆ›ๆจกๅž‹: {CURRENT_MODEL}")

    try:
        logging.info("๐Ÿš€ ็™ผ้€ API ่ซ‹ๆฑ‚...")
        print(f"๐Ÿ“ข [Debug] API URL: {API_URL}")
        print(f"๐Ÿ“ข [Debug] ่ผธๅ…ฅๆ–‡ๆœฌ: {text}")

        # timeout added so a hung connection cannot block forever.
        response = requests.post(
            API_URL,
            headers=HEADERS,
            json={"inputs": text},
            timeout=_REQUEST_TIMEOUT,
        )
        response.raise_for_status()
        result = response.json()
        print(f"๐Ÿ“ข [Debug] API ๅ›žๆ‡‰: {result}")
    except requests.exceptions.RequestException as e:
        logging.error(f"โŒ API ่ซ‹ๆฑ‚้Œฏ่ชค: {e}")
        return f"โŒ **API ่ซ‹ๆฑ‚้Œฏ่ชค**: {str(e)}", 0.0

    try:
        # Text-classification endpoints wrap the label list in an outer list;
        # unwrap one level when present.
        if isinstance(result, list) and len(result) > 0 and isinstance(result[0], list):
            result = result[0]

        if isinstance(result, list) and len(result) > 0:
            # Pick the highest-scoring label and localize it.
            best_sentiment = max(result, key=lambda x: x["score"])
            sentiment = translate_sentiment(best_sentiment["label"])
            confidence = best_sentiment["score"]
            return f"**ๆƒ…็ท’ๅˆ†้กž**: {sentiment}\n**AI ไฟกๅฟƒๅบฆ**: {confidence*100:.2f}%", confidence
    except (KeyError, TypeError, ValueError) as e:
        # Malformed response items (missing "score"/"label", wrong shapes)
        # previously escaped this function; report them as an analysis failure.
        logging.error(f"โŒ API ่ซ‹ๆฑ‚้Œฏ่ชค: {e}")

    # Anything else (error dicts, empty lists, malformed items) is reported
    # as "cannot analyze" rather than crashing.
    return "โš ๏ธ **็„กๆณ•ๅˆ†ๆžๆ–‡ๆœฌ๏ผŒ่ซ‹็จๅพŒๅ†่ฉฆ**", 0.0