|
import asyncio
import logging
import os
import subprocess
import sys
import time

import langid
from fastapi import FastAPI, Request
from huggingface_hub import InferenceClient, login
from transformers import pipeline
|
|
|
|
|
# Hugging Face API token, required for both hub login and inference calls.
HF_HUB_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")

# Fail fast at startup if the token is absent rather than erroring mid-request.
if not HF_HUB_TOKEN:

    raise ValueError("Missing Hugging Face API token. Please set HUGGINGFACEHUB_API_TOKEN.")

# Authenticate this process with the Hugging Face Hub, then create the
# serverless Inference API client used for all chat completions below.
login(token=HF_HUB_TOKEN)

client = InferenceClient(api_key=HF_HUB_TOKEN)

# FastAPI application object; routes are registered on it further down.
app = FastAPI()
|
|
|
|
|
|
|
def detect_language(user_input):
    """Classify *user_input* as "hebrew", "english", or "unsupported".

    Any classifier failure is logged and reported as "unsupported" so the
    caller can degrade gracefully instead of crashing.
    """
    try:
        code, _score = langid.classify(user_input)
    except Exception as exc:
        logging.error(f"Language detection error: {exc}")
        return "unsupported"

    if code == "he":
        return "hebrew"
    if code == "en":
        return "english"
    return "unsupported"
|
|
|
|
|
|
|
def generate_response(text):
    """Route *text* to a language-appropriate instruct model and return its reply.

    Hebrew prompts go to Phi-3.5-mini, English prompts to Mistral-Nemo; any
    other detected language gets a static apology string instead of a model
    call.
    """
    # Per-language (prompt prefix, model id) dispatch table.
    routing = {
        "hebrew": (
            "转注谞讛 讘拽爪专讛 讗讘诇 转砖转祝 讗转 转讛诇讬讱 拽讘诇转 讛讛讞诇讟讜转 砖诇讱, ",
            "microsoft/Phi-3.5-mini-instruct",
        ),
        "english": (
            "keep it short but tell your decision making process, ",
            "mistralai/Mistral-Nemo-Instruct-2407",
        ),
    }

    language = detect_language(text)
    if language not in routing:
        return "Sorry, I only support Hebrew and English."

    prefix, model_id = routing[language]

    completion = client.chat.completions.create(
        model=model_id,
        messages=[{"role": "user", "content": prefix + text}],
        max_tokens=2048,
        temperature=0.5,
        top_p=0.7,
    )
    return completion.choices[0].message.content
|
|
|
|
|
@app.post("/generate_response")
async def generate_text(request: Request):
    """Handle POST /generate_response.

    Expects a JSON body like {"text": "..."} and returns {"response": "..."}
    on success, or {"error": "..."} (still HTTP 200, matching the existing
    contract) when the text is missing or any downstream failure occurs.
    """
    try:
        data = await request.json()
        text = data.get("text", "").strip()
        if not text:
            return {"error": "No text provided"}

        # generate_response performs a blocking HTTP call to the HF Inference
        # API; run it in a worker thread so the event loop stays responsive
        # instead of stalling every other request for the call's duration.
        response = await asyncio.to_thread(generate_response, text)
        return {"response": response}
    except Exception as e:
        logging.error(f"Error processing request: {e}")
        return {"error": "An unexpected error occurred."}
|
|
|
|
|
@app.get("/")
async def root():
    """Health-check endpoint confirming the API is up."""
    payload = {"message": "Decision Helper API is running!"}
    return payload
|
|
|
|
|
|
|
def run_bot():
    """Launch the Telegram bot (bot.py) as a background child process.

    Uses sys.executable so the bot runs under the same interpreter and
    virtualenv as this server, rather than whatever "python3" happens to
    resolve to on PATH (which may be absent, e.g. on Windows, or a
    different environment missing the bot's dependencies).
    """
    logging.info("Starting Telegram bot...")
    subprocess.Popen([sys.executable, "bot.py"])
|
|
|
|
|
# Script entry point: spawn the Telegram bot, then serve the FastAPI app.
if __name__ == "__main__":

    run_bot()

    # Imported here (not at file top) so merely importing this module — e.g.
    # from the bot process or tests — does not require uvicorn to be installed.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
|
|