File size: 2,418 Bytes
f384aea
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
# Standard library
import asyncio
import logging
import os
import subprocess
import sys
import time

# Third-party
import langid
from fastapi import FastAPI, Request
from huggingface_hub import InferenceClient, login
from transformers import pipeline

# Environment variables
# Hugging Face API token; required for both hub login and the inference client.
HF_HUB_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")

# Fail fast at import time rather than on the first inference request.
if not HF_HUB_TOKEN:
    raise ValueError("Missing Hugging Face API token. Please set HUGGINGFACEHUB_API_TOKEN.")

# Authenticate the local hub session and build the serverless inference client.
login(token=HF_HUB_TOKEN)
client = InferenceClient(api_key=HF_HUB_TOKEN)

# FastAPI application served by uvicorn (see __main__ block at the bottom).
app = FastAPI()


# Function to detect language
# Function to detect language
def detect_language(user_input):
    """Classify *user_input* and return "hebrew", "english", or "unsupported".

    Any classifier failure is logged and treated as unsupported rather than
    propagated to the caller.
    """
    try:
        detected, _ = langid.classify(user_input)
    except Exception as e:
        logging.error(f"Language detection error: {e}")
        return "unsupported"
    # Only Hebrew and English are handled downstream; everything else falls through.
    supported = {"he": "hebrew", "en": "english"}
    return supported.get(detected, "unsupported")


# Function to generate response
def generate_response(text):
    language = detect_language(text)

    if language == "hebrew":
        content = "转注谞讛 讘拽爪专讛 讗讘诇 转砖转祝 讗转 转讛诇讬讱 拽讘诇转 讛讛讞诇讟讜转 砖诇讱, " + text
        model = "microsoft/Phi-3.5-mini-instruct"
    elif language == "english":
        content = "keep it short but tell your decision making process, " + text
        model = "mistralai/Mistral-Nemo-Instruct-2407"
    else:
        return "Sorry, I only support Hebrew and English."

    messages = [{"role": "user", "content": content}]
    
    completion = client.chat.completions.create( 
        model=model,
        messages=messages,
        max_tokens=2048,
        temperature=0.5,
        top_p=0.7
    )
    return completion.choices[0].message.content


@app.post("/generate_response")
async def generate_text(request: Request):
    """POST /generate_response: read JSON {"text": ...} and return the model reply.

    Returns {"response": ...} on success or {"error": ...} on empty input or
    any unexpected failure (which is logged, never propagated).
    """
    try:
        payload = await request.json()
        user_text = payload.get("text", "").strip()
        if not user_text:
            return {"error": "No text provided"}
        return {"response": generate_response(user_text)}
    except Exception as e:
        logging.error(f"Error processing request: {e}")
        return {"error": "An unexpected error occurred."}


@app.get("/")
async def root():
    """Health-check endpoint confirming the API is up."""
    return {"message": "Decision Helper API is running!"}


# Function to run bot.py
def run_bot():
    """Launch the Telegram bot (bot.py) as a background child process.

    Uses sys.executable so the bot runs under the same interpreter (and
    virtualenv) as this server, instead of whatever "python3" happens to
    resolve to on PATH — which may be missing or a different environment.
    The Popen handle is intentionally not kept; the bot runs detached.
    """
    logging.info("Starting Telegram bot...")
    subprocess.Popen([sys.executable, "bot.py"])


if __name__ == "__main__":
    # Spawn the Telegram bot first, then serve the API in this process.
    run_bot()
    # Deferred import: uvicorn is only needed when run as a script.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)