# Decision Helper API: a FastAPI service that detects whether incoming text is
# Hebrew or English and generates a reply via the Hugging Face Inference API.
import logging
import os
from fastapi import FastAPI, Request
from contextlib import asynccontextmanager
import langid
from huggingface_hub import InferenceClient, login
import time
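
# Minimal logging configuration (an assumed setup; the original relied on the
# root logger's defaults) so the logging.error calls below are formatted consistently.
logging.basicConfig(level=logging.INFO)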

# Global variables
HF_HUB_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")

def current_time_gmt():
    # Current time as "HH:MM:SS", shifted +2 hours from GMT (keeps the original UTC+2 offset).
    return time.strftime("%H:%M:%S", time.gmtime(time.time() + 2 * 3600))
    
# Verify Hugging Face token
if not HF_HUB_TOKEN:
    raise ValueError("Missing Hugging Face API token. Please set HUGGINGFACEHUB_API_TOKEN in environment variables.")
login(token=HF_HUB_TOKEN)

client = InferenceClient(api_key=HF_HUB_TOKEN)


# Function to detect language
def detect_language(user_input):
    try:
        lang, _ = langid.classify(user_input) # langid.classify returns a tuple (language, confidence)
        print(f"Detected language: {lang}, ", f"current time: {current_time_gmt()}") 
        return "hebrew" if lang == "he" else "english" if lang == "en" else "unsupported"
    except Exception as e:
        print(f"Language detection error: {e}")
        return "unsupported"


def generate_response(text):
    language = detect_language(text)
    print(f"Detected language: {language}, current time: {current_time_gmt()}")

    if language in ("hebrew", "english"):
        messages = [
            {"role": "user", "content": text}
        ]
        print(f"Messages: {messages}, current time: {current_time_gmt()}")

        completion = client.chat.completions.create(
            model="microsoft/Phi-3.5-mini-instruct",
            messages=messages,
            max_tokens=400,
            temperature=0.5,
            top_p=0.7
        )
        print("\ncompletion: ", completion.choices[0].message, f"\ncurrent time: {current_time_gmt()}")
        # Return only the generated text, not the whole message object.
        return completion.choices[0].message.content

    return "Sorry, I only support Hebrew and English."


# FastAPI lifespan event
@asynccontextmanager
async def lifespan(app: FastAPI):
    print("Starting application...")
    yield  # Application serves requests here; code after this runs at shutdown
    print("Shutting down application...")


# Create FastAPI app
app = FastAPI(lifespan=lifespan)


@app.get("/")
async def root():
    return {"message": "Decision Helper API is running!"}


@app.post("/generate_response")
async def generate_text(request: Request):
    try:
        data = await request.json()
        if not data or "text" not in data:
            logging.error("Invalid request received")
            return {"error": "Invalid request. Please send JSON with a 'text' field."}

        text = data["text"].strip()
        if not text:
            return {"error": "No text provided"}

        print(f"Received text: {text}")  # Debugging
        
        response = generate_response(text)
        print(f"Generated response: {response}")  # Debugging
        return {"response": response}

    except Exception as e:
        logging.error(f"Error processing request: {e}")
        return {"error": "An unexpected error occurred."}


# Run the server
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)