from flask import Flask, request, jsonify, redirect, url_for
import hmac, hashlib, secrets, time
from openai import OpenAI
import os

app = Flask(__name__)

# 🔑 Secret key for signing API keys and GitHub token for the model endpoint
# (both loaded from environment variables; set them before starting the app)
SECRET_KEY = os.getenv("SECRET_KEY")
GITHUB_TOKEN = os.getenv("GITHUB_TOKEN")
if not SECRET_KEY:
    raise RuntimeError("The SECRET_KEY environment variable must be set")

# GitHub Models inference endpoint, accessed through the OpenAI client
endpoint = "https://models.inference.ai.azure.com"
client = OpenAI(base_url=endpoint, api_key=GITHUB_TOKEN)

# Track API statistics
request_count = 0
start_time = time.time()

@app.route('/')
def home():
    return redirect(url_for('status'))
    
@app.route('/status', methods=['GET'])
def status():
    global request_count
    uptime_seconds = int(time.time() - start_time)

    # Convert uptime to days, hours, minutes, and seconds
    days = uptime_seconds // 86400
    hours = (uptime_seconds % 86400) // 3600
    minutes = (uptime_seconds % 3600) // 60
    seconds = uptime_seconds % 60

    uptime_str = f"{days} days, {hours} hours, {minutes} minutes and {seconds} seconds"

    return jsonify({
        "status": "API is running",
        "total_requests": request_count,
        "uptime": uptime_str
    })

# Generate API Key
@app.route("/generate_api_key", methods=["POST"])
def generate_api_key():
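    # Key format: "TrueSyncAI-<32 hex chars>-<16-char truncated HMAC-SHA256 signature>"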
    random_part = secrets.token_hex(16)
    signature = hmac.new(SECRET_KEY.encode(), random_part.encode(), hashlib.sha256).hexdigest()[:16]
    api_key = f"TrueSyncAI-{random_part}-{signature}"
    
    return jsonify({"api_key": api_key})

# Validate API Key
def validate_api_key(api_key):
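    # Recompute the signature from the key's random part and compare it with
    # the signature embedded in the key.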
    parts = api_key.split("-")
    if len(parts) != 3 or parts[0] != "TrueSyncAI":
        return False
    
    random_part, received_signature = parts[1], parts[2]
    expected_signature = hmac.new(SECRET_KEY.encode(), random_part.encode(), hashlib.sha256).hexdigest()[:16]
    # Constant-time comparison to avoid leaking timing information
    return hmac.compare_digest(expected_signature, received_signature)

def generate_response(query: str) -> str:
    try:
        model_name = "gpt-4o"
        system_prompt = (
            "You are TrueSyncAI, a pioneering AI startup founded by Sujal Rajpoot and Anuj Rajpoot. "
            "As TrueSyncAI, you are designed to be intelligent, engaging, and helpful in conversations. "
            "You should provide insightful, accurate, concise and context-aware responses while maintaining "
            "a friendly and professional tone. Your goal is to enhance the user's experience by adapting to "
            "their needs, assisting with various tasks, and learning from interactions to improve over time. "
            "Always ensure clarity, relevance, conciseness and accuracy in your responses, and align with "
            "TrueSyncAI's vision of bridging the gap between virtual intelligence and reality."
        )
        response = client.chat.completions.create(
            model=model_name,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": query},
            ],
            temperature=0.7,
            max_tokens=4096,
            top_p=0.9,
            stream=False,
        )
        return response.choices[0].message.content
    except Exception:
        # Catch only regular exceptions (not KeyboardInterrupt/SystemExit) and
        # return a friendly fallback message.
        return (
            "The API server is under maintenance. Please try again after some time. "
            "Thank you for using the TrueSyncAI Chat API. Have a great day."
        )
    
# Chat Endpoint
@app.route("/v1/chat/completions", methods=["POST"])
def chat():
    global request_count
    data = request.get_json(silent=True) or {}
    api_key = data.get("api_key", "")
    message = data.get("message", "").strip()
    
    if not api_key or not validate_api_key(api_key):
        return jsonify({"error": "Invalid API Key"}), 401
    
    if not message:
        return jsonify({"error": "Message cannot be empty"}), 400
    
    # Generate the AI response via the model client
    response = generate_response(message)
    request_count += 1
    return jsonify({"response": response})

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860)  # Hugging Face Spaces default port
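
# --- Example usage (illustrative sketch) ---
# A minimal client for the two endpoints above, assuming the app is running
# locally on port 7860 and that the third-party "requests" package is
# installed; the base URL and the sample message are assumptions for
# illustration only.
#
#   import requests
#
#   BASE_URL = "http://localhost:7860"
#
#   # 1. Obtain an API key
#   api_key = requests.post(f"{BASE_URL}/generate_api_key").json()["api_key"]
#
#   # 2. Send a chat message with that key
#   reply = requests.post(
#       f"{BASE_URL}/v1/chat/completions",
#       json={"api_key": api_key, "message": "Hello, TrueSyncAI!"},
#   ).json()
#   print(reply["response"])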