File size: 7,721 Bytes
f4471ef
 
 
 
 
 
 
 
 
 
 
 
 
e07060b
926730e
c031cee
81ecad1
fbc6ce1
 
 
f4471ef
 
 
 
 
 
 
 
 
 
 
 
 
926730e
c50e236
 
 
 
476a74f
c50e236
c0f152a
 
 
 
476a74f
 
 
 
 
 
 
 
 
 
f4471ef
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
476a74f
f4471ef
 
 
 
 
476a74f
f4471ef
476a74f
041821c
 
f4471ef
f383954
c0f152a
 
 
3d77054
 
 
 
 
 
 
 
 
f4471ef
3d77054
c0f152a
 
 
3d77054
c0f152a
926730e
476a74f
 
 
 
 
 
 
f4471ef
476a74f
 
 
 
 
 
 
 
 
 
 
 
f4471ef
 
 
 
 
 
 
 
 
 
926730e
 
 
 
 
 
 
2deac23
f4471ef
56c8a54
926730e
ee542af
926730e
2deac23
 
f4471ef
 
926730e
 
f4471ef
926730e
2deac23
f4471ef
926730e
f4471ef
926730e
f4471ef
 
c50e236
f4471ef
c50e236
 
f4471ef
c50e236
 
 
 
 
cc6b7b4
c50e236
 
 
 
f4471ef
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89f7adc
f4471ef
 
 
 
 
 
 
 
926730e
 
f4471ef
c338f24
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
from flask import Flask, request, jsonify, Response, stream_with_context, render_template
import hmac, hashlib, secrets, time, logging, json, requests, os
from datetime import datetime, timedelta, timezone
# from dotenv import load_dotenv;load_dotenv()

# Logging Configuration (Console Only)
logging.basicConfig(
    level=logging.INFO,  # Set logging level to INFO
    format="%(asctime)s - %(levelname)s - %(message)s",
    handlers=[logging.StreamHandler()]  # Log to console only
)

logger = logging.getLogger(__name__)  # Logger instance
app = Flask(__name__)

# 🔑 Secret key for API authentication
# NOTE(review): all of these are None when the env var is unset; SECRET_KEY
# must be configured or key validation/generation will fail at .encode().
SECRET_KEY = os.getenv("SECRET_KEY")          # HMAC key for signing/validating API keys
SPECIAL_API_KEY = os.getenv("SPECIAL_API_KEY")  # shared testing key that is rate-limited
ENDPOINT = os.getenv("ENDPOINT")              # upstream chat-completions URL
SYSTEM_PROMPT = os.getenv("SYSTEM_PROMPT")    # system message prepended to every chat
EXCEPTION = os.getenv("EXCEPTION")            # generic error text returned on upstream failure
# Models advertised by /v1/models; presumably accepted verbatim by ENDPOINT — TODO confirm
AVAILABLE_MODELS = [
    "deepseek-r1",
    "gpt-4-turbo",
    "gpt-4",
    "gpt-3.5-turbo",
    "mistral-7b",
    "gemini-pro",
    "claude-3-sonnet",
    "claude-3-haiku",
    "llama-3-8b",
    "llama-3-70b"
]

# Track API statistics
# In-memory only: resets on restart and is not shared across workers.
api_usage = {}  # Stores {api_key: {"count": X, "reset_time": timestamp}}

# ✅ Request Limit Configuration
REQUEST_LIMIT = 10 # Max requests per day

# Track API statistics
request_count = 0        # total chat requests served since startup
start_time = time.time() # process start, used by /status uptime

# Validate API Key
def validate_api_key(api_key):
    """Return True iff *api_key* is a well-formed TrueSyncAI key with a valid signature.

    Expected shape: "TrueSyncAI-<random_hex>-<signature>" where <signature> is
    the first 16 hex chars of HMAC-SHA256(SECRET_KEY, <random_hex>) — the same
    construction used by /generate_api_key.
    """
    parts = api_key.split("-")
    if len(parts) != 3 or parts[0] != "TrueSyncAI":
        return False

    random_part, received_signature = parts[1], parts[2]
    expected_signature = hmac.new(SECRET_KEY.encode(), random_part.encode(), hashlib.sha256).hexdigest()[:16]
    # compare_digest is constant-time: avoids leaking how many leading
    # characters of the signature matched (timing side-channel with ==).
    return hmac.compare_digest(expected_signature, received_signature)

# ✅ Generator function for streaming response
def stream_response(api_payload):
    """Yield the upstream completion chunk-by-chunk (SSE-style stream).

    POSTs *api_payload* to ENDPOINT with stream=True and yields each delta's
    content string.  On a non-200 reply yields one diagnostic string; on a
    request failure yields the configured EXCEPTION message.
    """
    try:
        response = requests.post(
            ENDPOINT,
            json=api_payload,
            stream=True,
            timeout=60,  # don't hang forever if the upstream stalls
        )

        if response.status_code == 200:
            for line in response.iter_lines(decode_unicode=True):
                # SSE frames look like 'data: {...}'; '[DONE]' marks end of stream.
                if line and "[DONE]" not in line:
                    try:
                        data = json.loads(line[6:])  # strip the 'data: ' prefix
                        yield data['choices'][0]['delta']['content']
                    except (ValueError, KeyError, IndexError, TypeError):
                        # Keep-alive / malformed chunk: skip it, keep streaming.
                        continue
        else:
            yield f"Status Code: {response.status_code}, Response: {response.content}"
    except requests.RequestException:
        # Narrowed from a bare except: only network/HTTP failures map to EXCEPTION.
        yield EXCEPTION

# ✅ Function for normal (non-streaming) response
def non_stream_response(api_payload) -> str:
    """Return the complete assistant message for *api_payload* in one request.

    POSTs to ENDPOINT without streaming.  On a non-200 reply returns a
    diagnostic string; on a request or parse failure returns the configured
    EXCEPTION message.
    """
    try:
        response = requests.post(ENDPOINT, json=api_payload, timeout=60)  # timeout: avoid hanging
        if response.status_code == 200:  # .ok was redundant alongside ==200
            return response.json()["choices"][0]["message"]["content"]
        return f"Status Code: {response.status_code}, Response: {response.content}"
    except (requests.RequestException, ValueError, KeyError, IndexError, TypeError):
        # Narrowed from a bare except: network failures or an unexpected
        # response shape both fall back to the generic EXCEPTION text.
        return EXCEPTION

@app.route('/')
def home():
    """Serve the landing page (templates/index.html)."""
    page = render_template("index.html")
    return page

@app.route('/status', methods=['GET'])
def status():
    """Health endpoint: report total requests served and human-readable uptime.

    Read-only — the original's `global request_count` was unnecessary since
    the counter is never assigned here.
    """
    uptime_seconds = int(time.time() - start_time)

    # Break uptime into days / hours / minutes / seconds.
    days, remainder = divmod(uptime_seconds, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)

    uptime_str = f"{days} days, {hours} hours, {minutes} minutes and {seconds} seconds"
    logger.info(f"📊 Status Check | Uptime: {uptime_str} | Total Requests: {request_count}")

    return jsonify({
        "status": "API is running",
        "total_requests": request_count,
        "uptime": uptime_str
    })

@app.route("/usage", methods=["GET"])
def usage():
    """Report quota usage for the API key passed as ?api_key=...

    Returns requests used, requests remaining out of REQUEST_LIMIT, and the
    next reset time; 401 for a missing/invalid key.
    """
    api_key = request.args.get("api_key")

    if not api_key or not validate_api_key(api_key):
        return jsonify({"error": "Invalid API Key"}), 401

    now = datetime.now(timezone.utc)  # ✅ Use timezone-aware datetime
    user_data = api_usage.get(api_key, {"count": 0, "reset_time": now + timedelta(days=1)})

    # Bug fix: mirror the reset logic in /v1/chat/completions. Once the daily
    # window has elapsed, report a fresh quota instead of the stale count.
    if now >= user_data["reset_time"]:
        user_data = {"count": 0, "reset_time": now + timedelta(days=1)}

    remaining_requests = max(0, REQUEST_LIMIT - user_data["count"])
    reset_time = user_data["reset_time"].strftime("%Y-%m-%d %H:%M:%S UTC")

    return jsonify({
        "api_key": api_key,
        "requests_used": user_data["count"],
        "remaining_requests": remaining_requests,
        "reset_time": reset_time
    })

@app.route("/v1/models", methods=["GET"])
def get_available_models():
    """Return the supported model names as one comma-separated string."""
    logger.info("📜 Model list requested")
    model_listing = ", ".join(AVAILABLE_MODELS)
    return jsonify({"models": model_listing})

# Generate API Key
@app.route("/generate_api_key", methods=["POST"])
def generate_api_key():
    """Mint a key of the form 'TrueSyncAI-<random_hex>-<sig>'.

    <sig> is the first 16 hex chars of HMAC-SHA256(SECRET_KEY, random_hex),
    the same construction checked by validate_api_key.
    """
    random_part = secrets.token_hex(16)
    digest = hmac.new(SECRET_KEY.encode(), random_part.encode(), hashlib.sha256).hexdigest()
    signature = digest[:16]
    return jsonify({"api_key": f"TrueSyncAI-{random_part}-{signature}"})
    
# ✅ Chat API with configurable parameters
@app.route("/v1/chat/completions", methods=["POST"])
def chat():
    """Main chat endpoint.

    Expects a JSON body: {"api_key": ..., "message": ..., plus optional
    generation parameters}.  Enforces the daily REQUEST_LIMIT on the shared
    testing key, forwards the message (with SYSTEM_PROMPT) to the upstream
    model, and returns either a streamed text/plain response or a JSON body
    depending on "stream".
    """
    global request_count
    # Bug fix: request.json aborts with an HTML 400/415 when the body is
    # missing or not JSON; get_json(silent=True) returns None instead, so we
    # can answer with our own consistent JSON error below.
    data = request.get_json(silent=True) or {}
    api_key = data.get("api_key")
    message = data.get("message", "").strip()

    logger.info(f"🔹 Incoming Chat Request | API Key: {api_key} | Message: {message}")

    if not api_key or not validate_api_key(api_key):
        logger.warning("❌ Invalid API Key Attempt")
        return jsonify({"error": "Invalid API Key"}), 401

    # Validate required message field
    if not message:
        logger.warning("⚠️ Empty message received")
        return jsonify({"error": "Message cannot be empty"}), 400

    # ✅ Daily quota applies only to the shared testing key
    if api_key == SPECIAL_API_KEY:
        now = datetime.now(timezone.utc)
        user_data = api_usage.get(api_key, {"count": 0, "reset_time": now + timedelta(days=1)})

        # Reset count if reset time has passed
        if now >= user_data["reset_time"]:
            user_data = {"count": 0, "reset_time": now + timedelta(days=1)}

        # Block requests if limit exceeded
        if user_data["count"] >= REQUEST_LIMIT:
            return jsonify({"error": "Request limit reached. This is a testing API key for developers. The limit resets daily. Please wait or use a different key."}), 429

        # Increase request count
        user_data["count"] += 1
        api_usage[api_key] = user_data  # Update storage

    # Extract optional generation parameters with conservative defaults
    stream = data.get("stream", False)
    model = data.get("model", "deepseek-r1")
    temperature = data.get("temperature", 0.2)
    presence_penalty = data.get("presence_penalty", 0)
    frequency_penalty = data.get("frequency_penalty", 0)
    top_p = data.get("top_p", 1)
    max_tokens = data.get("max_tokens", 4000)

    api_payload = {
        "messages": [
            {'role': 'system', 'content': SYSTEM_PROMPT},
            {'role': 'user', 'content': message}
        ],
        "stream": stream,
        "model": model,
        "temperature": temperature,
        "presence_penalty": presence_penalty,
        "frequency_penalty": frequency_penalty,
        "top_p": top_p,
        "max_tokens": max_tokens
    }

    request_count += 1

    # Return streaming or normal response based on user request
    if stream:
        return Response(stream_with_context(stream_response(api_payload)), content_type="text/plain")
    else:
        response_text = non_stream_response(api_payload)
        logger.info(f"✅ Response Sent | API Key: {api_key} | Token Usage: {len(response_text)} chars")
        return jsonify({"response": response_text})

# Entry point for local / container runs; production may use a WSGI server instead.
if __name__ == "__main__":
    logger.info("🚀 TrueSyncAI API is starting...")
    app.run(host="0.0.0.0", port=7860)  # Hugging Face Spaces default port