Update app.py
app.py CHANGED
@@ -1,6 +1,7 @@
from flask import Flask, request, jsonify
import hmac, hashlib, secrets, time, os
from openai import OpenAI
+from datetime import datetime, timedelta
import logging

logging.basicConfig(level=logging.INFO)
@@ -12,6 +13,13 @@ GITHUB_TOKEN = os.getenv("GITHUB_TOKEN")
endpoint = "https://models.inference.ai.azure.com"
client = OpenAI(base_url=endpoint,api_key=GITHUB_TOKEN)

+# Track API statistics
+api_usage = {}  # Stores {api_key: {"count": X, "reset_time": timestamp}}
+
+# ✅ Request Limit Configuration
+REQUEST_LIMIT = 2  # Max requests per day
+SPECIAL_API_KEY = "TrueSyncAI-888d3e23fa5801834aa385118e05bd72-394215579bc4e6ac"  # The restricted API key
+
# Track API statistics
request_count = 0
start_time = time.time()
@@ -91,6 +99,23 @@ def chat():

    if not message:
        return jsonify({"error": "Message cannot be empty"}), 400
+
+    # ✅ Apply Limit to the Specific API Key
+    if api_key == SPECIAL_API_KEY:
+        now = datetime.utcnow()
+        user_data = api_usage.get(api_key, {"count": 0, "reset_time": now + timedelta(days=1)})
+
+        # Reset count if the reset time has passed
+        if now >= user_data["reset_time"]:
+            user_data = {"count": 0, "reset_time": now + timedelta(days=1)}
+
+        # Block requests if limit exceeded
+        if user_data["count"] >= REQUEST_LIMIT:
+            return jsonify({"error": "Request limit reached. Try again tomorrow."}), 429
+
+        # Increase request count
+        user_data["count"] += 1
+        api_usage[api_key] = user_data  # Update storage

    # Basic AI response (Can integrate LLMs here)
    response = generate_response(message)
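For reference, a minimal client-side sketch of how the new limit behaves. The diff does not show the route that chat() is mounted on, how api_key is read from the request, or where the message comes from, so the /chat path, the X-API-Key header, and the local base URL below are assumptions for illustration only.

# Hypothetical smoke test for the per-key limit added above.
# Assumed (not shown in the diff): chat() is served at POST /chat,
# the key arrives in an "X-API-Key" header, and the prompt is sent
# as JSON under "message".
import requests

BASE_URL = "http://127.0.0.1:5000"  # local Flask dev server (assumed)
API_KEY = "TrueSyncAI-888d3e23fa5801834aa385118e05bd72-394215579bc4e6ac"

for attempt in range(1, 4):  # REQUEST_LIMIT = 2, so the third call should be rejected
    resp = requests.post(
        f"{BASE_URL}/chat",
        headers={"X-API-Key": API_KEY},
        json={"message": "Hello"},
    )
    print(attempt, resp.status_code, resp.json())
    # Expected: 200, 200, then 429 with
    # {"error": "Request limit reached. Try again tomorrow."}

Note that api_usage is an in-process dict, so counts reset whenever the Space restarts and are not shared across workers, and the 24-hour window starts from the key's first request rather than at a fixed time of day.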