sujalrajpoot committed
Commit f4471ef · verified · 1 Parent(s): 5fdc646

Update app.py

Files changed (1): app.py (+113, -40)
app.py CHANGED
@@ -1,21 +1,36 @@
-from flask import Flask, request, jsonify
-import hmac, hashlib, secrets, time, os
-from openai import OpenAI
-from datetime import datetime, timedelta
-import logging
-
-logging.basicConfig(level=logging.INFO)
+from flask import Flask, request, jsonify, Response, stream_with_context, render_template
+import hmac, hashlib, secrets, time, logging, json, requests, os
+from datetime import datetime, timedelta, timezone
+# from dotenv import load_dotenv;load_dotenv()
+
+# Logging Configuration (Console Only)
+logging.basicConfig(
+    level=logging.INFO, # Set logging level to INFO
+    format="%(asctime)s - %(levelname)s - %(message)s",
+    handlers=[logging.StreamHandler()] # Log to console only
+)
+
+logger = logging.getLogger(__name__) # Logger instance
 app = Flask(__name__)
 
 # 🔑 Secret key for API authentication
 SECRET_KEY = os.getenv("SECRET_KEY")
-GITHUB_TOKEN = os.getenv("GITHUB_TOKEN")
 SPECIAL_API_KEY = os.getenv("SPECIAL_API_KEY")
 ENDPOINT = os.getenv("ENDPOINT")
 SYSTEM_PROMPT = os.getenv("SYSTEM_PROMPT")
-print(SYSTEM_PROMPT)
-
-client = OpenAI(base_url=ENDPOINT,api_key=GITHUB_TOKEN)
+EXCEPTION = os.getenv("EXCEPTION")
+AVAILABLE_MODELS = [
+    "deepseek-r1",
+    "gpt-4-turbo",
+    "gpt-4",
+    "gpt-3.5-turbo",
+    "mistral-7b",
+    "gemini-pro",
+    "claude-3-sonnet",
+    "claude-3-haiku",
+    "llama-3-8b",
+    "llama-3-70b"
+]
 
 # Track API statistics
 api_usage = {} # Stores {api_key: {"count": X, "reset_time": timestamp}}
@@ -37,28 +52,43 @@ def validate_api_key(api_key):
     expected_signature = hmac.new(SECRET_KEY.encode(), random_part.encode(), hashlib.sha256).hexdigest()[:16]
     return expected_signature == received_signature
 
-def generate_response(query:str) -> str:
+# Generator function for streaming response
+def stream_response(api_payload):
+    try:
+        response = requests.post(
+            ENDPOINT,
+            json=api_payload,
+            stream=True
+        )
+
+        if response.status_code == 200 and response.ok:
+            for value in response.iter_lines(decode_unicode=True):
+                if value and "[DONE]" not in value:
+                    try:
+                        data = json.loads(value[6:])
+                        content = data['choices'][0]['delta']['content']
+                        yield content
+                    except:
+                        continue
+        else:
+            yield f"Status Code: {response.status_code}, Response: {response.content}"
+    except:
+        yield EXCEPTION
+
+# ✅ Function for normal (non-streaming) response
+def non_stream_response(api_payload) -> str:
     try:
-        model_name = "gpt-4o"
-        response = client.chat.completions.create(
-            messages=[{"role": "system","content": SYSTEM_PROMPT},{"role": "user","content": query}],temperature=0.7,max_tokens=4096,top_p=0.9,model=model_name,stream=False)
-        return response.choices[0].message.content
+        response = requests.post(ENDPOINT, json=api_payload)
+        if response.status_code==200 and response.ok:
+            return response.json()["choices"][0]["message"]["content"]
+        else:
+            return f"Status Code: {response.status_code}, Response: {response.content}"
     except:
-        return "API Server is under maintenance. Please Try After Some Time Thank You for using TrueSyncAI Chat API. Have a great day."
+        return EXCEPTION
 
 @app.route('/')
 def home():
-    return """
-    <html>
-    <head>
-        <title>TrueSyncAI</title>
-    </head>
-    <body style="text-align: center;">
-        <h1>Welcome to TrueSyncAI</h1>
-        <img src="https://huggingface.co/spaces/sujalrajpoot/truesyncai/resolve/main/TrueSyncAI.jpg" alt="TrueSyncAI Logo" width="500">
-    </body>
-    </html>
-    """
+    return render_template("index.html")
 
 @app.route('/status', methods=['GET'])
 def status():
@@ -72,6 +102,7 @@ def status():
     seconds = uptime_seconds % 60
 
     uptime_str = f"{days} days, {hours} hours, {minutes} minutes and {seconds} seconds"
+    logger.info(f"📊 Status Check | Uptime: {uptime_str} | Total Requests: {request_count}")
 
     return jsonify({
         "status": "API is running",
@@ -86,8 +117,7 @@ def usage():
     if not api_key or not validate_api_key(api_key):
         return jsonify({"error": "Invalid API Key"}), 401
 
-    # Get usage data or return default values
-    now = datetime.utcnow()
+    now = datetime.now(timezone.utc) # Use timezone-aware datetime
     user_data = api_usage.get(api_key, {"count": 0, "reset_time": now + timedelta(days=1)})
 
     remaining_requests = max(0, REQUEST_LIMIT - user_data["count"])
@@ -100,6 +130,16 @@ def usage():
         "reset_time": reset_time
     })
 
+@app.route("/v1/models", methods=["GET"])
+def get_available_models():
+    """
+    Returns a list of available AI models.
+    """
+    logger.info("📜 Model list requested")
+    return jsonify({
+        "models": ", ".join(AVAILABLE_MODELS),
+    })
+
 # Generate API Key
 @app.route("/generate_api_key", methods=["POST"])
 def generate_api_key():
@@ -108,27 +148,31 @@ def generate_api_key():
     api_key = f"TrueSyncAI-{random_part}-{signature}"
     return jsonify({"api_key": api_key})
 
-# Chat Endpoint
+# Chat API with configurable parameters
 @app.route("/v1/chat/completions", methods=["POST"])
 def chat():
     global request_count
     data = request.json
     api_key = data.get("api_key")
     message = data.get("message", "").strip()
-    logging.info(f"Request received: {data}")
+
+    logger.info(f"🔹 Incoming Chat Request | API Key: {api_key} | Message: {message}")
 
     if not api_key or not validate_api_key(api_key):
+        logger.warning("❌ Invalid API Key Attempt")
         return jsonify({"error": "Invalid API Key"}), 401
 
+    # Validate required message field
     if not message:
+        logger.warning("⚠️ Empty message received")
        return jsonify({"error": "Message cannot be empty"}), 400
-
-    # ✅ Apply Limit to the Specific API Key
+
+    # ✅ Apply Limit to the Specific API Key
    if api_key == SPECIAL_API_KEY:
-        now = datetime.utcnow()
+        now = datetime.now(timezone.utc)
        user_data = api_usage.get(api_key, {"count": 0, "reset_time": now + timedelta(days=1)})
 
-        # Reset count if the reset time has passed
+        # Reset count if reset time has passed
        if now >= user_data["reset_time"]:
            user_data = {"count": 0, "reset_time": now + timedelta(days=1)}
 
@@ -139,11 +183,40 @@ def chat():
        # Increase request count
        user_data["count"] += 1
        api_usage[api_key] = user_data # Update storage
-
-    # Basic AI response (Can integrate LLMs here)
-    response = generate_response(message)
+
+    # Extract optional parameters with defaults
+    stream = data.get("stream", False)
+    model = data.get("model", "deepseek-r1")
+    temperature = data.get("temperature", 0.2)
+    presence_penalty = data.get("presence_penalty", 0)
+    frequency_penalty = data.get("frequency_penalty", 0)
+    top_p = data.get("top_p", 1)
+    max_tokens = data.get("max_tokens", 4000)
+
+    api_payload = {
+        "messages": [
+            {'role': 'system', 'content': SYSTEM_PROMPT},
+            {'role': 'user', 'content': message}
+        ],
+        "stream": stream,
+        "model": model,
+        "temperature": temperature,
+        "presence_penalty": presence_penalty,
+        "frequency_penalty": frequency_penalty,
+        "top_p": top_p,
+        "max_tokens": max_tokens
+    }
+
    request_count += 1
+
+    # Return streaming or normal response based on user request
+    if stream:
+        return Response(stream_with_context(stream_response(api_payload)), content_type="text/plain")
+    else:
+        response_text = non_stream_response(api_payload)
+        logger.info(f"✅ Response Sent | API Key: {api_key} | Token Usage: {len(response_text)} chars")
+        return jsonify({"response": response_text})
 
 if __name__ == "__main__":
+    logger.info("🚀 TrueSyncAI API is starting...")
     app.run(host="0.0.0.0", port=7860) # Hugging Face Spaces default port
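
A note on the key format these routes share: generate_api_key issues keys shaped as TrueSyncAI-{random_part}-{signature}, and validate_api_key recomputes the 16-character truncated HMAC-SHA256 signature with SECRET_KEY and compares it to the received one. A minimal standalone sketch of that round trip, assuming a hex random part and a plain split on "-" (the commit does not show those details, and the secret below is a placeholder):

import hmac, hashlib, secrets

SECRET_KEY = "replace-me"  # placeholder; the app reads this from the SECRET_KEY env var

def make_key() -> str:
    # Same shape as generate_api_key: TrueSyncAI-{random_part}-{signature}
    random_part = secrets.token_hex(16)  # assumption: length/encoding of the random part is not shown
    signature = hmac.new(SECRET_KEY.encode(), random_part.encode(), hashlib.sha256).hexdigest()[:16]
    return f"TrueSyncAI-{random_part}-{signature}"

def check_key(api_key: str) -> bool:
    # Same tail as validate_api_key: recompute the truncated signature and compare
    try:
        _, random_part, received_signature = api_key.split("-")
    except ValueError:
        return False
    expected_signature = hmac.new(SECRET_KEY.encode(), random_part.encode(), hashlib.sha256).hexdigest()[:16]
    # The commit compares with ==; hmac.compare_digest avoids timing side channels
    return hmac.compare_digest(expected_signature, received_signature)

assert check_key(make_key())
assert not check_key("TrueSyncAI-deadbeef-0000000000000000")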
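
Inside stream_response, value[6:] assumes the upstream ENDPOINT emits SSE-style lines prefixed with "data: ", terminated by a [DONE] sentinel, and carrying OpenAI-style delta chunks. A tiny illustration of that parsing step against a fabricated sample line (not captured from the real ENDPOINT):

import json

# Fabricated example of one streamed line in the assumed upstream format
line = 'data: {"choices": [{"delta": {"content": "Hel"}}]}'

if line and "[DONE]" not in line:
    chunk = json.loads(line[6:])  # drop the 6-character 'data: ' prefix before parsing
    print(chunk["choices"][0]["delta"]["content"])  # -> Hel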
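
For completeness, the request and response shapes accepted by the updated endpoints can be exercised with a short client. A hedged sketch, assuming a local run of the Space (BASE_URL is a placeholder) and using only the fields read in chat():

import requests

BASE_URL = "http://localhost:7860"  # placeholder; substitute the deployed Space URL

# Obtain an API key
api_key = requests.post(f"{BASE_URL}/generate_api_key").json()["api_key"]

# Non-streaming chat completion; every field except api_key and message has a server-side default
payload = {
    "api_key": api_key,
    "message": "Hello!",
    "model": "deepseek-r1",
    "temperature": 0.2,
    "max_tokens": 4000,
    "stream": False,
}
reply = requests.post(f"{BASE_URL}/v1/chat/completions", json=payload)
print(reply.json()["response"])

# The new model list and the existing status endpoint
print(requests.get(f"{BASE_URL}/v1/models").json()["models"])
print(requests.get(f"{BASE_URL}/status").json())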
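
When stream is true, chat() hands the generator to stream_with_context and returns a plain-text streamed body, so a client can print chunks as they arrive. A minimal sketch under the same placeholder BASE_URL assumption:

import requests

BASE_URL = "http://localhost:7860"  # placeholder, as above
api_key = requests.post(f"{BASE_URL}/generate_api_key").json()["api_key"]

payload = {"api_key": api_key, "message": "Write a haiku about the sea.", "stream": True}
with requests.post(f"{BASE_URL}/v1/chat/completions", json=payload, stream=True) as resp:
    for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
        if chunk:
            print(chunk, end="", flush=True)
print()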