import os
import json

import requests
from flask import Flask, request, jsonify
from ddtrace import patch_all, tracer

# Datadog LLM Observability settings. Ideally these are set in the deployment
# environment before the process starts so ddtrace picks them up on import;
# they are set here, before tracing is initialized, as a fallback.
os.environ['DD_LLMOBS_ENABLED'] = '1'
os.environ['DD_LLMOBS_ML_APP'] = 'anything-api'
os.environ['DD_LLMOBS_AGENTLESS_ENABLED'] = '1'

# Initialize Datadog tracing
patch_all()

app = Flask(__name__)
@app.route("/chat", methods=["POST"])  # route path is an example; adjust to your deployment
def handle_llm_call():
    # Extract the message for the LLM from the incoming JSON request
    data = request.get_json(silent=True) or {}
    message = data.get("message")
    if not message:
        return jsonify({"error": "No message provided"}), 400
    url = 'https://severian-anything.hf.space/api/v1/workspace/scoreboard/chat'
    headers = {
        'accept': 'application/json',
        'Authorization': 'Bearer YOUR_TOKEN_HERE',
        'Content-Type': 'application/json'
    }
    payload = {
        "message": message,
        "mode": "query"
    }
    # Trace the LLM API call
    with tracer.trace("llm_api_call", service="anything-api", resource="chat", span_type="http") as span:
        span.set_tag("llm.request.model", "anything-api")
        span.set_tag("llm.request.input", message)
        try:
            # Make the actual call to the LLM API (timeout added so the request cannot hang indefinitely)
            response = requests.post(url, headers=headers, data=json.dumps(payload), timeout=30)
            response.raise_for_status()
            response_data = response.json()
            bot_response = response_data.get("textResponse")
            span.set_tag("llm.response.output", bot_response)
            return jsonify({"bot_response": bot_response})
        except requests.RequestException as e:
            span.set_tag("error", True)
            span.set_tag("error.msg", str(e))
            return jsonify({"error": f"Request failed: {e}"}), 500
        except Exception as e:
            span.set_tag("error", True)
            span.set_tag("error.msg", str(e))
            return jsonify({"error": f"An error occurred: {e}"}), 500
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=7860)
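
A minimal way to exercise the endpoint once the server is running. This sketch assumes the app is reachable on localhost:7860 and that the route is /chat as in the example decorator above; it simply posts a message and prints the traced response.

import requests

# Assumes the Flask app above is running locally on port 7860
# and exposes the (assumed) /chat route.
resp = requests.post(
    "http://localhost:7860/chat",
    json={"message": "Hello, what can you tell me about this workspace?"},
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["bot_response"])

Each request handled this way produces an "llm_api_call" span in Datadog with the input message and the model's text response attached as tags.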