#### What this does ####
#    On success, logs events to PromptLayer
import os
import traceback

from pydantic import BaseModel

import litellm


class PromptLayerLogger:
    def __init__(self):
        # Read the PromptLayer API key from the environment
        self.key = os.getenv("PROMPTLAYER_API_KEY")

    def log_event(self, kwargs, response_obj, start_time, end_time, print_verbose):
        # Called by litellm after a successful completion; forwards the request/response pair to PromptLayer
        try:
            new_kwargs = {}
            new_kwargs["model"] = kwargs["model"]
            new_kwargs["messages"] = kwargs["messages"]

            # merge kwargs["optional_params"] into new_kwargs
            new_kwargs.update(kwargs["optional_params"])

            # Extract PromptLayer tags from metadata, if present
            tags = []
            metadata = {}
            if "metadata" in kwargs["litellm_params"]:
                if "pl_tags" in kwargs["litellm_params"]["metadata"]:
                    tags = kwargs["litellm_params"]["metadata"]["pl_tags"]

                # Remove "pl_tags" from metadata
                metadata = {
                    k: v
                    for k, v in kwargs["litellm_params"]["metadata"].items()
                    if k != "pl_tags"
                }

            print_verbose(
                f"Prompt Layer Logging - Enters logging function for model kwargs: {new_kwargs}\nresponse: {response_obj}"
            )

            # openai-python >= 1.0.0 returns Pydantic objects instead of plain dicts
            if isinstance(response_obj, BaseModel):
                response_obj = response_obj.model_dump()

            request_response = litellm.module_level_client.post(
                "https://api.promptlayer.com/rest/track-request",
                json={
                    "function_name": "openai.ChatCompletion.create",
                    "kwargs": new_kwargs,
                    "tags": tags,
                    "request_response": dict(response_obj),
                    "request_start_time": int(start_time.timestamp()),
                    "request_end_time": int(end_time.timestamp()),
                    "api_key": self.key,
                    # Optional params for PromptLayer
                    # "prompt_id": "<PROMPT ID>",
                    # "prompt_input_variables": "<Dictionary of variables for prompt>",
                    # "prompt_version":1,
                },
            )

            response_json = request_response.json()
            if not response_json.get("success", False):
                raise Exception("Promptlayer did not successfully log the response!")

            print_verbose(
                f"Prompt Layer Logging: success - final response object: {request_response.text}"
            )

            if "request_id" in response_json:
                if metadata:
                    response = litellm.module_level_client.post(
                        "https://api.promptlayer.com/rest/track-metadata",
                        json={
                            "request_id": response_json["request_id"],
                            "api_key": self.key,
                            "metadata": metadata,
                        },
                    )
                    print_verbose(
                        f"Prompt Layer Logging: success - metadata post response object: {response.text}"
                    )

        except Exception:
            print_verbose(f"error: Prompt Layer Error - {traceback.format_exc()}")
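
# A minimal usage sketch (illustrative only, not part of the module): the
# response dict, messages, and params below are hypothetical stand-ins for
# what litellm passes to log_event at runtime. Running this makes a real
# request to PromptLayer if PROMPTLAYER_API_KEY is set.
if __name__ == "__main__":
    from datetime import datetime

    logger = PromptLayerLogger()
    logger.log_event(
        kwargs={
            "model": "gpt-3.5-turbo",
            "messages": [{"role": "user", "content": "Hello"}],
            "optional_params": {"temperature": 0.2},
            # "pl_tags" is split out as tags; the rest is sent as metadata
            "litellm_params": {"metadata": {"pl_tags": ["example"], "user": "demo"}},
        },
        response_obj={
            "choices": [{"message": {"role": "assistant", "content": "Hi!"}}],
            "model": "gpt-3.5-turbo",
        },
        start_time=datetime.now(),
        end_time=datetime.now(),
        print_verbose=print,
    )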