from typing import Literal, cast
from pydantic import SecretStr
from setup.environment import default_model
from setup.easy_imports import ChatOpenAI, ChatGoogleGenerativeAI
import os
from langchain_core.messages import HumanMessage

# API keys are read from the environment; cast() only narrows the type for the checker,
# the values may still be None at runtime if the variables are unset.
deepseek_api_key = cast(str, os.environ.get("DEEPSEEKK_API_KEY"))
google_api_key = cast(str, os.environ.get("GOOGLE_API_KEY_PEIXE"))
open_ai_token = cast(str, os.environ.get("OPENAI_API_KEY"))

Google_llms = Literal[
    "gemini-2.5-pro-preview-05-06",
    "gemini-2.0-flash",
    "gemini-2.0-flash-lite",
    "gemini-2.5-flash-preview-04-17",
]


class LLM:
    """Factory for chat model clients: OpenAI, DeepSeek (OpenAI-compatible), and Google Gemini."""

    def __init__(self):
        pass

    def open_ai(self, model="gpt-4o-mini"):
        return ChatOpenAI(api_key=SecretStr(open_ai_token), model=model)

    def deepseek(self, model="deepseek-chat"):
        return ChatOpenAI(
            api_key=SecretStr(deepseek_api_key),
            base_url="https://api.deepseek.com/v1",
            model=model,
        )

    def google_gemini(
        self,
        model: Google_llms = "gemini-2.0-flash",
    ):
        return ChatGoogleGenerativeAI(
            api_key=SecretStr(google_api_key),
            model=model,
            temperature=0,
            max_tokens=None,
            timeout=None,
            max_retries=2,
        )

    async def google_gemini_ainvoke(
        self,
        prompt: str,
        model: Google_llms = "gemini-2.0-flash",
        max_retries: int = 3,
    ):
        for attempt in range(max_retries):
            try:
                response = await self.google_gemini(model).ainvoke(
                    [HumanMessage(content=prompt)]
                )

                # Gemini may return content as a list of parts; flatten it to a single string
                if isinstance(response.content, list):
                    response.content = "\n".join(response.content)  # type: ignore

                return response
            except Exception as e:
                # On failure, fall back to the default Gemini model for the remaining retries
                model = "gemini-2.0-flash"
                print(f"Attempt {attempt + 1} failed with error: {e}")

        # Final attempt fallback logic (optional)
        try:
            print("Final attempt with fallback model...")
            response = await self.open_ai("gpt-4o-mini").ainvoke(
                [HumanMessage(content=prompt)]
            )
            return response
        except Exception as e:
            raise Exception(
                f"Failed to generate the final document after {max_retries} retries "
                "and the fallback attempt with gpt-4o-mini."
            ) from e
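

# Illustrative usage sketch (not part of the original module): assumes the relevant
# API keys are set in the environment and that an asyncio event loop is available.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        llm = LLM()
        # Synchronous-style call against the OpenAI client
        print(llm.open_ai().invoke([HumanMessage(content="Say hello")]).content)
        # Async Gemini call with the retry/fallback logic defined above
        response = await llm.google_gemini_ainvoke("Summarize LangChain in one line.")
        print(response.content)

    asyncio.run(_demo())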