import os
from typing import Literal, cast

from langchain_core.messages import HumanMessage
from pydantic import SecretStr

from setup.easy_imports import ChatGoogleGenerativeAI, ChatOpenAI
from setup.environment import default_model

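# API keys are read from the environment. `cast(str, ...)` only satisfies the
# type checker; a missing variable still surfaces at runtime when the client
# is built.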
deepseek_api_key = cast(str, os.environ.get("DEEPSEEK_API_KEY"))
google_api_key = cast(str, os.environ.get("GOOGLE_API_KEY_PEIXE"))
open_ai_token = cast(str, os.environ.get("OPENAI_API_KEY"))

Google_llms = Literal[
    "gemini-2.5-pro-exp-03-25", "gemini-2.0-flash", "gemini-2.0-flash-lite"
]


class LLM:
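    """Factory wrappers around the chat-model clients (OpenAI, DeepSeek, Google Gemini)."""
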
    def __init__(self):
        pass

    def open_ai(self, model="gpt-4o-mini"):
        return ChatOpenAI(api_key=SecretStr(open_ai_token), model=model)

    def deepseek(self, model="deepseek-chat"):
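        # DeepSeek's API is OpenAI-compatible, so ChatOpenAI is reused with
        # DeepSeek's base_url.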
        return ChatOpenAI(
            api_key=SecretStr(deepseek_api_key),
            base_url="https://api.deepseek.com/v1",
            model=model,
        )

    def google_gemini(
        self,
        model: Google_llms = "gemini-2.0-flash",
    ):
        return ChatGoogleGenerativeAI(
            api_key=SecretStr(google_api_key),
            model=model,
            temperature=0,
            max_tokens=None,
            timeout=None,
            max_retries=2,
        )

    async def google_gemini_ainvoke(
        self,
        prompt: str,
        model: Google_llms = "gemini-2.0-flash",
        max_retries: int = 5,
    ):
        """Invoke Gemini with retries; fall back to GPT-4o mini as a last resort."""
        for _ in range(max_retries):
            try:
                return await self.google_gemini(model).ainvoke(
                    [HumanMessage(content=prompt)]
                )
            except Exception:
                # Transient Gemini failures (rate limits, timeouts) are retried
                # with the same model before falling back.
                continue
        try:
            return await self.open_ai("gpt-4o-mini").ainvoke(
                [HumanMessage(content=prompt)]
            )
        except Exception as exc:
            raise Exception(
                f"Failed to generate the final document after {max_retries} "
                "Gemini attempts and a final attempt with GPT-4o mini."
            ) from exc
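

# Minimal usage sketch: assumes the API keys read above are present in the
# environment.
if __name__ == "__main__":
    import asyncio

    llm = LLM()
    reply = asyncio.run(llm.google_gemini_ainvoke("Say hello in one sentence."))
    print(reply.content)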