luanpoppe committed · Commit 1de075a · Parent: a717db7
fix: google gemini
_utils/langchain_utils/LLM_class.py CHANGED
@@ -3,11 +3,16 @@ from pydantic import SecretStr
 from setup.environment import default_model
 from setup.easy_imports import ChatOpenAI, ChatGoogleGenerativeAI
 import os
+from langchain_core.messages import HumanMessage
 
 deepseek_api_key = cast(str, os.environ.get("DEEPSEEKK_API_KEY"))
 google_api_key = cast(str, os.environ.get("GOOGLE_API_KEY_PEIXE"))
 open_ai_token = cast(str, os.environ.get("OPENAI_API_KEY"))
 
+Google_llms = Literal[
+    "gemini-2.5-pro-exp-03-25", "gemini-2.0-flash", "gemini-2.0-flash-lite"
+]
+
 
 class LLM:
     def __init__(self):
@@ -25,9 +30,7 @@ class LLM:
 
     def google_gemini(
         self,
-        model: Literal[
-            "gemini-2.5-pro-exp-03-25", "gemini-2.0-flash", "gemini-2.0-flash-lite"
-        ] = "gemini-2.0-flash",
+        model: Google_llms = "gemini-2.0-flash",
     ):
         return ChatGoogleGenerativeAI(
             api_key=SecretStr(google_api_key),
@@ -37,3 +40,31 @@ class LLM:
             timeout=None,
             max_retries=2,
         )
+
+    async def google_gemini_ainvoke(
+        self,
+        prompt: str,
+        model: Google_llms = "gemini-2.0-flash",
+    ):
+        try:
+            response = await self.google_gemini(model).ainvoke(
+                [HumanMessage(content=prompt)]
+            )
+            return response
+        except:
+            try:
+                response = await self.google_gemini("gemini-2.0-flash").ainvoke(
+                    [HumanMessage(content=prompt)]
+                )
+                return response
+            except:
+                try:
+                    response = await self.google_gemini("gemini-2.0-flash").ainvoke(
+                        [HumanMessage(content=prompt)]
+                    )
+                    return response
+
+                except:
+                    Exception(
+                        "Falha ao tentar gerar o documento final por 5 tentativas e também ao tentar na última tentativa com o chat-gpt 4o mini."
+                    )
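The added google_gemini_ainvoke wraps google_gemini(model).ainvoke([...]) and, on any error, retries with "gemini-2.0-flash"; note that the final except only constructs an Exception without raising it, so a total failure resolves to None. A minimal usage sketch, assuming the module is importable as _utils.langchain_utils.LLM_class and the GOOGLE_API_KEY_PEIXE environment variable is set; the prompt text and model choice below are illustrative, not part of the commit:

# Usage sketch only, not part of the commit.
# Assumes LLM_class.py is importable as _utils.langchain_utils.LLM_class
# and that GOOGLE_API_KEY_PEIXE is set in the environment.
import asyncio

from _utils.langchain_utils.LLM_class import LLM


async def main():
    llm = LLM()
    # Tries the requested model first; on any error the helper retries with
    # "gemini-2.0-flash". If every attempt fails, the last except only builds
    # an Exception without raising it, so the result may be None.
    response = await llm.google_gemini_ainvoke(
        "Resuma o documento em um parágrafo.",  # hypothetical prompt
        model="gemini-2.5-pro-exp-03-25",
    )
    if response is not None:
        print(response.content)


if __name__ == "__main__":
    asyncio.run(main())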
|