Update utils/llms.py

utils/llms.py (+25 -15)
```diff
@@ -5,20 +5,22 @@ import google.generativeai as genai
 from g4f.client import Client
 from litellm import completion
 import random
+import json
 
 from g4f.Provider import DeepInfraChat,Glider,Liaobots,Blackbox,ChatGptEs,LambdaChat,TypeGPT
-
-os.environ
-gemini_api_keys=["AIzaSyB7yKIdfW7Umv62G47BCdJjoHTJ9TeiAko","AIzaSyDtP05TyoIy9j0uPL7_wLEhgQEE75AZQSc","AIzaSyDOyjfqFhHmGlGJ2raX82XWTtmMcZxRshs"]
+
+gemini_api_keys=json.loads(os.environ.get("GEMINI_KEY_LIST"))
 groq_api_keys=["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","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","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"]
-#[,"AIzaSyBPfR-HG_HeUgLF0LYW1XQgQUxFF6jF_0U","AIzaSyBz01gZCb9kzZF3lNHuwy_iajWhi9ivyDk"]]
-os.environ["GEMINI_API_KEY"] =random.choice(gemini_api_keys)
-os.environ["TOGETHERAI_API_KEY"] ="30bed0b842ed3268372d57f588c22452798e9af96aa8d3129ba745ef226282a8"
 DeepInfraChat.models = ["google/gemma-3-27b-it","deepseek-ai/DeepSeek-R1-Turbo","Qwen/QwQ-32B","deepseek-ai/DeepSeek-R1","deepseek-ai/DeepSeek-V3-0324","meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8","meta-llama/Llama-4-Scout-17B-16E-Instruct","microsoft/Phi-4-multimodal-instruct"]
 deepinframodels=["meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8","microsoft/Phi-4-multimodal-instruct","google/gemma-3-27b-it","meta-llama/Llama-4-Scout-17B-16E-Instruct"]
 REASONING_CORRESPONDANCE = {"DeepSeek-R1-Glider":Glider, "DeepSeekR1-LAMBDA":LambdaChat,"DeepSeekR1":DeepInfraChat,"deepseek-slow":TypeGPT}
 REASONING_QWQ = {"qwq-32b":DeepInfraChat}
+from openai import OpenAI
 
+clienty = OpenAI(
+    base_url="https://openrouter.ai/api/v1",
+    api_key=os.environ.get("OPENROUTER_API_KEY"),
+)
 CHAT_CORRESPONDANCE = {"DeepSeek-V3":DeepInfraChat}
 
 client = Client()
```
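The first hunk does two things: it moves the hardcoded Gemini keys (and the TogetherAI key) out of source, reading them instead from a JSON-encoded `GEMINI_KEY_LIST` environment variable, and it adds an OpenRouter-backed OpenAI client (`clienty`) next to the existing g4f `Client`. The Groq keys remain hardcoded. One caveat in the new line: `os.environ.get("GEMINI_KEY_LIST")` returns `None` when the secret is missing, and `json.loads(None)` raises `TypeError` at import time. A minimal, more defensive sketch of the same pattern, assuming the same `GEMINI_KEY_LIST` and `OPENROUTER_API_KEY` variables; the `load_key_list` helper and its empty-list fallback are illustrative, not part of the commit:

```python
import json
import os
import random

from openai import OpenAI


def load_key_list(var_name):
    """Parse a JSON array of API keys from an env var; return [] when the
    variable is unset or malformed instead of raising at import time."""
    raw = os.environ.get(var_name)
    if raw is None:
        return []
    try:
        keys = json.loads(raw)
    except json.JSONDecodeError:
        return []
    return keys if isinstance(keys, list) else []


gemini_api_keys = load_key_list("GEMINI_KEY_LIST")
if gemini_api_keys:
    # The removed code picked one key at random per process; kept here in
    # case other modules still read GEMINI_API_KEY from the environment.
    os.environ["GEMINI_API_KEY"] = random.choice(gemini_api_keys)

# OpenRouter exposes an OpenAI-compatible API, so the stock SDK client
# works once it is pointed at the OpenRouter base URL.
clienty = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=os.environ.get("OPENROUTER_API_KEY"),
)
```

The variable is expected to hold a JSON array, e.g. `GEMINI_KEY_LIST='["key-a","key-b"]'`.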
```diff
@@ -49,15 +51,23 @@ def clear():
             continue
         helper.q.task_done()
 
-def gpt4(messages,model="gpt-4"):
-
-
-
-
-
-
-
-
+def gpt4(messages,response_format,model="gpt-4"):
+    if response_format!=None:
+        completion = clienty.chat.completions.create(
+            model="google/gemini-2.0-flash-001",
+            messages= messages,
+            response_format=response_format,
+        )
+        return str(completion.choices[0].message.content)
+    else:
+        print(messages)
+        if len(messages) ==1:
+            messages[0]["role"]="user"
+        response = completion(
+            model="gemini/gemini-2.0-flash",
+            messages=messages
+        )
+        return str(response.choices[0].message.content)
 
 def gpt4stream(messages,model,api_keys):
     print(f"-------{model}--------")
```
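The rewritten `gpt4` branches on the new `response_format` parameter: when one is supplied, the request goes through the OpenRouter client with `google/gemini-2.0-flash-001`; otherwise it falls back to litellm's `completion` against `gemini/gemini-2.0-flash`, promoting a lone message's role to `"user"` first. Two things worth noting: the local assignment `completion = clienty.chat.completions.create(...)` shadows the litellm import only inside the `if` branch, so the `else` branch still resolves to the imported function; and the `model` parameter is accepted but unused, since both branches pin their own model. A usage sketch under two assumptions flagged in the comments, the import path and the accepted response-format shape:

```python
# Hypothetical driver for the new gpt4(); the import path assumes the
# repo layout (utils/llms.py) with a package-style checkout.
from utils.llms import gpt4

messages = [{
    "role": "system",
    "content": "Reply with a JSON object containing a single key 'answer'.",
}]

# Structured path: response_format is forwarded to the OpenRouter client
# (google/gemini-2.0-flash-001). The json_object mode is an assumption
# about what OpenRouter accepts for this model.
structured = gpt4(messages, response_format={"type": "json_object"})
print(structured)  # e.g. '{"answer": "..."}'

# Plain path: response_format=None falls through to litellm's completion()
# (gemini/gemini-2.0-flash); a lone message gets its role set to "user".
plain = gpt4(messages, response_format=None)
print(plain)
```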