Commit 6573f9c (1 parent: 830f68e) · curry tang committed

update
.env CHANGED
@@ -1,3 +1,4 @@
 DEEP_SEEK_API_KEY=
 OPEN_ROUTER_API_KEY=
+TONGYI_API_KEY=
 DEBUG=False
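Note that config.py (below) declares tongyi_api_key as a required str, and an empty TONGYI_API_KEY= line loads as "" rather than failing validation. A minimal fail-fast sketch, assuming the settings object defined in config.py:

from config import settings

# An empty TONGYI_API_KEY= still parses as a valid (empty) string, so
# validate explicitly before constructing the Tongyi client.
if not settings.tongyi_api_key:
    raise RuntimeError('TONGYI_API_KEY is missing or empty')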
app.py CHANGED
@@ -1,11 +1,12 @@
 import gradio as gr
 from langchain_core.messages import HumanMessage, AIMessage
-from llm import DeepSeekLLM, OpenRouterLLM
+from llm import DeepSeekLLM, OpenRouterLLM, TongYiLLM
 from config import settings
 
 
 deep_seek_llm = DeepSeekLLM(api_key=settings.deep_seek_api_key)
 open_router_llm = OpenRouterLLM(api_key=settings.open_router_api_key)
+tongyi_llm = TongYiLLM(api_key=settings.tongyi_api_key)
 
 
 def init_chat():
@@ -33,6 +34,8 @@ def update_chat(_provider: str, _chat, _model: str, _temperature: float, _max_to
         _chat = deep_seek_llm.get_chat_engine(model=_model, temperature=_temperature, max_tokens=_max_tokens)
     if _provider == 'OpenRouter':
         _chat = open_router_llm.get_chat_engine(model=_model, temperature=_temperature, max_tokens=_max_tokens)
+    if _provider == 'Tongyi':
+        _chat = tongyi_llm.get_chat_engine(model=_model, temperature=_temperature, max_tokens=_max_tokens)
     return _chat
 
 
@@ -51,7 +54,7 @@ with gr.Blocks() as app:
             with gr.Column(scale=1, min_width=300):
                 with gr.Accordion('Select Model', open=True):
                     with gr.Column():
-                        provider = gr.Dropdown(label='Provider', choices=['DeepSeek', 'OpenRouter'], value='DeepSeek')
+                        provider = gr.Dropdown(label='Provider', choices=['DeepSeek', 'OpenRouter', 'Tongyi'], value='DeepSeek')
 
                         @gr.render(inputs=provider)
                         def show_model_config_panel(_provider):
@@ -70,7 +73,7 @@ with gr.Blocks() as app:
                                         label="Temperature",
                                         key="temperature",
                                     )
-                                    max_tokens = gr.
+                                    max_tokens = gr.Slider(
                                         minimum=1024,
                                         maximum=1024 * 20,
                                         step=128,
@@ -108,7 +111,7 @@ with gr.Blocks() as app:
                                         label="Temperature",
                                         key="temperature",
                                     )
-                                    max_tokens = gr.
+                                    max_tokens = gr.Slider(
                                         minimum=1024,
                                         maximum=1024 * 20,
                                         step=128,
@@ -131,6 +134,44 @@ with gr.Blocks() as app:
                                         inputs=[provider, chat_engine, model, temperature, max_tokens],
                                         outputs=[chat_engine],
                                     )
+                            if _provider == 'Tongyi':
+                                with gr.Column():
+                                    model = gr.Dropdown(
+                                        label='模型',
+                                        choices=tongyi_llm.support_models,
+                                        value=tongyi_llm.default_model
+                                    )
+                                    temperature = gr.Slider(
+                                        minimum=0.0,
+                                        maximum=1.0,
+                                        step=0.1,
+                                        value=tongyi_llm.default_temperature,
+                                        label="Temperature",
+                                        key="temperature",
+                                    )
+                                    max_tokens = gr.Slider(
+                                        minimum=1000,
+                                        maximum=2000,
+                                        step=100,
+                                        value=tongyi_llm.default_max_tokens,
+                                        label="Max Tokens",
+                                        key="max_tokens",
+                                    )
+                                    model.change(
+                                        fn=update_chat,
+                                        inputs=[provider, chat_engine, model, temperature, max_tokens],
+                                        outputs=[chat_engine],
+                                    )
+                                    temperature.change(
+                                        fn=update_chat,
+                                        inputs=[provider, chat_engine, model, temperature, max_tokens],
+                                        outputs=[chat_engine],
+                                    )
+                                    max_tokens.change(
+                                        fn=update_chat,
+                                        inputs=[provider, chat_engine, model, temperature, max_tokens],
+                                        outputs=[chat_engine],
+                                    )
 
     with gr.Tab('画图'):
         with gr.Row():
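update_chat now dispatches over three if branches, one per provider. A possible simplification, not part of this commit, is a registry keyed by the dropdown value and built from the module-level instances at the top of app.py:

# Hypothetical alternative to the if-chain in update_chat.
PROVIDERS = {
    'DeepSeek': deep_seek_llm,
    'OpenRouter': open_router_llm,
    'Tongyi': tongyi_llm,
}


def update_chat(_provider: str, _chat, _model: str, _temperature: float, _max_tokens: int):
    # Fall back to DeepSeek, mirroring the dropdown's default value.
    llm = PROVIDERS.get(_provider, deep_seek_llm)
    return llm.get_chat_engine(model=_model, temperature=_temperature, max_tokens=_max_tokens)

With this shape, wiring in a new provider touches only the import, the registry, and the dropdown choices; the .change() handlers stay untouched.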
config.py CHANGED
@@ -4,6 +4,7 @@ from pydantic_settings import BaseSettings, SettingsConfigDict
 class Settings(BaseSettings):
     deep_seek_api_key: str
     open_router_api_key: str
+    tongyi_api_key: str
     debug: bool
 
     model_config = SettingsConfigDict(env_file=('.env', '.env.local'), env_file_encoding='utf-8')
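pydantic-settings matches field names to environment variables case-insensitively, so tongyi_api_key is filled from the TONGYI_API_KEY line added to .env; and with env_file=('.env', '.env.local'), values from later files take priority, which lets a gitignored .env.local hold the real key while .env stays a blank template. For example:

# .env.local (not committed):
#   TONGYI_API_KEY=sk-...
from config import settings

print(settings.tongyi_api_key)  # value from .env.local, falling back to .env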
llm.py CHANGED
@@ -3,11 +3,11 @@ from abc import ABC
 from langchain_openai import ChatOpenAI
 
 
-class DeepSeekLLM(ABC):
-    _support_models = ['deepseek-chat', 'deepseek-coder']
-    _base_url = 'https://api.deepseek.com/v1'
-    _default_model = 'deepseek-chat'
+class BaseLLM(ABC):
     _api_key: str
+    _support_models: List[str]
+    _default_model: str
+    _base_url: str
     _default_temperature: float = 0.5
     _default_max_tokens: int = 4096
 
@@ -51,7 +51,13 @@ class DeepSeekLLM(ABC):
         )
 
 
-class OpenRouterLLM(ABC):
+class DeepSeekLLM(BaseLLM):
+    _support_models = ['deepseek-chat', 'deepseek-coder']
+    _base_url = 'https://api.deepseek.com/v1'
+    _default_model = 'deepseek-chat'
+
+
+class OpenRouterLLM(BaseLLM):
     _support_models = [
         'anthropic/claude-3.5-sonnet', 'openai/gpt-4o',
         'nvidia/nemotron-4-340b-instruct', 'deepseek/deepseek-coder',
@@ -62,45 +68,10 @@ class OpenRouterLLM(ABC):
     ]
     _base_url = 'https://openrouter.ai/api/v1'
     _default_model = 'anthropic/claude-3.5-sonnet'
-    _api_key: str
-    _default_temperature: float = 0.5
-    _default_max_tokens: int = 4096
-
-    def __init__(self, *, api_key: str):
-        self._api_key = api_key
-
-    @property
-    def support_models(self) -> List[str]:
-        return self._support_models
-
-    @property
-    def default_model(self) -> str:
-        return self._default_model
-
-    @property
-    def base_url(self) -> str:
-        return self._base_url
-
-    @property
-    def api_key(self) -> str:
-        return self._api_key
 
-    @property
-    def default_temperature(self) -> float:
-        return self._default_temperature
-
-    @property
-    def default_max_tokens(self) -> int:
-        return self._default_max_tokens
 
-    def get_chat_engine(self, model: str = None, temperature: float = None, max_tokens: int = None):
-        model = model or self.default_model
-        temperature = temperature or self.default_temperature
-        max_tokens = max_tokens or self.default_max_tokens
-        return ChatOpenAI(
-            model=model,
-            api_key=self.api_key,
-            base_url=self.base_url,
-            temperature=temperature,
-            max_tokens=max_tokens,
-        )
+class TongYiLLM(BaseLLM):
+    _support_models = ['qwen-turbo', 'qwen-plus', 'qwen-max', 'qwen-long']
+    _default_model = 'qwen-turbo'
+    _base_url = 'https://dashscope.aliyuncs.com/compatible-mode/v1'
+    _default_max_tokens: int = 2000
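The shared plumbing that the hunks only hint at (the property accessors and the get_chat_engine factory sitting between the two visible hunks) now lives once on BaseLLM. A sketch of the likely resulting base class, with the bodies outside the visible hunks assumed from the duplicates removed from OpenRouterLLM:

from abc import ABC
from typing import List

from langchain_openai import ChatOpenAI


class BaseLLM(ABC):
    _api_key: str
    _support_models: List[str]
    _default_model: str
    _base_url: str
    _default_temperature: float = 0.5
    _default_max_tokens: int = 4096

    def __init__(self, *, api_key: str):
        self._api_key = api_key

    @property
    def support_models(self) -> List[str]:
        return self._support_models

    @property
    def default_model(self) -> str:
        return self._default_model

    @property
    def default_temperature(self) -> float:
        return self._default_temperature

    @property
    def default_max_tokens(self) -> int:
        return self._default_max_tokens

    def get_chat_engine(self, model: str = None, temperature: float = None, max_tokens: int = None):
        # DeepSeek, OpenRouter, and DashScope all speak the OpenAI wire
        # protocol, so a single ChatOpenAI factory with a per-provider
        # base_url covers every subclass.
        return ChatOpenAI(
            model=model or self._default_model,
            api_key=self._api_key,
            base_url=self._base_url,
            temperature=temperature if temperature is not None else self._default_temperature,
            max_tokens=max_tokens or self._default_max_tokens,
        )

Each provider then reduces to a handful of class attributes, as the new TongYiLLM shows; DashScope's compatible-mode endpoint is what lets the Qwen models ride on the same ChatOpenAI client.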