Commit changes in app.py
app.py
CHANGED
@@ -1,236 +1,177 @@
-import ssl
-import certifi
-from functools import partial
 
-
-ssl.create_default_context = partial(
-    ssl.create_default_context,
-    cafile=certifi.where()
-)
 
 import g4f.api
 import g4f.Provider
 
-import json
-import time
-import requests
-
-from g4f.…
-from g4f.…
-from g4f.typing import Union, Optional, AsyncResult, Messages, ImagesType
-from g4f.requests import StreamSession, raise_for_status
-from g4f.providers.response import FinishReason, ToolCalls, Usage, Reasoning, ImageResponse
-from g4f.errors import MissingAuthError, ResponseError
-from g4f.image import to_data_uri
 from g4f import debug
 
-class OpenaiTemplate(…):
-    api_base = ""
-    supports_message_history = True
-    supports_system_message = True
-    default_model = ""
-    fallback_models = []
-    sort_models = True
-    verify = None
-
-    @classmethod
-    def get_models(cls, api_key: str = None, api_base: str = None) -> list[str]:
-        if not cls.models:
-            try:
-                headers = {}
-                if api_base is None:
-                    api_base = cls.api_base
-                if api_key is not None:
-                    headers["authorization"] = f"Bearer {api_key}"
-                response = requests.get(f"{api_base}/models", headers=headers, verify=cls.verify)
-                raise_for_status(response)
-                data = response.json()
-                data = data.get("data") if isinstance(data, dict) else data
-                cls.image_models = [model.get("id") for model in data if model.get("image")]
-                cls.models = [model.get("id") for model in data]
-                if cls.sort_models:
-                    cls.models.sort()
-            except Exception as e:
-                debug.log(e)
-                return cls.fallback_models
-        return cls.models
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        timeout: int = 120,
-        images: ImagesType = None,
-        api_key: str = None,
-        api_endpoint: str = None,
-        api_base: str = None,
-        temperature: float = None,
-        max_tokens: int = None,
-        top_p: float = None,
-        stop: Union[str, list[str]] = None,
-        stream: bool = False,
-        prompt: str = None,
-        headers: dict = None,
-        impersonate: str = None,
-        tools: Optional[list] = None,
-        extra_data: dict = {},
-        **kwargs
-    ) -> AsyncResult:
-        if cls.needs_auth and api_key is None:
-            raise MissingAuthError('Add a "api_key"')
-        async with StreamSession(
-            proxy=proxy,
-            headers=cls.get_headers(stream, api_key, headers),
-            timeout=timeout,
-            impersonate=impersonate,
-        ) as session:
-            model = cls.get_model(model, api_key=api_key, api_base=api_base)
-            if api_base is None:
-                api_base = cls.api_base
-
-            # Proxy for image generation feature
-            if prompt and model and model in cls.image_models:
-                data = {
-                    "prompt": messages[-1]["content"] if prompt is None else prompt,
-                    "model": model,
-                }
-                async with session.post(f"{api_base.rstrip('/')}/images/generations", json=data, ssl=cls.verify) as response:
-                    data = await response.json()
-                    cls.raise_error(data)
-                    await raise_for_status(response)
-                    yield ImageResponse([image["url"] for image in data["data"]], prompt)
-                    return
-
-            if images is not None and messages:
-                if not model and hasattr(cls, "default_vision_model"):
-                    model = cls.default_vision_model
-                last_message = messages[-1].copy()
-                last_message["content"] = [
-                    *[{
-                        "type": "image_url",
-                        "image_url": {"url": to_data_uri(image)}
-                    } for image, _ in images],
-                    {
-                        "type": "text",
-                        "text": messages[-1]["content"]
-                    }
-                ]
-                messages[-1] = last_message
-            data = filter_none(
-                messages=messages,
-                model=model,
-                temperature=temperature,
-                max_tokens=max_tokens,
-                top_p=top_p,
-                stop=stop,
-                stream=stream,
-                tools=tools,
-                **extra_data
-            )
-            if api_endpoint is None:
-                api_endpoint = f"{api_base.rstrip('/')}/chat/completions"
-            async with session.post(api_endpoint, json=data, ssl=cls.verify) as response:
-                content_type = response.headers.get("content-type", "text/event-stream" if stream else "application/json")
-                if content_type.startswith("application/json"):
-                    data = await response.json()
-                    cls.raise_error(data)
-                    await raise_for_status(response)
-                    choice = data["choices"][0]
-                    if "content" in choice["message"] and choice["message"]["content"]:
-                        yield choice["message"]["content"].strip()
-                    elif "tool_calls" in choice["message"]:
-                        yield ToolCalls(choice["message"]["tool_calls"])
-                    if "usage" in data:
-                        yield Usage(**data["usage"])
-                    if "finish_reason" in choice and choice["finish_reason"] is not None:
-                        yield FinishReason(choice["finish_reason"])
-                        return
-                elif content_type.startswith("text/event-stream"):
-                    await raise_for_status(response)
-                    first = True
-                    is_thinking = 0
-                    async for line in response.iter_lines():
-                        if line.startswith(b"data: "):
-                            chunk = line[6:]
-                            if chunk == b"[DONE]":
-                                break
-                            data = json.loads(chunk)
-                            cls.raise_error(data)
-                            choice = data["choices"][0]
-                            if "content" in choice["delta"] and choice["delta"]["content"]:
-                                delta = choice["delta"]["content"]
-                                if first:
-                                    delta = delta.lstrip()
-                                if delta:
-                                    first = False
-                                    if is_thinking:
-                                        if "</think>" in delta:
-                                            yield Reasoning(None, f"Finished in {round(time.time()-is_thinking, 2)} seconds")
-                                            is_thinking = 0
-                                        else:
-                                            yield Reasoning(delta)
-                                    elif "<think>" in delta:
-                                        is_thinking = time.time()
-                                        yield Reasoning(None, "Is thinking...")
-                                    else:
-                                        yield delta
-                            if "usage" in data and data["usage"]:
-                                yield Usage(**data["usage"])
-                            if "finish_reason" in choice and choice["finish_reason"] is not None:
-                                yield FinishReason(choice["finish_reason"])
-                                break
-                else:
-                    await raise_for_status(response)
-                    raise ResponseError(f"Not supported content-type: {content_type}")
-
-    @classmethod
-    def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
-        return {
-            "Accept": "text/event-stream" if stream else "application/json",
-            "Content-Type": "application/json",
-            **(
-                {"Authorization": f"Bearer {api_key}"}
-                if api_key is not None else {}
-            ),
-            **({} if headers is None else headers)
-        }
-
-class Feature(OpenaiTemplate):
     url = "https://ahe.hopto.org"
     working = True
-
 
     models = [
-        *…
         *g4f.Provider.HuggingChat.get_models(),
-        "…
     ]
 
     @classmethod
-    def get_model(cls, model…
-        if …
-            …
         elif model in g4f.Provider.OpenaiAccount.get_models():
-            …
         elif model in g4f.Provider.HuggingChat.get_models():
-            …
         else:
-            …
         return model
 
     @classmethod
     async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
         api_key: str = None,
         **kwargs
     ) -> AsyncResult:
-        …
-            yield chunk
-
-g4f.Provider.__map__["Feature"] = Feature
 
-
+import os
+import json
+from g4f.providers.response import Reasoning, JsonConversation, FinishReason
+from g4f.typing import AsyncResult, Messages
+import json
+import re
+import time
+from urllib.parse import quote_plus
+from fastapi import FastAPI, Response, Request
+from fastapi.responses import RedirectResponse
 
+from g4f.image import images_dir, copy_images
 
 import g4f.api
 import g4f.Provider
 
+from g4f.Provider.base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from g4f.typing import AsyncResult, Messages
+from g4f.requests import StreamSession
+from g4f.providers.response import ProviderInfo, JsonConversation, PreviewResponse, SynthesizeData, TitleGeneration, RequestLogin
+from g4f.providers.response import Parameters, FinishReason, Usage, Reasoning
+from g4f.errors import ModelNotSupportedError
 from g4f import debug
 
+class BackendApi(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://ahe.hopto.org"
     working = True
+    ssl = False
 
     models = [
+        *g4f.Provider.OpenaiAccount.get_models(),
         *g4f.Provider.HuggingChat.get_models(),
+        "flux",
+        "flux-pro",
+        "MiniMax-01",
+        "Microsoft Copilot",
     ]
 
     @classmethod
+    def get_model(cls, model):
+        if "MiniMax" in model:
+            model = "MiniMax"
+        elif "Copilot" in model:
+            model = "Copilot"
+        elif "FLUX" in model:
+            model = f"flux-{model.split('-')[-1]}"
+        elif "flux" in model:
+            model = model.split(' ')[-1]
         elif model in g4f.Provider.OpenaiAccount.get_models():
+            pass
         elif model in g4f.Provider.HuggingChat.get_models():
+            pass
         else:
+            raise ModelNotSupportedError(f"Model: {model}")
         return model
 
+    @classmethod
+    def get_provider(cls, model):
+        if model.startswith("MiniMax"):
+            return "HailuoAI"
+        elif model == "Copilot":
+            return "CopilotAccount"
+        elif model in g4f.Provider.OpenaiAccount.get_models():
+            return "OpenaiAccount"
+        elif model in g4f.Provider.HuggingChat.get_models():
+            return "HuggingChat"
+        return None
+
     @classmethod
     async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
         api_key: str = None,
+        proxy: str = None,
+        timeout: int = 0,
         **kwargs
     ) -> AsyncResult:
+        debug.log(f"{__name__}: {api_key}")
 
+        async with StreamSession(
+            proxy=proxy,
+            headers={"Accept": "text/event-stream"},
+            timeout=timeout
+        ) as session:
+            model = cls.get_model(model)
+            provider = cls.get_provider(model)
+            async with session.post(f"{cls.url}/backend-api/v2/conversation", json={
+                "model": model,
+                "messages": messages,
+                "provider": provider,
+                **kwargs
+            }, ssl=cls.ssl) as response:
+                async for line in response.iter_lines():
+                    data = json.loads(line)
+                    data_type = data.pop("type")
+                    if data_type == "provider":
+                        yield ProviderInfo(**data[data_type])
+                        provider = data[data_type]["name"]
+                    elif data_type == "conversation":
+                        yield JsonConversation(**data[data_type][provider] if provider in data[data_type] else data[data_type][""])
+                    elif data_type == "conversation_id":
+                        pass
+                    elif data_type == "message":
+                        yield Exception(data)
+                    elif data_type == "preview":
+                        yield PreviewResponse(data[data_type])
+                    elif data_type == "content":
+                        def on_image(match):
+                            extension = match.group(3).split(".")[-1].split("?")[0]
+                            extension = "" if not extension or len(extension) > 4 else f".{extension}"
+                            filename = f"{int(time.time())}_{quote_plus(match.group(1)[:100], '')}{extension}"
+                            download_url = f"/download/{filename}?url={cls.url}{match.group(3)}"
+                            return f"[![{match.group(1)}]({download_url})](/images/{filename})"
+                        yield re.sub(r'\[\!\[(.+?)\]\(([^)]+?)\)\]\(([^)]+?)\)', on_image, data["content"])
+                    elif data_type == "synthesize":
+                        yield SynthesizeData(**data[data_type])
+                    elif data_type == "parameters":
+                        yield Parameters(**data[data_type])
+                    elif data_type == "usage":
+                        yield Usage(**data[data_type])
+                    elif data_type == "reasoning":
+                        yield Reasoning(**data)
+                    elif data_type == "login":
+                        pass
+                    elif data_type == "title":
+                        yield TitleGeneration(data[data_type])
+                    elif data_type == "finish":
+                        yield FinishReason(data[data_type]["reason"])
+                    elif data_type == "log":
+                        debug.log(data[data_type])
+                    else:
+                        debug.log(f"Unknown data: ({data_type}) {data}")
+
+g4f.Provider.__map__["Feature"] = BackendApi
+
+def create_app():
+    g4f.debug.logging = True
+    g4f.api.AppConfig.gui = True
+    g4f.api.AppConfig.demo = True
+
+    app = FastAPI()
+
+    # Add CORS middleware
+    app.add_middleware(
+        g4f.api.CORSMiddleware,
+        allow_origin_regex=".*",
+        allow_credentials=True,
+        allow_methods=["*"],
+        allow_headers=["*"],
+    )
+
+    api = g4f.api.Api(app)
+
+    api.register_routes()
+    api.register_authorization()
+    api.register_validation_exception_handler()
+
+    @app.get("/download/{filename}", response_class=RedirectResponse)
+    async def download(filename, request: Request):
+        filename = os.path.basename(filename)
+        target = os.path.join(images_dir, filename)
+        if not os.path.exists(target):
+            url = str(request.query_params).split("url=", 1)[1]
+            if url:
+                source_url = url.replace("%2F", "/").replace("%3A", ":").replace("%3F", "?").replace("%3D", "=")
+                await copy_images([source_url], target=target, ssl=False)
+        if not os.path.exists(target):
+            return Response(status_code=404)
+        return RedirectResponse(f"/images/{filename}")
+
+    gui_app = g4f.api.WSGIMiddleware(g4f.api.get_gui_app(g4f.api.AppConfig.demo))
+    app.mount("/", gui_app)
+
+    return app
+
+app = create_app()
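A quick sanity check for the new setup (a sketch, not part of the commit): assuming the mounted g4f.api.Api keeps its usual OpenAI-compatible /v1/chat/completions route and the app is running locally via the start script below, a client could talk to it with the openai package. The base_url, api_key value, and prompt here are placeholders.

# Sketch only: base_url/api_key are placeholders; the /v1 route layout is assumed
# from g4f.api.Api's standard OpenAI-compatible endpoints.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="secret")

completion = client.chat.completions.create(
    model="MiniMax-01",  # one of the entries in BackendApi.models
    messages=[{"role": "user", "content": "Hello!"}],
)
print(completion.choices[0].message.content)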
start
ADDED
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+uvicorn app:app --port 8000 --reload
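For local runs without the shell script, the same server can also be started programmatically; this is simply the Python equivalent of the uvicorn command above (port and reload flag copied from start, everything else left at uvicorn defaults).

# run.py (hypothetical helper): equivalent of `uvicorn app:app --port 8000 --reload`
import uvicorn

if __name__ == "__main__":
    # Pass the "app:app" import string (not the app object) so autoreload can re-import it.
    uvicorn.run("app:app", port=8000, reload=True)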