sanbo committed on
Commit
40e0258
·
1 Parent(s): d1a8738

update sth. at 2024-12-23 23:39:16

Browse files
Files changed (3) hide show
  1. Dockerfile +17 -0
  2. app241224.py +156 -0
  3. requirements.txt +2 -0
Dockerfile ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Use the official Python 3.11 slim image as the base
FROM python:3.11-slim

# Set the working directory
WORKDIR /app

# Copy only the dependency list first so the pip-install layer is cached
# and re-runs only when requirements.txt changes, not on every code edit.
COPY requirements.txt /app/

# Install dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application into /app
COPY . /app

# The Flask app listens on port 7860
EXPOSE 7860

# Run the Flask application
CMD ["python", "app241224.py"]
app241224.py ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
Minimal OpenAI-style Flask proxy in front of the DecentralGPT chat API.

Example usage:

curl -X GET http://localhost:7860/api/models

curl -X POST http://127.0.0.1:7860/hf/v1/chat/completions \
     -H "Content-Type: application/json" \
     -d '{
       "prompt": "你是什么模型?"
     }'

NOTE(review): the original examples used port 7680, but the app listens
on 7860 (see the __main__ guard and the Dockerfile); corrected here.
"""
import json
import uuid  # used to mint a random user id when the caller supplies none

import requests
from flask import Flask, request, Response

app = Flask(__name__)
# Global per-user conversation history: user_id -> list of message dicts
# ({"role": ..., "content": ...}). Process-local and unbounded across users.
user_contexts = {}
MAX_HISTORY_LENGTH = 15  # maximum number of messages kept per user context
22
+
23
+
24
def get_models():
    """Serialize the fixed catalogue of supported chat models.

    Returns an OpenAI-style ``{"object": "list", "data": [...]}`` payload
    as a JSON string.
    """
    catalogue = [
        ("Qwen2.5-72B", "Qwen"),
        ("Llama-3.1-Nemotron-70B", "Nemotron"),
        ("NVLM-D-72B", "NVDIA"),
        ("DeepSeek-Coder-V2", "DeepSeek"),
        ("Qwen2.5-Coder-32B", "Qwen"),
    ]
    data = [
        {"id": model_id, "object": "model", "created": 0, "owned_by": owner}
        for model_id, owner in catalogue
    ]
    return json.dumps({"object": "list", "data": data})
36
+
37
+
38
def chat_completion(
        user_prompt, user_id: str = None, system_prompt="You are a helpful assistant.", model="Qwen2.5-72B",
        project="DecentralGPT", stream=False, temperature=0.3, max_tokens=1024, top_p=0.5,
        frequency_penalty=0, presence_penalty=0):
    """Forward a chat request to the DecentralGPT proxy, optionally keeping context.

    When ``user_id`` is given, the conversation history is stored in the
    module-level ``user_contexts`` dict and trimmed to ``MAX_HISTORY_LENGTH``
    messages (the leading system prompt is always preserved). When it is
    ``None``, a one-shot system+user message pair is sent.

    Returns the raw upstream response body (a JSON string) on success, or a
    human-readable error string on network/parse failure. Never raises for
    request or parsing errors.
    """
    url = 'https://usa-chat.degpt.ai/api/v0/chat/completion/proxy'
    # Browser-like headers; the upstream apparently expects a web-client origin.
    headers = {
        'accept': 'application/json',
        'accept-language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
        'content-type': 'application/json',
        'dnt': '1',
        'origin': 'https://www.degpt.ai',
        'priority': 'u=1, i',
        'referer': 'https://www.degpt.ai/',
        'sec-ch-ua': 'Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-site',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
    }

    if user_id is not None:
        # Seed the context with the system prompt on first sight of this user.
        if user_id not in user_contexts:
            user_contexts[user_id] = [{"role": "system", "content": system_prompt}]
        context = user_contexts[user_id]
        context.append({"role": "user", "content": user_prompt})

        # Trim the oldest non-system message until the history fits.
        # (The original loop had an if/else whose two branches were identical;
        # collapsed into a single guarded delete here.)
        while len(context) > MAX_HISTORY_LENGTH and len(context) > 2:
            del context[1]

        messages = context
    else:
        # No user_id: stateless one-shot exchange, nothing is remembered.
        messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]

    payload = {
        "model": model,
        "messages": messages,
        "project": project,
        "stream": stream,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": top_p,
        "frequency_penalty": frequency_penalty,
        "presence_penalty": presence_penalty
    }

    try:
        # Timeout added so a hung upstream cannot block the worker forever.
        response = requests.post(url, headers=headers, json=payload, timeout=60)
        response.encoding = 'utf-8'
        response.raise_for_status()

        # Extract the assistant message and, when tracking context, remember it.
        response_content = response.json()["choices"][0]["message"]["content"]
        if user_id is not None:
            user_contexts[user_id].append({"role": "assistant", "content": response_content})
        # Return the raw upstream JSON body so the caller can pass it through.
        return response.text
    except requests.exceptions.RequestException as e:
        print(f"请求失败: {e}")
        return "请求失败,请检查网络或参数配置。"
    except (KeyError, IndexError) as e:
        print(f"解析响应时出错: {e}")
        return "解析响应内容失败。"
    # NOTE: the original had an unreachable trailing `return {}` here (every
    # path above already returns); removed to keep the return type a str.
119
+
120
+
121
@app.route('/api/models', methods=['GET'])
@app.route('/api/v1/models', methods=['GET'])
@app.route('/hf/v1/models', methods=['GET'])
def models():
    """Serve the static model catalogue on all three list endpoints."""
    model_listing = get_models()
    return model_listing
127
+
128
+
129
@app.route('/api/chat/completion', methods=['POST'])
@app.route('/api/v1/chat/completions', methods=['POST'])
@app.route('/hf/v1/chat/completions', methods=['POST'])
def chat_completion_api():
    """Handle a chat request, keeping context keyed by ``user_id``.

    Expects a JSON body with a ``prompt`` field and an optional ``user_id``.
    Returns the upstream JSON body verbatim (UTF-8, so non-ASCII text is not
    \\uXXXX-escaped), or a 400 error when ``prompt`` is missing.
    """
    data = request.json or {}
    user_prompt = data.get("prompt")
    if not user_prompt:
        # Reject early instead of forwarding a None prompt upstream.
        return Response(
            json.dumps({"error": "missing 'prompt' field"}, ensure_ascii=False),
            status=400,
            content_type="application/json; charset=utf-8",
        )

    # Generate a random id when the caller supplies none (no cross-request context).
    user_id = data.get("user_id", str(uuid.uuid4()))

    response_content = chat_completion(
        user_prompt,
        user_id=user_id
    )

    # chat_completion returns the upstream body (already JSON) as a str;
    # pass it through unchanged so Chinese text survives un-escaped.
    if isinstance(response_content, str):
        return Response(response_content, content_type="application/json; charset=utf-8")
    # Defensive fallback: the original fell off the end here and returned None
    # (a Flask 500); serialize any non-str result instead.
    return Response(
        json.dumps(response_content, ensure_ascii=False),
        content_type="application/json; charset=utf-8",
    )
153
+
154
+
155
if __name__ == '__main__':
    # Bind on all interfaces so the app is reachable from outside the
    # container; port 7860 matches the Dockerfile's EXPOSE.
    app.run(host='0.0.0.0', port=7860)
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ Flask==2.2.3
2
+ requests==2.31.0