#!/usr/bin/env python
# coding: utf-8

# ## ChatGPT Is Here: Faster Speed, Lower Price

# In[ ]:


import openai
import os

# The API key has to be set before any request is made; here it is read from an environment variable.
openai.api_key = os.environ.get("OPENAI_API_KEY")

# A multi-turn chat request: the messages list carries the system instruction and the previous
# user/assistant turns, and the model answers the last user message.
response = openai.ChatCompletion.create(
  model="gpt-3.5-turbo",
  messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Who won the world series in 2020?"},
        {"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
        {"role": "user", "content": "Where was it played?"}
    ]
)
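
# A minimal follow-up sketch (not part of the original notebook): the assistant's reply sits in
# the choices of the response object returned by the pre-1.0 openai SDK used throughout this notebook.
print(response["choices"][0]["message"]["content"])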




import openai
import os

OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY

# A small wrapper class around the chat API.
class Conversation:
    # prompt becomes the content of the system message, i.e. our instruction to the chatbot;
    # num_of_round is how many past rounds of conversation to keep each time we send a request to ChatGPT.
    def __init__(self, prompt, num_of_round):
        self.prompt = prompt
        self.num_of_round = num_of_round
        self.messages = []
        self.messages.append({"role": "system", "content": self.prompt})

    # ask() takes a question as a string and returns the model's reply as a string.
    # Every call sends one request to ChatGPT: the new question is appended to the end of the
    # message list, and once ChatGPT answers, the answer is appended as well.
    def ask(self, question):
        try:
            self.messages.append({"role": "user", "content": question})
            response = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",
                messages=self.messages,
                temperature=0.5,
                max_tokens=2048,
                top_p=1,
            )
        except Exception as e:
            print(e)
            return str(e)

        message = response["choices"][0]["message"]["content"]
        self.messages.append({"role": "assistant", "content": message})
        # After answering, if the number of rounds exceeds num_of_round, drop the earliest round
        # (one user message plus one assistant message) while keeping the system prompt.
        if len(self.messages) > self.num_of_round * 2 + 1:
            del self.messages[1:3]
        return message


# In[2]:


prompt = """你是一个中国厨师,用中文回答做菜的问题。你的回答需要满足以下要求:
1. 你的回答必须是中文
2. 回答限制在100个字以内"""
conv1 = Conversation(prompt, 3)
question1 = "你是谁?"  # "Who are you?"
print("User : %s" % question1)
print("Assistant : %s\n" % conv1.ask(question1))

question2 = "请问鱼香肉丝怎么做?"  # "How do I cook yuxiang shredded pork?"
print("User : %s" % question2)
print("Assistant : %s\n" % conv1.ask(question2))

question3 = "那蚝油牛肉呢?"  # "And beef in oyster sauce?"
print("User : %s" % question3)
print("Assistant : %s\n" % conv1.ask(question3))


# In[3]:


# Ask what the first question was ("我问你的第一个问题是什么?" = "What was the first question I asked you?").
# At this point the first round is still inside the retained history, so the model can answer.
question4 = "我问你的第一个问题是什么?"
print("User : %s" % question4)
print("Assistant : %s\n" % conv1.ask(question4))





# Ask the same question again. Answering question4 pushed the history past num_of_round rounds,
# so the earliest round (the first question) has been dropped and the model can no longer recall it.
question5 = "我问你的第一个问题是什么?"
print("User : %s" % question5)
print("Assistant : %s\n" % conv1.ask(question5))







# Conversation2 is the same as Conversation above, except that ask() also returns the total number
# of tokens consumed by the request, taken from the usage field of the response.
class Conversation2:
    def __init__(self, prompt, num_of_round):
        self.prompt = prompt
        self.num_of_round = num_of_round
        self.messages = []
        self.messages.append({"role": "system", "content": self.prompt})

    def ask(self, question):
        try:
            self.messages.append({"role": "user", "content": question})
            response = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",
                messages=self.messages,
                temperature=0.5,
                max_tokens=2048,
                top_p=1,
            )
        except Exception as e:
            print(e)
            # Return a zero token count on failure so callers can still unpack two values.
            return str(e), 0

        message = response["choices"][0]["message"]["content"]
        num_of_tokens = response['usage']['total_tokens']
        self.messages.append({"role": "assistant", "content": message})

        if len(self.messages) > self.num_of_round * 2 + 1:
            del self.messages[1:3]
        return message, num_of_tokens


# In[6]:


conv2 = Conversation2(prompt, 3)
questions = [question1, question2, question3, question4, question5]
for question in questions:
    answer, num_of_tokens = conv2.ask(question)
    print("询问 {%s} 消耗的token数量是 : %d" % (question, num_of_tokens))





import tiktoken
# cl100k_base is the encoding used by gpt-3.5-turbo.
encoding = tiktoken.get_encoding("cl100k_base")

conv2 = Conversation2(prompt, 3)
question1 = "你是谁?"  # "Who are you?"
answer1, num_of_tokens = conv2.ask(question1)
print("Total number of tokens consumed: %d" % (num_of_tokens))

# Count the prompt, the question and the answer locally with tiktoken and compare the sum with the
# usage reported by the API; the API number is larger because of the extra markers counted below.
prompt_count = len(encoding.encode(prompt))
question1_count = len(encoding.encode(question1))
answer1_count = len(encoding.encode(answer1))
total_count = prompt_count + question1_count + answer1_count
print("The prompt consumes %d tokens, the question consumes %d tokens, the answer consumes %d tokens, %d tokens in total" % (prompt_count, question1_count, answer1_count, total_count))



# The chat format wraps every message in markers such as <|im_start|>system\n ... <|im_end|>\n,
# and those markers consume tokens as well; count them here.
system_start_count = len(encoding.encode("<|im_start|>system\n"))
print(encoding.encode("<|im_start|>system\n"))
end_count = len(encoding.encode("<|im_end|>\n"))
print(encoding.encode("<|im_end|>\n"))
user_start_count = len(encoding.encode("<|im_start|>user\n"))
print(encoding.encode("<|im_start|>user\n"))
assistant_start_count = len(encoding.encode("<|im_start|>assistant\n"))
print(encoding.encode("<|im_start|>assistant\n"))

total_mark_count = system_start_count + user_start_count + assistant_start_count + end_count*2
print("The markers the system adds around the messages consume %d tokens" % total_mark_count)





get_ipython().run_line_magic('pip', 'install gradio')


# In[4]:


get_ipython().run_line_magic('pip', 'install --upgrade gradio')


# In[3]:


import gradio as gr
prompt = """你是一个中国厨师,用中文回答做菜的问题。你的回答需要满足以下要求:
1. 你的回答必须是中文
2. 回答限制在100个字以内"""
# 定义好了 system 这个系统角色的提示语,创建了一个 Conversation 对象。
conv = Conversation(prompt, 5)

# history keeps the full record of the conversation.
def predict(input, history=[]):
    history.append(input)
    response = conv.ask(input)
    history.append(response)
    # Group the user and AI turns of the history into (user, bot) pairs for the Chatbot component.
    responses = [(u, b) for u, b in zip(history[::2], history[1::2])]
    return responses, history

# Finally, a with block builds the chat interface. Gradio provides a ready-made Chatbot component,
# so we only need to use it and add a text input box underneath.
with gr.Blocks(css="#chatbot{height:350px} .overflow-y-auto{height:500px}") as demo:
    chatbot = gr.Chatbot(elem_id="chatbot")
    state = gr.State([])

    with gr.Row():
        txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False)

    txt.submit(predict, [txt, state], [chatbot, state])

demo.launch()


# In[ ]: