update configs
app.py CHANGED
@@ -8,14 +8,20 @@ all_input = st.text_area('模型输入', value="""
 今天,我们正式发布名为 DeeplySorry 的大规模神经网络模型,它可以代替您向您珍惜的亲人、朋友、爱人道歉。\n""", height=100)
 
 
+with st.expander("配置"):
+    top_p = st.slider('top p', 0.0, 1.0, 0.95)
+    temperature = st.slider('temperature', 0.0, 1.0, 0.8)
+    max_tokens = st.slider('max tokens', 4, 512, 64)
+
+
 def completion(prompt):
     start = time.monotonic()
     resp = requests.post('https://welm.weixin.qq.com/v1/completions', json={
         'prompt': prompt,
-        'model': '
-        'max_tokens':
-        'temperature':
-        'top_p':
+        'model': 'large',
+        'max_tokens': max_tokens,
+        'temperature': temperature,
+        'top_p': top_p,
         'top_k': 0.0,
         'n': 5,
         'stop': None,
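For context, the hunk above fits into app.py roughly as sketched below. This is a minimal sketch, not the full file: the imports, the authentication header, the elapsed-time return, and the response handling are assumptions, and the default prompt may be longer than the single line visible in the hunk. Only the text area, the new 配置 ("settings") expander, and the request body come from the diff itself.

import time

import requests
import streamlit as st

# Default prompt ("模型输入" = "model input"); the text announces the
# "DeeplySorry" model, which apologises to your loved ones on your behalf.
all_input = st.text_area('模型输入', value="""
今天,我们正式发布名为 DeeplySorry 的大规模神经网络模型,它可以代替您向您珍惜的亲人、朋友、爱人道歉。\n""", height=100)

# New in this commit: expose the sampling parameters as sliders instead of
# hard-coding them. st.slider takes (label, min_value, max_value, default).
with st.expander("配置"):  # "配置" = "settings"
    top_p = st.slider('top p', 0.0, 1.0, 0.95)
    temperature = st.slider('temperature', 0.0, 1.0, 0.8)
    max_tokens = st.slider('max tokens', 4, 512, 64)


def completion(prompt):
    start = time.monotonic()
    # Request body as in the diff; the Authorization header is an assumption,
    # since the hunk does not show how the WeLM API token is passed.
    resp = requests.post(
        'https://welm.weixin.qq.com/v1/completions',
        json={
            'prompt': prompt,
            'model': 'large',
            'max_tokens': max_tokens,
            'temperature': temperature,
            'top_p': top_p,
            'top_k': 0.0,
            'n': 5,
            'stop': None,
        },
        # headers={'Authorization': f'Bearer {WELM_TOKEN}'},  # assumed, not shown in the hunk
    )
    elapsed = time.monotonic() - start
    return resp.json(), elapsed  # response schema is not shown in the hunk

Exposing top_p, temperature, and max_tokens through the 配置 expander lets users tune sampling from the UI without editing app.py; the Python variables bound to the sliders are passed straight into the request body in place of the previous hard-coded values.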