File size: 1,557 Bytes
495a9d1
1ef08ce
 
495a9d1
1ef08ce
 
 
 
 
 
a8d4c5b
 
 
 
a1807ec
301592a
 
1ef08ce
 
 
 
0fb6f39
301592a
 
 
1ef08ce
 
 
 
55915cb
bdb87bc
55915cb
 
a137c43
55915cb
 
 
 
 
 
 
 
1ef08ce
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import streamlit as st
import time
import requests

# Page title for the demo app.
st.header("DeeplySorry")

# Prompt box; default text is a Chinese product blurb for the "DeeplySorry"
# apology-generation model. The label '模型输入' means "model input".
all_input = st.text_area('模型输入', value="""
今天,我们正式发布名为 DeeplySorry 的大规模神经网络模型,它可以代替您向您珍惜的亲人、朋友、爱人道歉。\n""", height=100)



# Sampling controls forwarded verbatim to the completions API (slider args:
# min, max, default). These module-level names are read inside completion().
top_p = st.slider('top_p', 0.0, 1.0, 0.95)
temperature = st.slider('temperature', 0.0, 1.0, 0.85)
max_tokens = st.slider('max tokens', 4, 512, 64)
model_type = st.selectbox('model', ('large', 'xl'))


def completion(prompt: str) -> None:
    """Request completions for *prompt* from the WeLM API and render them.

    Posts to the WeLM ``/v1/completions`` endpoint using the sampling
    parameters chosen in the sidebar widgets (``model_type``, ``max_tokens``,
    ``temperature``, ``top_p``), then renders up to three candidate
    completions side by side in Streamlit columns, followed by the elapsed
    wall-clock time.

    Renders an error box instead if the API returns a non-200 status.
    """
    start = time.monotonic()
    resp = requests.post(
        'https://welm.weixin.qq.com/v1/completions',
        json={
            'prompt': prompt,
            'model': model_type,
            'max_tokens': max_tokens,
            'temperature': temperature,
            'top_p': top_p,
            # Request 5 candidates so we have spares if some come back empty;
            # only the first 3 non-empty ones are displayed.
            'n': 5,
            'stop': None,
        },
        headers={"Authorization": f"Bearer {st.secrets['token']}"},
        # Without a timeout, requests can block forever and freeze the app.
        timeout=60,
    )
    if resp.status_code != 200:
        st.error(f'Bad response: {resp}, {resp.text}')
    else:
        answers = resp.json()
        # Show the raw API payload for debugging/transparency.
        st.json(answers)
        texts = [c['text'] for c in answers['choices'] if c['text'] is not None]
        cols = st.columns(3)
        # Display at most 3 candidates, one per column.
        for idx, answer in enumerate(texts[:3]):
            with cols[idx]:
                # Double the newlines so Markdown renders paragraph breaks.
                content = (prompt + answer).replace("\n", "\n\n")
                st.markdown(f'## 版本{idx}\n\n{content}')
    end = time.monotonic()
    st.text(f'耗时:{end - start}')


# Trigger: the button label means "start generating / get another batch".
# Each click re-runs the script and issues a fresh API request.
if st.button('开始生成/换一批'):
    completion(all_input)