Update app.py
app.py
CHANGED
@@ -1,58 +1,27 @@
- #
- #pip install gradio
- #pip install tensorflow
- # requirements.txt contains the following dependencies:
- #protobuf==3.20.0
- #transformers==4.27.1
- #icetk
- #cpm_kernels

- import
- import os

-
-
- )
-
-
- 'pip install "modelscope" --upgrade -f https://pypi.org/project/modelscope/'
- )
-
- os.system('pip install transformers -U')
-
- os.system('pip install sentencepiece')
-
- os.system('pip install accelerate')
-
- from modelscope.pipelines import pipeline
- from modelscope.utils.constant import Tasks
-
- #pipe = pipeline(Tasks.text_generation, model='damo/nlp_gpt3_text-generation_13B')
- # pipe = pipeline(task=Tasks.chat, model='ZhipuAI/ChatGLM-6B', model_revision='v1.0.16', device_map='auto')
- pipe = pipeline(task=Tasks.chat, model='ZhipuAI/chatglm2-6b', model_revision='v1.0.2', device_map='auto')
- # import torch
- # model = torch.hub.load('ZhipuAI', 'chatglm2-6b', offload_folder='/root/.cache/modelscope/hub')
- # pipe = pipeline(task=Tasks.chat, model=model, model_revision='v1.0.2', device_map='auto')

def text_generation(prompt: str, style: str) -> str:
    prompt = '以“' + prompt + '”为主题,撰写一段' + style + ',字数在100字左右'
    print('功能:' + style + ',提示文案:' + prompt)

-
-
-     inputs = {'text': prompt, 'history': []}
-     result = pipe(inputs)
-
-     # print('生成文案:' + result['text'])
-     print('生成文案:' + result['response'])
-
-     # return result['text']
-     return result['response']


css_style = "#fixed_size_img {height: 240px;} "

- title = "
description = '''
本服务的主要应用场景涵盖多种文案输入生成和续写,例如用户可以自行输入各种内容,之后服务将会对其进行回答、续写或者按照指令进行回复。
'''
@@ -69,7 +38,7 @@ with gr.Blocks(title=title, css=css_style) as demo:
"
>
<h1 style="font-family: PingFangSC; font-weight: 500; line-height: 1.5em; font-size: 32px; margin-bottom: 7px;">
-
</h1>
<h1 style="font-family: PingFangSC; font-weight: 500; line-height: 1.5em; font-size: 16px; margin-bottom: 7px;">
by宁侠
@@ -80,7 +49,6 @@ with gr.Blocks(title=title, css=css_style) as demo:

gr.Markdown(description)
with gr.Row():
- # radio_style = gr.Radio(label="模型选择", choices=["中文-base", "中文-large", "中文-1.3B", "中文-2.7B", "中文-13B", "中文-30B", "广告文案", "夸夸机器人", "诗词创作", "作文创作"], value="中文-base")
radio_style = gr.Radio(label="功能选择", choices=["小红书笔记", "小红书标题", "公众号文案", "朋友圈微商文案", "商品卖点", "商品描述", "商品种草文案", "商品好评", "广告标题", "创意广告", "产品起名", "视频拍摄剧本", "短视频口播稿", "直播脚本", "短视频拍摄提纲", "SEO文章", "产品slogan", "夸夸机器人", "诗词创作", "作文创作"], value="小红书笔记")
with gr.Row():
text_input = gr.Textbox(label="提示文案", value="探索西夏:沙漠风情与多元文化的西北之旅")
@@ -94,4 +62,4 @@ with gr.Blocks(title=title, css=css_style) as demo:
btn_submit.click(text_generation, inputs=[text_input, radio_style], outputs=text_output)
# btn_clear clears the canvas

- demo.queue(api_open=False).launch(debug=True)
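For reference, the ModelScope backend removed above reduces to the following minimal sketch. Everything in it (the `v1.0.2` revision, the `{'text', 'history'}` input dict, the `'response'` output key) is taken from the removed lines themselves rather than re-checked against the current ModelScope API:

```python
# Minimal sketch of the removed ModelScope-based backend.
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

# Chat pipeline wrapping ZhipuAI/chatglm2-6b, as in the removed code.
pipe = pipeline(task=Tasks.chat, model='ZhipuAI/chatglm2-6b',
                model_revision='v1.0.2', device_map='auto')

# The pipeline takes the prompt text plus a chat history and returns a dict;
# the removed code read the generated text from result['response'].
result = pipe({'text': '以“探索西夏”为主题,撰写一段小红书笔记,字数在100字左右', 'history': []})
print(result['response'])
```

The added lines below replace this with a direct transformers load of ChatGLM2-6B.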
+ # https://github.com/THUDM/ChatGLM2-6B

+ from transformers import AutoTokenizer, AutoModel

+ tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
+ # model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True, device='cuda')
+ # model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).float()
+ model = AutoModel.from_pretrained("THUDM/chatglm2-6b-int4", trust_remote_code=True).float()
+ model = model.eval()

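The two commented-out `from_pretrained` lines keep the GPU and full-precision variants around. A hedged sketch of how that choice is usually made at runtime; the `.half().cuda()` branch follows the upstream ChatGLM2-6B README and is an assumption about available hardware, not something this Space does:

```python
import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)

if torch.cuda.is_available():
    # GPU: load the full model and run it in fp16 (upstream README pattern; assumption).
    model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).half().cuda()
else:
    # CPU-only (e.g. a free Space): int4-quantized weights run in float32, as above.
    model = AutoModel.from_pretrained("THUDM/chatglm2-6b-int4", trust_remote_code=True).float()

model = model.eval()
```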
def text_generation(prompt: str, style: str) -> str:
    prompt = '以“' + prompt + '”为主题,撰写一段' + style + ',字数在100字左右'
    print('功能:' + style + ',提示文案:' + prompt)
+
+     response, history = model.chat(tokenizer, prompt, history=[])
+
+     print('生成文案:' + response)

+     return response
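If the UI should show text as it is generated rather than after `model.chat` returns, the ChatGLM2-6B remote code also provides a streaming variant. A sketch under that assumption (the `stream_chat` signature comes from the upstream demo code, not from this repository):

```python
# Streaming counterpart of text_generation: yields successively longer drafts.
# Gradio renders the partial outputs when the handler is a generator and queueing is on.
def text_generation_stream(prompt: str, style: str):
    prompt = '以“' + prompt + '”为主题,撰写一段' + style + ',字数在100字左右'
    for response, history in model.stream_chat(tokenizer, prompt, history=[]):
        yield response
```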

css_style = "#fixed_size_img {height: 240px;} "

+ title = "文案创作 by宁侠"
description = '''
本服务的主要应用场景涵盖多种文案输入生成和续写,例如用户可以自行输入各种内容,之后服务将会对其进行回答、续写或者按照指令进行回复。
'''
"
>
<h1 style="font-family: PingFangSC; font-weight: 500; line-height: 1.5em; font-size: 32px; margin-bottom: 7px;">
+ 文案创作
</h1>
<h1 style="font-family: PingFangSC; font-weight: 500; line-height: 1.5em; font-size: 16px; margin-bottom: 7px;">
by宁侠

gr.Markdown(description)
with gr.Row():
radio_style = gr.Radio(label="功能选择", choices=["小红书笔记", "小红书标题", "公众号文案", "朋友圈微商文案", "商品卖点", "商品描述", "商品种草文案", "商品好评", "广告标题", "创意广告", "产品起名", "视频拍摄剧本", "短视频口播稿", "直播脚本", "短视频拍摄提纲", "SEO文章", "产品slogan", "夸夸机器人", "诗词创作", "作文创作"], value="小红书笔记")
with gr.Row():
text_input = gr.Textbox(label="提示文案", value="探索西夏:沙漠风情与多元文化的西北之旅")
btn_submit.click(text_generation, inputs=[text_input, radio_style], outputs=text_output)
# btn_clear clears the canvas

+ demo.queue(api_open=False).launch(debug=True)
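The diff hides the middle of the new file, so `btn_submit` and `text_output` only appear in the fragments above. A minimal, self-contained sketch of how the visible pieces plausibly fit together; the output textbox and button labels are hypothetical, not the Space's actual layout:

```python
import gradio as gr

def text_generation(prompt: str, style: str) -> str:
    # Stand-in for the model-backed function defined in app.py above.
    return style + ': ' + prompt

css_style = "#fixed_size_img {height: 240px;} "
title = "文案创作 by宁侠"
description = "本服务的主要应用场景涵盖多种文案输入生成和续写。"

with gr.Blocks(title=title, css=css_style) as demo:
    gr.Markdown(description)
    with gr.Row():
        radio_style = gr.Radio(label="功能选择", choices=["小红书笔记", "诗词创作"], value="小红书笔记")
    with gr.Row():
        text_input = gr.Textbox(label="提示文案", value="探索西夏:沙漠风情与多元文化的西北之旅")
    with gr.Row():
        text_output = gr.Textbox(label="生成文案")  # hypothetical label; hidden by the diff
        btn_submit = gr.Button("提交")              # hypothetical label as well
    btn_submit.click(text_generation, inputs=[text_input, radio_style], outputs=text_output)

demo.queue(api_open=False).launch(debug=True)
```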