#pip install "modelscope[cv]" -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html
#pip install gradio
#pip install tensorflow
# requirements.txt contains the following dependencies:
#protobuf==3.20.0
#transformers==4.27.1
#icetk
#cpm_kernels

import gradio as gr
import os

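# Upgrade torch, modelscope, and transformers at startup so the ChatGLM2-6B pipeline runs against recent versions.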
os.system(
    'pip install --upgrade torch'
)

os.system(
    'pip install "modelscope" --upgrade -f https://pypi.org/project/modelscope/'
)

os.system('pip install transformers -U')

from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

#pipe = pipeline(Tasks.text_generation, model='damo/nlp_gpt3_text-generation_13B')
# pipe = pipeline(task=Tasks.chat, model='ZhipuAI/ChatGLM-6B', model_revision='v1.0.16', device_map='auto')
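# Load the ChatGLM2-6B chat pipeline from ModelScope; device_map='auto' leaves device placement to the framework.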
pipe = pipeline(task=Tasks.chat, model='ZhipuAI/chatglm2-6b', model_revision='v1.0.2', device_map='auto')
# import torch
# model = torch.hub.load('ZhipuAI', 'chatglm2-6b', offload_folder='/root/.cache/modelscope/hub')
# pipe = pipeline(task=Tasks.chat, model=model, model_revision='v1.0.2',device_map='auto')

def text_generation(prompt: str, style: str) -> str:
    # Wrap the user's topic in a Chinese copywriting instruction for the selected style (~100 characters).
    prompt = '以“' + prompt + '”为主题,撰写一段' + style + ',字数在100字左右'
    print('功能:' + style + ',提示文案:' + prompt)

#    result = pipe(prompt)

    # The chat pipeline expects a dict with the prompt text and the chat history.
    inputs = {'text': prompt, 'history': []}
    result = pipe(inputs)

#    print('生成文案:' + result['text'])
    print('生成文案:' + result['response'])

#    return result['text']
    return result['response']

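# Example call (the "一键创作" button handler below does the same thing):
#   text_generation('探索西夏:沙漠风情与多元文化的西北之旅', '小红书笔记')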

css_style = "#fixed_size_img {height: 240px;} "

title = "AI文案 by宁侠"
description = '''
本服务的主要应用场景涵盖多种文案输入生成和续写,例如用户可以自行输入各种内容,之后服务将会对其进行回答、续写或者按照指令进行回复。
'''

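# Gradio UI: a header, a style selector, a prompt input, an output box, and a submit button wired to text_generation.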
with gr.Blocks(title=title, css=css_style) as demo:
    gr.HTML('''
      <div style="text-align: center; max-width: 720px; margin: 0 auto;">
                  <div
                    style="
                      display: inline-flex;
                      align-items: center;
                      gap: 0.8rem;
                      font-size: 1.75rem;
                    "
                  >
                    <h1 style="font-family: PingFangSC; font-weight: 500; line-height: 1.5em; font-size: 32px; margin-bottom: 7px;">
                      AI文案
                    </h1>
                    <h1 style="font-family: PingFangSC; font-weight: 500; line-height: 1.5em; font-size: 16px; margin-bottom: 7px;">
                      by宁侠
                    </h1>
                  </div>
                </div>
      ''')

    gr.Markdown(description)
    with gr.Row():
#        radio_style = gr.Radio(label="模型选择", choices=["中文-base", "中文-large", "中文-1.3B", "中文-2.7B", "中文-13B", "中文-30B", "广告文案", "夸夸机器人", "诗词创作", "作文创作"], value="中文-base")
        radio_style = gr.Radio(label="功能选择", choices=["小红书笔记", "小红书标题", "公众号文案", "朋友圈微商文案", "商品卖点", "商品描述", "商品种草文案", "商品好评", "广告标题", "创意广告", "产品起名", "视频拍摄剧本", "短视频口播稿", "直播脚本", "短视频拍摄提纲", "SEO文章", "产品slogan", "夸夸机器人", "诗词创作", "作文创作"], value="小红书笔记")
    with gr.Row():
        text_input = gr.Textbox(label="提示文案", value="探索西夏:沙漠风情与多元文化的西北之旅")
        text_output = gr.Textbox(label="AI创作")
    with gr.Row():
        btn_submit = gr.Button(value="一键创作", elem_id="blue_btn")
        # btn_clear = gr.Button(value="清除")

#    examples = gr.Examples(["以“个性iPhone手机壳”为主题,撰写一段朋友圈微商文案,字数在100字左右", "美化这句话:本服务主要应用于多种场景文案输入的生成和续写,比如用户可以自行尝试输入各种内容,然后让服务去回答、续写或者根据指令回复。"], inputs=[text_input], outputs=text_output)
    examples = gr.Examples(["探索西夏:沙漠风情与多元文化的西北之旅", "个性iPhone手机壳"], inputs=[text_input], outputs=text_output)
    btn_submit.click(text_generation, inputs=[text_input, radio_style], outputs=text_output)
    # btn_clear would clear the canvas

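# Queue requests (with the HTTP API disabled) and launch the demo; debug=True surfaces errors in the console.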
demo.queue(api_open=False).launch(debug=True)