#!/usr/bin/env python

from __future__ import annotations

import json

import gradio as gr
import requests

# from model import AppModel

MAINTENANCE_NOTICE = 'Sorry, due to computing resource issues, this Space is under maintenance and will be restored as soon as possible.'

DESCRIPTION = '''# <a href="https://github.com/THUDM/CogVideo">CogVideo</a>

Currently, this Space only supports the first stage of the CogVideo pipeline due to hardware limitations.

The model accepts only Chinese as input.
If you check the "Translate to Chinese" checkbox, your input is first translated from English to Chinese with [this Space](https://huggingface.co/spaces/chinhon/translation_eng2ch) and the translation is used as the model input.
Since the translation model may mistranslate, you may prefer to translate your prompt with another service and paste the result here directly.
'''
NOTES = 'This app is adapted from <a href="https://github.com/hysts/CogVideo_demo">https://github.com/hysts/CogVideo_demo</a>. We recommend using that repo if you want to run the app yourself.'
FOOTER = '<img id="visitor-badge" alt="visitor badge" src="https://visitor-badge.glitch.me/badge?page_id=THUDM.CogVideo" />'


def post(
        text,
        translate,
        seed,
        only_first_stage,
        image_prompt
        ):
    """Forward the request to the remote CogVideo endpoint and return the
    translated text and the resulting video from its JSON response."""
    url = 'https://ccb8is4fqtofrtdsfjebg.ml-platform-cn-beijing.volces.com/devinstance/di-20221130120908-bhpxq/proxy/6201/cogvideo-s1'
    headers = {
            "Content-Type": "application/json; charset=UTF-8",
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36",
        }

    # The endpoint expects a JSON body carrying the prompt, the translation
    # flag, the seed, the stage selection, and the optional image prompt.
    data = json.dumps({
        'text': text,
        'translate': translate,
        'seed': seed,
        'only_first_stage': only_first_stage,
        'image_prompt': image_prompt
    })
    r = requests.post(url, data=data, headers=headers)

    # Parse the response once and unpack the two fields shown in the UI.
    response_data = r.json()['data']
    translated_text = response_data['translated_text']
    result_video = response_data['result_video']
    return translated_text, result_video
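
# A hedged usage sketch (assuming the endpoint above is reachable and returns a
# payload of the form {'data': {'translated_text': ..., 'result_video': ...}}):
#
#   translated, video = post(
#       text='骑滑板的皮卡丘',  # "Pikachu riding a skateboard"
#       translate=False,
#       seed=1234,
#       only_first_stage=True,
#       image_prompt=None,
#   )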

def main():
    # The second stage is disabled, so the corresponding checkbox below is
    # created with this value and kept hidden.
    only_first_stage = True
    # model = AppModel(only_first_stage)

    with gr.Blocks(css='style.css') as demo:
        # gr.Markdown(MAINTENANCE_NOTICE)
        
        gr.Markdown(DESCRIPTION)

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    text = gr.Textbox(label='Input Text')
                    translate = gr.Checkbox(label='Translate to Chinese',
                                            value=False)
                    seed = gr.Slider(0,
                                     100000,
                                     step=1,
                                     value=1234,
                                     label='Seed')
                    only_first_stage = gr.Checkbox(
                        label='Only First Stage',
                        value=only_first_stage,
                        visible=not only_first_stage)
                    image_prompt = gr.Image(type="filepath",
                                            label="Image Prompt",
                                            value=None)
                    run_button = gr.Button('Run')

            with gr.Column():
                with gr.Group():
                    translated_text = gr.Textbox(label='Translated Text')
                    with gr.Tabs():
                        with gr.TabItem('Output (Video)'):
                            result_video = gr.Video(show_label=False)

        # examples = gr.Examples(
        #     examples=[['骑滑板的皮卡丘', False, 1234, True,None],
        #               ['a cat playing chess', True, 1253, True,None]],
        #     fn=model.run_with_translation,
        #     inputs=[text, translate, seed, only_first_stage,image_prompt],
        #     outputs=[translated_text, result_video],
        #     cache_examples=True)

        gr.Markdown(NOTES)
        gr.Markdown(FOOTER)
        print(gr.__version__)
        run_button.click(fn=post,
                         inputs=[
                             text,
                             translate,
                             seed,
                             only_first_stage,
                             image_prompt
                         ],
                         outputs=[translated_text, result_video])
        
    demo.launch()
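    # A hedged alternative (assuming a Gradio version whose Blocks supports
    # queuing): demo.queue().launch() would serialize concurrent requests so
    # the single remote endpoint is not hit by parallel generations.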


if __name__ == '__main__':
    main()