Spaces:
Running
on
Zero
Running
on
Zero
Update app_test.py
Browse files- app_test.py +11 -4
app_test.py
CHANGED
@@ -64,6 +64,12 @@ LOGDIR = external_log_dir
|
|
64 |
VOTEDIR = "./votes"
|
65 |
|
66 |
|
|
|
|
|
|
|
|
|
|
|
|
|
67 |
with gr.Blocks(
|
68 |
css=".message-wrap.svelte-1lcyrx4>div.svelte-1lcyrx4 img {min-width: 40px}",
|
69 |
) as demo:
|
@@ -104,10 +110,11 @@ with gr.Blocks(
|
|
104 |
upvote_btn = gr.Button(value="👍 Upvote", interactive=True)
|
105 |
downvote_btn = gr.Button(value="👎 Downvote", interactive=True)
|
106 |
flag_btn = gr.Button(value="⚠️ Flag", interactive=True)
|
107 |
-
# stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=True)
|
108 |
regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=True)
|
109 |
clear_btn = gr.Button(value="🗑️ Clear history", interactive=True)
|
110 |
|
|
|
|
|
111 |
demo.queue()
|
112 |
|
113 |
if __name__ == "__main__":
|
@@ -130,8 +137,8 @@ if __name__ == "__main__":
|
|
130 |
|
131 |
model_path = args.model_path
|
132 |
filt_invalid = "cut"
|
133 |
-
model_name = get_model_name_from_path(args.model_path)
|
134 |
-
tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit)
|
135 |
-
model=model.to(torch.device('cuda'))
|
136 |
chat_image_num = 0
|
137 |
demo.launch()
|
|
|
64 |
VOTEDIR = "./votes"
|
65 |
|
66 |
|
67 |
+
@spaces.GPU
|
68 |
+
def bot(history, temperature, top_p, max_output_tokens):
|
69 |
+
our_chatbot = chat_manager.get_chatbot(args, model_path, tokenizer, model, image_processor, context_len)
|
70 |
+
print(f"### Chatbot instance ID: {id(our_chatbot)}")
|
71 |
+
|
72 |
+
|
73 |
with gr.Blocks(
|
74 |
css=".message-wrap.svelte-1lcyrx4>div.svelte-1lcyrx4 img {min-width: 40px}",
|
75 |
) as demo:
|
|
|
110 |
upvote_btn = gr.Button(value="👍 Upvote", interactive=True)
|
111 |
downvote_btn = gr.Button(value="👎 Downvote", interactive=True)
|
112 |
flag_btn = gr.Button(value="⚠️ Flag", interactive=True)
|
|
|
113 |
regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=True)
|
114 |
clear_btn = gr.Button(value="🗑️ Clear history", interactive=True)
|
115 |
|
116 |
+
bot()
|
117 |
+
|
118 |
demo.queue()
|
119 |
|
120 |
if __name__ == "__main__":
|
|
|
137 |
|
138 |
model_path = args.model_path
|
139 |
filt_invalid = "cut"
|
140 |
+
#model_name = get_model_name_from_path(args.model_path)
|
141 |
+
#tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit)
|
142 |
+
#model=model.to(torch.device('cuda'))
|
143 |
chat_image_num = 0
|
144 |
demo.launch()
|