File size: 4,291 Bytes
7b3e7e6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c2ad9fa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7b3e7e6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
# Imports, grouped stdlib / third-party / local per PEP 8.
# Fixed: removed duplicate imports (`datetime`, `gradio`, `DEFAULT_IMAGE_TOKEN`
# were each imported twice) and dropped dead commented-out imports.

# Standard library
import base64
import datetime
import hashlib
import io
import json
import os
import subprocess
import sys
from io import BytesIO
from threading import Thread

# Third-party
import cv2
import gradio as gr
import gradio_client
import numpy as np
import PIL
import requests
import spaces
import torch
from decord import VideoReader, cpu
from huggingface_hub import HfApi, login, revision_exists
from PIL import Image
from transformers import TextStreamer, TextIteratorStreamer

# Local
from llava.constants import (
    IMAGE_TOKEN_INDEX,
    DEFAULT_IMAGE_TOKEN,
)
from llava.conversation import conv_templates, SeparatorStyle
from llava.mm_utils import (
    tokenizer_image_token,
    get_model_name_from_path,
    KeywordsStoppingCriteria,
)
from llava.model.builder import load_pretrained_model
from llava.utils import disable_torch_init
from serve_constants import html_header, bibtext, learn_more_markdown, tos_markdown

# Authenticate against the Hugging Face Hub so logs/votes can be pushed to a
# dataset repo. Requires the HF_TOKEN env var; raises KeyError if unset.
# NOTE(review): `write_permission` is accepted by older huggingface_hub
# versions — confirm it is still supported by the pinned version.
login(token=os.environ["HF_TOKEN"],
      write_permission=True)

# Hub client used for uploads; target repo comes from the LOG_REPO env var
# (KeyError if unset).
api = HfApi()
repo_name = os.environ["LOG_REPO"]

# Local staging directories: conversation logs and up/down-vote records are
# presumably written here before being uploaded to `repo_name` — the writers
# are elsewhere in the file.
external_log_dir = "./logs"
LOGDIR = external_log_dir
VOTEDIR = "./votes"


# Build the chat demo UI: decoding-parameter sliders, the chat window, and a
# row of feedback/control buttons. Event handlers are not wired up here.
_BLOCK_CSS = ".message-wrap.svelte-1lcyrx4>div.svelte-1lcyrx4  img {min-width: 40px}"

with gr.Blocks(css=_BLOCK_CSS) as demo:
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    gr.HTML(html_header)

    with gr.Column():
        # Sampling/decoding knobs, collapsed by default.
        with gr.Accordion("Parameters", open=False) as parameter_row:
            temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, interactive=True, label="Temperature")
            top_p = gr.Slider(minimum=0.0, maximum=1.0, value=1, step=0.1, interactive=True, label="Top P")
            max_output_tokens = gr.Slider(minimum=0, maximum=8192, value=4096, step=256, interactive=True, label="Max output tokens")

        # Main chat transcript.
        with gr.Row():
            chatbot = gr.Chatbot([], elem_id="MAmmoTH-VL-8B", bubble_full_width=False, height=750)

        # Feedback and session controls.
        with gr.Row():
            upvote_btn = gr.Button(value="πŸ‘  Upvote", interactive=True)
            downvote_btn = gr.Button(value="πŸ‘Ž  Downvote", interactive=True)
            flag_btn = gr.Button(value="⚠️  Flag", interactive=True)
            regenerate_btn = gr.Button(value="πŸ”„  Regenerate", interactive=True)
            clear_btn = gr.Button(value="πŸ—‘οΈ  Clear history", interactive=True)

# Enable request queuing so concurrent users are served in order.
demo.queue()

if __name__ == "__main__":
    import argparse

    # CLI flags. Note: argparse normalizes dashes to underscores, so both
    # `--model-base` and `--server_name` styles land on `args.<underscored>`.
    argparser = argparse.ArgumentParser()
    argparser.add_argument("--server_name", default="0.0.0.0", type=str)
    argparser.add_argument("--model_path", default="TIGER-Lab/MAmmoTH-VL2", type=str)
    argparser.add_argument("--model-base", type=str, default=None)
    argparser.add_argument("--num-gpus", type=int, default=1)
    argparser.add_argument("--conv-mode", type=str, default=None)
    # NOTE(review): --temperature / --max-new-tokens / --num_frames are parsed
    # but not consumed in this section; the UI sliders above carry their own
    # defaults. Kept for CLI compatibility.
    argparser.add_argument("--temperature", type=float, default=0.7)
    argparser.add_argument("--max-new-tokens", type=int, default=4096)
    argparser.add_argument("--num_frames", type=int, default=32)
    argparser.add_argument("--load-8bit", action="store_true")
    argparser.add_argument("--load-4bit", action="store_true")
    argparser.add_argument("--debug", action="store_true")

    args = argparser.parse_args()

    model_path = args.model_path
    filt_invalid = "cut"
    # Load tokenizer/model/processor once at startup and move the model to GPU.
    model_name = get_model_name_from_path(args.model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(
        args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit
    )
    model = model.to(torch.device("cuda"))
    chat_image_num = 0
    # BUG FIX: args.server_name was parsed but never used, so the app ignored
    # the requested bind address. Pass it through to Gradio explicitly.
    demo.launch(server_name=args.server_name)