#!/bin/bash

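# Keep the Hugging Face cache under /data (assumed to be the persistent volume mounted into this container).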
export HF_HOME="/data/.huggingface"

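# Debug output: print the working directory and its contents, and write the HF token (passed in via the environment) to a plain-text file.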
echo "PWD: $(pwd)"
echo "$HF_TOKEN" > .hf_token
echo "LS: $(ls -als)"

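# Periodically log GPU status (every 10 minutes) in the background so utilization shows up in the container logs.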
while true; do nvidia-smi; sleep 600; done &

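# Sanity-check that PyTorch can see the GPU(s) before launching any services.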
python3 -c "import torch; \
    print(f\"is available = {torch.cuda.is_available()}\"); \
    print(f\"device count = {torch.cuda.device_count()}\"); \
    print(f\"current device = {torch.cuda.current_device()}\")"

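# Launch the model controller and capture its PID for the final wait.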
echo "Starting serve.controller"
python3 -m serve.controller --host 0.0.0.0 --port 10000 &
P1=$!

sleep 30

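# Launch the Gradio web UI against the local controller; --share (public Gradio link) is left disabled.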
echo "Starting serve.gradio_web_server"
python3 -m serve.gradio_web_server --controller http://127.0.0.1:10000 --model-list-mode reload & # --share
P2=$!

# sleep 30

# echo "Starting prism-dinosiglip+13b"
# python3 -m interactive_demo --port 40000 --model_id prism-dinosiglip+13b &
# P4=$!

sleep 30

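# Launch the prism-dinosiglip+7b demo worker on port 40000 (expected to register itself with the controller started above).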
echo "Starting prism-dinosiglip+7b"
python3 -m interactive_demo --port 40000 --model_id prism-dinosiglip+7b &
P5=$!

# python3 -m interactive_demo --port 40002 --model_id prism-dinosiglip-controlled+7b &
# P6=$!

# python3 -m interactive_demo --port 40003 --model_id llava-v1.5-7b &
# P7=$!

sleep 30

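# Show what ended up in the Hugging Face cache (useful to confirm the model weights were downloaded).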
ls -als $HF_HOME
tree --du -h $HF_HOME

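# Block on the controller, web server, and model worker so the script (and the container) stays alive while they run.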
wait $P1 $P2 $P5 # $P4 $P6 $P7