#!/bin/bash
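# Keep the Hugging Face cache under /data (assumed here to be the persistent volume)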
export HF_HOME="/data/.huggingface"
echo "PWD: $(pwd)"
echo "$HF_TOKEN" > .hf_token
echo "LS: $(ls -als)"
while true; do nvidia-smi; sleep 600; done &
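# Sanity-check that PyTorch can see the CUDA devices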
python3 -c "import torch; \
print(f\"is availeble = {torch.cuda.is_available()}\"); \
print(f\"device count = {torch.cuda.device_count()}\"); \
print(f\"current device = {torch.cuda.current_device()}\")"
echo "Starting serve.controller"
python3 -m serve.controller --host 0.0.0.0 --port 10000 &
P_CON=$!
# sleep 30
# echo "Starting prism-dinosiglip+13b"
# python3 -m interactive_demo --port 40000 --model_id prism-dinosiglip+13b &
# P4=$!
sleep 30
echo "Starting prism-dinosiglip+7b"
python3 -m interactive_demo --port 40000 --model_id prism-dinosiglip+7b &
P_P7B=$!
# python3 -m interactive_demo --port 40002 --model_id prism-dinosiglip-controlled+7b &
# P6=$!
# python3 -m interactive_demo --port 40003 --model_id llava-v1.5-7b &
# P7=$!
sleep 30
echo "Starting serve.gradio_web_server"
python3 -m serve.gradio_web_server --controller http://127.0.0.1:10000 --model-list-mode reload & # --share
P_WEB=$!
sleep 30
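# Show what ended up in the Hugging Face cache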
ls -als $HF_HOME
tree --du -h $HF_HOME
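# Block until the controller, web server, and model worker have all exited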
wait $P_CON $P_WEB $P_P7B # $P4 $P6 $P7