#!/bin/bash
# Boots the model-serving demo stack:
#   1. serve.controller on 0.0.0.0:10000
#   2. serve.gradio_web_server pointed at the controller
#   3. one interactive_demo model worker (prism-dinosiglip+7b) on :40001
# plus a background GPU-utilization logger. Blocks until the launched
# services exit.
#
# Required env: HF_TOKEN (persisted to .hf_token for tools that read it
# from a file). HF_HOME is forced to the mounted /data volume.
set -uo pipefail

export HF_HOME="/data/.huggingface"

echo "PWD: $(pwd)"
# printf avoids echo's flag/backslash pitfalls; quoting prevents
# word-splitting of the token value. ${HF_TOKEN:?} aborts early with a
# clear message if the secret was not injected.
printf '%s\n' "${HF_TOKEN:?HF_TOKEN must be set}" > .hf_token
echo "LS: $(ls -als)"

# Periodic GPU telemetry; reap the logger when the script exits so it
# does not outlive the services.
( while true; do nvidia-smi; sleep 60; done ) &
GPU_MON=$!
trap 'kill "$GPU_MON" 2>/dev/null' EXIT

# Sanity-check CUDA visibility before starting anything expensive.
# (Typo fix: "availeble" -> "available".)
python3 -c "import torch; \
print(f\"is available = {torch.cuda.is_available()}\"); \
print(f\"device count = {torch.cuda.device_count()}\"); \
print(f\"current device = {torch.cuda.current_device()}\")"

python3 -m serve.controller --host 0.0.0.0 --port 10000 &
P1=$!
sleep 30  # give the controller time to come up before clients register

echo "Starting serve.gradio_web_server"
python3 -m serve.gradio_web_server --controller http://127.0.0.1:10000 --model-list-mode reload & # --share
P2=$!

# echo "Starting prism-dinosiglip+13b"
# python3 -m interactive_demo --port 40000 --model_id prism-dinosiglip+13b &
# P4=$!
echo "Starting prism-dinosiglip+7b"
python3 -m interactive_demo --port 40001 --model_id prism-dinosiglip+7b &
P5=$!
# python3 -m interactive_demo --port 40002 --model_id prism-dinosiglip-controlled+7b &
# P6=$!
# python3 -m interactive_demo --port 40003 --model_id llava-v1.5-7b &
# P7=$!

# Cache diagnostics; 'tree' may not be installed, so keep it best-effort.
ls -als "$HF_HOME"
tree --du -h "$HF_HOME" || true

# BUG FIX: the original waited on $P3, which is never assigned anywhere
# in this script (the commented-out worker used P4). Under 'set -u' that
# would abort; wait only on PIDs we actually started.
wait "$P1" "$P2" "$P5" # "$P6" "$P7"