Spaces:
Running
on
L4
Running
on
L4
File size: 1,369 Bytes
898d83e 264681b d487566 264681b b1f4ce1 a7cb3f4 aa7ea36 db4e8bf 1259ba1 7afe90b aa7ea36 7afe90b 946e50b a7cb3f4 12bbaf2 d4b7fb9 00e5fa1 80cc641 12bbaf2 11f5c93 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 |
#!/bin/bash
# Container entrypoint:
#   1. download an LLM from Hugging Face into ./data,
#   2. start a vLLM OpenAI-compatible server for it (background),
#   3. start the demo frontend (background) and the backend task runner (foreground).
# The foreground `task run` keeps the container alive; the script exits when it does.
set -euo pipefail

mkdir -p data

# # TEN Turn Detection
# HF_MODEL_PATH_1=TEN-framework/TEN_Turn_Detection
# HF_MODEL_NAME_1=$(basename ${HF_MODEL_PATH_1})
# LOCAL_MODEL_PATH_1=./data/${HF_MODEL_NAME_1}
# VLLM_SERVE_API_KEY_1=TEN_Turn_Detection
# HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download ${HF_MODEL_PATH_1} --local-dir ${LOCAL_MODEL_PATH_1}
# vllm serve ${LOCAL_MODEL_PATH_1} --served-model-name ${HF_MODEL_NAME_1} --api-key ${VLLM_SERVE_API_KEY_1} &

# Shisa V2
# HF_MODEL_PATH=augmxnt/shisa-7b-v1
# HF_MODEL_NAME=$(basename ${HF_MODEL_PATH})
# LOCAL_MODEL_PATH=./data/${HF_MODEL_NAME}
# VLLM_SERVE_API_KEY=shisa_7b_v1
# HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download ${HF_MODEL_PATH} --local-dir ${LOCAL_MODEL_PATH}
# vllm serve ${LOCAL_MODEL_PATH} --served-model-name ${HF_MODEL_NAME} --api-key ${VLLM_SERVE_API_KEY} --port 8000 &

# GPT OSS 20B
HF_MODEL_PATH_1=openai/gpt-oss-20b
HF_MODEL_NAME_1=$(basename "${HF_MODEL_PATH_1}")
LOCAL_MODEL_PATH_1=./data/${HF_MODEL_NAME_1}
VLLM_SERVE_API_KEY_1=gpt_oss_20b
# HF_HUB_ENABLE_HF_TRANSFER=1 speeds up the download; under `set -e` a failed
# download now aborts the script instead of falling through to `vllm serve`
# with a missing model directory.
HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download "${HF_MODEL_PATH_1}" --local-dir "${LOCAL_MODEL_PATH_1}"
# Backgrounded: model serving runs alongside the frontend/backend below.
# NOTE(review): `set -e` does not cover background jobs — if vllm dies, the
# script keeps running; the backend is expected to surface connection errors.
vllm serve "${LOCAL_MODEL_PATH_1}" --served-model-name "${HF_MODEL_NAME_1}" --api-key "${VLLM_SERVE_API_KEY_1}" &

# Start frontend (the whole `cd … && npm run dev` pair is backgrounded as a
# subshell, so the parent shell's cwd is unchanged).
export NEXT_PUBLIC_EDIT_GRAPH_MODE=false
cd /app/demo && npm run dev &

# Start backend — runs in the foreground and determines the script's lifetime.
cd /app && task run
|