Spaces:
Running
on
L4
Running
on
L4
Update entrypoint.sh
Browse files — entrypoint.sh (+19 −6)
entrypoint.sh
CHANGED
@@ -3,13 +3,14 @@
|
|
3 |
mkdir -p data
|
4 |
|
5 |
# # TEN Turn Detection
|
6 |
-
HF_MODEL_PATH_1=TEN-framework/TEN_Turn_Detection
|
7 |
-
HF_MODEL_NAME_1=$(basename ${HF_MODEL_PATH_1})
|
8 |
-
LOCAL_MODEL_PATH_1=./data/${HF_MODEL_NAME_1}
|
9 |
-
VLLM_SERVE_API_KEY_1=TEN_Turn_Detection
|
|
|
|
|
|
|
10 |
|
11 |
-
HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download ${HF_MODEL_PATH_1} --local-dir ${LOCAL_MODEL_PATH_1}
|
12 |
-
vllm serve ${LOCAL_MODEL_PATH_1} --served-model-name ${HF_MODEL_NAME_1} --api-key ${VLLM_SERVE_API_KEY_1} &
|
13 |
|
14 |
# Shisa V2
|
15 |
# HF_MODEL_PATH=augmxnt/shisa-7b-v1
|
@@ -20,6 +21,18 @@ vllm serve ${LOCAL_MODEL_PATH_1} --served-model-name ${HF_MODEL_NAME_1} --api-ke
|
|
20 |
# HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download ${HF_MODEL_PATH} --local-dir ${LOCAL_MODEL_PATH}
|
21 |
# vllm serve ${LOCAL_MODEL_PATH} --served-model-name ${HF_MODEL_NAME} --api-key ${VLLM_SERVE_API_KEY} --port 8000 &
|
22 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
23 |
# Start frontend
|
24 |
export NEXT_PUBLIC_EDIT_GRAPH_MODE=false
|
25 |
cd /app/demo && npm run dev &
|
|
|
# Create the local model cache directory (no-op if it already exists);
# model weights are downloaded into ./data further down in this script.
mkdir -p data
4 |
|
5 |
# # TEN Turn Detection
|
6 |
+
# HF_MODEL_PATH_1=TEN-framework/TEN_Turn_Detection
|
7 |
+
# HF_MODEL_NAME_1=$(basename ${HF_MODEL_PATH_1})
|
8 |
+
# LOCAL_MODEL_PATH_1=./data/${HF_MODEL_NAME_1}
|
9 |
+
# VLLM_SERVE_API_KEY_1=TEN_Turn_Detection
|
10 |
+
|
11 |
+
# HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download ${HF_MODEL_PATH_1} --local-dir ${LOCAL_MODEL_PATH_1}
|
12 |
+
# vllm serve ${LOCAL_MODEL_PATH_1} --served-model-name ${HF_MODEL_NAME_1} --api-key ${VLLM_SERVE_API_KEY_1} &
|
13 |
|
|
|
|
|
14 |
|
15 |
# Shisa V2
|
16 |
# HF_MODEL_PATH=augmxnt/shisa-7b-v1
|
|
|
21 |
# HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download ${HF_MODEL_PATH} --local-dir ${LOCAL_MODEL_PATH}
|
22 |
# vllm serve ${LOCAL_MODEL_PATH} --served-model-name ${HF_MODEL_NAME} --api-key ${VLLM_SERVE_API_KEY} --port 8000 &
|
23 |
|
24 |
# GPT OSS 20B — fetch the model into ./data and serve it with vLLM.
HF_MODEL_PATH_1=openai/gpt-oss-20b
HF_MODEL_NAME_1=$(basename "${HF_MODEL_PATH_1}")   # -> "gpt-oss-20b"
LOCAL_MODEL_PATH_1=./data/${HF_MODEL_NAME_1}
VLLM_SERVE_API_KEY_1=gpt_oss_20b                   # API key vLLM requires from clients

# HF_HUB_ENABLE_HF_TRANSFER=1 enables the fast hf_transfer downloader for the
# large weight files. Fail fast if the download does not complete — `vllm serve`
# below cannot work without the weights. (Expansions quoted per SC2086.)
HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download "${HF_MODEL_PATH_1}" --local-dir "${LOCAL_MODEL_PATH_1}" \
  || { echo "ERROR: failed to download ${HF_MODEL_PATH_1}" >&2; exit 1; }

# Serve the model in the background (vLLM listens on its default port, 8000,
# since no --port is given here).
vllm serve "${LOCAL_MODEL_PATH_1}" --served-model-name "${HF_MODEL_NAME_1}" --api-key "${VLLM_SERVE_API_KEY_1}" &

# Start frontend (demo app) in the background. Note: the `&` backgrounds the
# whole `cd … && npm …` list, so the cd happens inside the backgrounded
# subshell and this script's own working directory is unchanged.
export NEXT_PUBLIC_EDIT_GRAPH_MODE=false
cd /app/demo && npm run dev &
|