# inferencing-llm / config.yaml
# Author: Shyamnath — "Update config.yaml" (commit 0af53f0, verified), 390 bytes.
# (Header above reconstructed from Hugging Face page chrome: raw / history / blame.)
---
# LiteLLM proxy configuration for a Hugging Face Space.

model_list:
  - model_name: gpt-3.5-turbo
    litellm_params:
      model: gpt-3.5-turbo

general_settings:
  # SECURITY: do not commit a real key to VCS — inject it via an environment
  # variable (e.g. master_key: "os.environ/LITELLM_MASTER_KEY").
  master_key: "sk-1234"
  # 7860 is the default port exposed by Hugging Face Spaces (Gradio default).
  port: 7860

# Enable UI dashboard features.
# NOTE(review): original indentation was lost; these keys are assumed to be
# top-level rather than nested under general_settings — confirm against the
# litellm proxy config schema.
ui_features:
  analytics_dashboard: true
  model_config_management: true
  key_management: true

allow_origins: ["*"]  # Allow cross-origin requests from any domain.
start_server: true