# lux-voice-processing/conf/train_llm.yaml
main:
  project_name: lux-voice-processing
  experiment_name: train_llm

parameters:
  data: "llm_queries:latest"
  system_template: "system_template:latest"

openai_parameters:
  model: "gpt-3.5-turbo-1106"
  temperature: 0.5
  stream: False
  frequency_penalty: 1.0  # range [-2, 2]; positive values penalize repeated tokens, making new tokens more probable
  n: 1  # how many chat completion choices to generate per input message; n=1 minimizes cost
  presence_penalty: 1.0  # range [-2, 2]; positive values increase the model's likelihood to talk about new topics
  response_format: { "type": "json_object" }
  seed: 42  # to obtain the same answer for the same question
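For context, a minimal Python sketch of how a config like this might be consumed: load the YAML with PyYAML and expand the `openai_parameters` block into an OpenAI chat-completions call. The config path, the inline prompt strings, and the client setup are illustrative assumptions; the actual pipeline presumably resolves `data` and `system_template` from the named artifacts rather than hard-coding messages.

```python
# Hypothetical usage sketch; not part of this repository.
import yaml
from openai import OpenAI

with open("conf/train_llm.yaml") as f:
    cfg = yaml.safe_load(f)

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# The system/user messages below are placeholders; in practice they would be
# built from the "system_template:latest" and "llm_queries:latest" artifacts.
response = client.chat.completions.create(
    messages=[
        {"role": "system", "content": "You are a helpful assistant. Reply in JSON."},
        {"role": "user", "content": "Turn on the lights in the kitchen."},
    ],
    **cfg["openai_parameters"],  # model, temperature, seed, penalties, response_format, ...
)
print(response.choices[0].message.content)  # a JSON object, per response_format
```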