batch_size: 4
class_identifier: instruction_ranking_metric
dropout: 0.1
encoder_learning_rate: 1.0e-06 | |
encoder_model: XLM-RoBERTa | |
keep_embeddings_frozen: true | |
layer: mix | |
layer_norm: false | |
layer_transformation: sparsemax | |
layerwise_decay: 0.95 | |
learning_rate: 1.5e-05 | |
load_pretrained_weights: true | |
loss: triplet-margin | |
nr_frozen_epochs: 0.3 | |
optimizer: AdamW | |
pool: avg | |
pretrained_model: xlm-roberta-large | |
train_data: | |
- data/APE_train.csv | |
validation_data: | |
- data/APE_valid.csv | |
warmup_steps: 0 | |