|
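# Enable logging of the run to Weights & Biases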
use_wandb: False |
|
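# Dataset: record source, label schema, and sampling limits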
dataset:
    name: 'dataset'
    records_path: null
    initial_dataset: ''
    label_schema: ["Yes", "No"]
    max_samples: 10
    semantic_sampling: False
|
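# Annotator: human labeling through an Argilla server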
annotator:
    method: 'argilla'
    config:
        api_url: 'https://kenken999-arglira.hf.space'
        api_key: '12345678'
        workspace: 'team'
        time_interval: 5
|
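# Predictor: LLM that runs the prediction prompt over the samples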
predictor:
    method: 'llm'
    config:
        llm:
            type: 'OpenAI'
            name: 'llama3-70b-8192'
            model_kwargs: {"seed": 220}
        num_workers: 5
        prompt: 'prompts/predictor_completion/prediction.prompt'
        mini_batch_size: 1
        mode: 'prediction'
|
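# Meta-prompts: error analysis and sample/prompt generation parameters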
meta_prompts:
    folder: 'prompts/meta_prompts_classification'
    num_err_prompt: 1
    num_err_samples: 2
    history_length: 4
    num_generated_samples: 10
    num_initialize_samples: 10
    samples_generation_batch: 10
    num_workers: 5
    warmup: 4
|
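# Evaluation: scoring function and error-analysis thresholds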
eval:
    function_name: 'accuracy'
    num_large_errors: 4
    num_boundary_predictions: 0
    error_threshold: 0.5
|
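# LLM settings (distinct from predictor.config.llm above)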
llm:
    type: 'OpenAI'
    name: 'llama3-70b-8192'
    temperature: 0.8
|
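# Stop criteria: usage budget and early stopping (patience / min_delta)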
stop_criteria:
    max_usage: 2
    patience: 10
    min_delta: 0.01