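# Experiment configuration: data and training settings, per-model hyperparameters, and optimization schedule.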
Data:
  # Basics
  log_dir: 'tasks/models'
  # Data
  dataset: "FFTDataset"
  data_dir: None
  model_name: "CNNEncoder"
  batch_size: 32
  num_epochs: 10
  exp_num: 2
  max_len_spectra: 4096
  max_days_lc: 270
  lc_freq: 0.0208
  create_umap: True
  checkpoint_path: 'tasks/models/frugal_2025-01-29/frugal_kan_2.pth'
CNNEncoder:
  # Model
  in_channels: 2
  num_layers: 4
  stride: 1
  encoder_dims: [32,64,128]
  kernel_size: 3
  dropout_p: 0.3
  output_dim: 2
  beta: 1
  load_checkpoint: False
  checkpoint_num: 1
  activation: "silu"
  sine_w0: 30.0
  avg_output: False
MLP:
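  # Fully connected head: layer widths and dropout.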
  input_dim: 6
  hidden_dims: [16,32,6]
  dropout: 0.2
KAN:
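  # KAN (likely Kolmogorov-Arnold network layers): layer widths and spline-grid range.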
  layers_hidden: [1125,32,8,1]
  grid_min: -1.2
  grid_max: 1.2
  num_grids: 8
  exponent: 2
KAN_INR:
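  # KAN variant used as an implicit neural representation (1-D input, 1-D output).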
  layers_hidden: [1,1024,128,128,1]
  grid_min: -1.2
  grid_max: 1.2
  num_grids: 8
  exponent: 2
CNNEncoder_f:
  # Model
  in_channels: 32
  num_layers: 4
  stride: 1
  encoder_dims: [32,64,128]
  kernel_size: 3
  dropout_p: 0.3
  output_dim: 2
  beta: 1
  load_checkpoint: True
  checkpoint_num: 1
  activation: "silu"
  sine_w0: 1.0
  avg_output: True
Conformer:
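  # Conformer encoder: multi-head self-attention and convolution blocks.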
  encoder: ["mhsa_pro", "conv"]
  timeshift: false
  num_layers: 4
  encoder_dim: 128
  num_heads: 8
  kernel_size: 3
  dropout_p: 0.2
  norm: "postnorm"
RelationalTransformer:
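  # Relational transformer operating on node and edge features.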
  d_node: 32
  d_edge: 32
  d_attn_hid: 16
  d_node_hid: 16
  d_edge_hid: 16
  d_out_hid: 16
  d_out: 1
  n_layers: 4
  n_heads: 4
  dropout: 0.1
INR:
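  # INR (implicit neural representation): coordinate-network dimensions.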
  in_features: 2
  n_layers: 2
  hidden_features: 64
  out_features: 32
XGBoost:
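  # Gradient-boosted trees with a binary logistic objective.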
  objective: 'binary:logistic'
  eval_metric: 'logloss'
  use_label_encoder: False
  n_estimators: 500
  learning_rate: 0.1
  max_depth: 5
  subsample: 0.8
  colsample_bytree: 0.8
  random_state: 42
Optimization:
  # Optimization
  max_lr: 1e-5
  weight_decay: 5e-6
  warmup_pct: 0.3
  steps_per_epoch: 3500