Delete hyper_parameters.py
hyper_parameters.py  +0 -70
hyper_parameters.py
DELETED
@@ -1,70 +0,0 @@
from text import symbols

# creating a python dictionary with all hyper parameters

tacotron_params = {'filter_length': 1024,  # audio parameters:
                   'hop_length': 256,
                   'win_length': 1024,
                   'n_mel_channels': 80,
                   'mel_fmin': 0.0,
                   'mel_fmax': 8000.0,
                   'sampling_rate': 22050,
                   'max_wav_value': 32768.0,
                   'clipping_value': 1e-5,
                   'C': 1,
                   # dataset parameters:
                   'load_mel_from_disk': False,
                   'sort_by_length': False,
                   'text_cleaners': ['english_cleaners'],
                   # embedding parameters:
                   'symbols_embedding_length': 512,
                   'n_symbols': len(symbols),
                   # encoder parameters:
                   'encoder_embedding_dim': 512,
                   'encoder_convs': 3,
                   'conv_kernel_size': 5,
                   'conv_stride': 1,
                   'conv_dilation': 1,
                   'w_init_gain': 'relu',
                   # decoder parameters:
                   'number_frames_step': 1,
                   'decoder_rnn_dim': 1024,
                   'prenet_dim': 256,
                   'max_decoder_steps': 1000,
                   'gate_threshold': 0.5,  # Need to be reviewed
                   'p_attention_dropout': 0.1,
                   'p_decoder_dropout': 0.1,
                   # attention parameters:
                   'attention_rnn_dim': 1024,
                   'attention_dim': 128,
                   # location features parameters:
                   'attention_location_n_filters': 32,
                   'attention_location_kernel_size': 31,
                   # postnet parameters:
                   'postnet_embedding_dim': 512,
                   'postnet_kernel_size': 5,
                   'postnet_n_convolutions': 5,
                   # GST parameters:
                   'E': 512,
                   'token_num': 3,
                   'num_heads': 1,
                   'seq_ref_enc_filter_size': [3, 7, 11],  # phoneme, word/silence, utterance levels respectively
                   'ref_enc_out_channels': [8, 16, 16],
                   # optimization parameters:
                   'use_saved_learning_rate': True,
                   'batch_size': 32,  # 64. Should be larger than the number of GPUs and an integer multiple of it
                   'learning_rate': 1e-3,
                   'weight_decay': 1e-6,
                   'grad_clip_thresh': 1.0,
                   'mask_padding': False,
                   # experiment parameters:
                   'epochs': 300,  # 160, 500
                   'iters_per_checkpoint': 1500,  # 1000. How many iterations before validating
                   'seed': 1234,
                   'dynamic_loss_scaling': True,  # CHECK IT OUT!
                   'distributed_run': False,
                   'dist_backend': 'nccl',
                   'dist_url': "/home/alex/PyTorch_TACOTRON_2/pycharm-tacotron2",  # CHECK IT OUT!
                   'cudnn_enabled': True,
                   'cudnn_benchmark': False,
                   'fp16_run': False}
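For context, a minimal sketch of how a training script might have consumed this dictionary before it was deleted. The placeholder model and the exact accessor pattern below are hypothetical illustrations, not code from this repository; the sketch only shows values being read out of tacotron_params.

import torch

# Assumes hyper_parameters.py is still importable (this commit removes it).
from hyper_parameters import tacotron_params as hparams

# Reproducibility and cuDNN flags taken directly from the dictionary.
torch.manual_seed(hparams['seed'])
torch.backends.cudnn.enabled = hparams['cudnn_enabled']
torch.backends.cudnn.benchmark = hparams['cudnn_benchmark']

# Hypothetical placeholder model; the real Tacotron 2 + GST modules live elsewhere in the repo.
model = torch.nn.Linear(hparams['encoder_embedding_dim'], hparams['n_mel_channels'])
optimizer = torch.optim.Adam(model.parameters(),
                             lr=hparams['learning_rate'],
                             weight_decay=hparams['weight_decay'])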