# config.yaml
defaults:
- dset: debug
- hydra/job_logging: colorlog
- hydra/hydra_logging: colorlog
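# Usage sketch (the entry point and dataset name below are assumptions, not taken from
# this file): with standard Hydra overrides, the `dset` group above can be swapped from
# the command line, e.g. `python train.py dset=valentini`, provided a matching
# dset/valentini.yaml config exists.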
# Dataset related
sample_rate: 16000
segment: 4
stride: 1 # in seconds, how much to stride between training examples
pad: true # if a training sample is too short, pad it
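# For reference, derived from the values above (assuming segment, like stride, is in
# seconds at sample_rate Hz): segment: 4 gives 64000-sample training examples, and
# stride: 1 places consecutive examples 16000 samples apart.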
# Dataset Augmentation
remix: false # remix noise and clean
bandmask: 0. # drop at most this fraction of freqs in mel scale
shift: 0 # random shift, number of samples
shift_same: false # shift noise and clean by the same amount
revecho: 0 # add reverb-like augmentation
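# Illustrative non-default augmentation setup (values are hypothetical, not a
# recommendation from this file):
#   remix: true
#   bandmask: 0.2
#   shift: 8000        # up to 0.5 s at 16 kHz
#   shift_same: true
#   revecho: 1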
# Logging and printing; these options do not impact training
num_prints: 5
device: cuda
num_workers: 5
verbose: 0
show: 0 # just show the model and its size and exit
# Checkpointing, by default automatically load last checkpoint
checkpoint: true
continue_from: '' # Path to a checkpoint.th file to start from.
# this is not used in the name of the experiment!
# so set dummy=something to avoid mixing up experiments.
continue_best: false # continue from best, not last state if continue_from is set.
continue_pretrained: # use either dns48, dns64 or master64 to fine-tune from a pretrained model
restart: false # Ignore existing checkpoints
checkpoint_file: checkpoint.th
best_file: best.th # will only contain the best model at any point
history_file: history.json
samples_dir: samples
save_again: false # if true, only load the checkpoint and save it again; useful to re-export best.th
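# Example overrides (hypothetical command lines, assuming a train.py entry point and
# standard Hydra CLI syntax):
#   python train.py continue_from=/path/to/checkpoint.th dummy=resume1
#   python train.py continue_pretrained=dns48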
# Other stuff
seed: 2036
dummy: # use this if you want to run the same experiment twice, under a different name
# Evaluation stuff
pesq: True # compute pesq?
eval_every: 10 # compute test metrics every this many epochs
dry: 0. # dry/wet knob value at eval
streaming: False # use streaming evaluation for Demucs
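# Note on the dry knob (an assumption about the usual dry/wet convention, not spelled
# out in this file): the evaluated output is typically dry * noisy + (1 - dry) * enhanced,
# so dry: 0. keeps only the enhanced signal.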
# Optimization related
optim: adam
lr: 3e-4
beta2: 0.999
loss: l1
stft_loss: False
stft_sc_factor: .5
stft_mag_factor: .5
epochs: 500
batch_size: 64
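# Sketch of how these terms usually combine when stft_loss is enabled (an assumption
# about the loss composition, not spelled out in this file):
#   total_loss = l1(clean, estimate)
#              + stft_sc_factor * spectral_convergence_loss
#              + stft_mag_factor * log_stft_magnitude_loss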
# Models
model: demucs # either demucs or dwave
demucs:
  chin: 1
  chout: 1
  hidden: 48
  max_hidden: 10000
  causal: true
  glu: true
  depth: 5
  kernel_size: 8
  stride: 4
  normalize: true
  resample: 4
  growth: 2
  rescale: 0.1
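  # For reference (assuming growth multiplies the channel count at every layer, capped
  # at max_hidden): the per-layer widths would be hidden * growth^i, i.e.
  # 48, 96, 192, 384, 768 channels across the 5 layers of this configuration.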
# Experiment launching, distributed
ddp: false
ddp_backend: nccl
rendezvous_file: ./rendezvous
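# Example launch (hypothetical, assuming a train.py entry point and standard Hydra
# overrides): a single-node multi-GPU run might be started with `python train.py ddp=1`,
# after which the launcher fills in rank and world_size below.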
# Internal config, don't set manually
rank:
world_size:
# Hydra config
hydra:
  run:
    dir: ./outputs/exp_${hydra.job.override_dirname}
  job:
    config:
      # configuration for the ${hydra.job.override_dirname} runtime variable
      override_dirname:
        kv_sep: '='
        item_sep: ','
        # Remove all paths, as the / in them would mess things up
        # Remove params that do not impact the training itself
        # Remove all slurm and submit params.
        # This is ugly I know...
        exclude_keys: [
          'hydra.job_logging.handlers.file.filename',
          'dset.train', 'dset.valid', 'dset.test', 'dset.noisy_json', 'dset.noisy_dir',
          'num_prints', 'continue_from', 'save_again',
          'device', 'num_workers', 'print_freq', 'restart', 'verbose',
          'log', 'ddp', 'ddp_backend', 'rendezvous_file', 'rank', 'world_size']
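        # Example of the resulting run directory (illustrative, combining the run.dir
        # pattern above with kv_sep/item_sep): overriding `demucs.hidden=64 epochs=100`
        # on the command line would produce ./outputs/exp_demucs.hidden=64,epochs=100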
  job_logging:
    handlers:
      file:
        class: logging.FileHandler
        mode: w
        formatter: colorlog
        filename: trainer.log
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stderr
  hydra_logging:
    handlers:
      console:
        class: logging.StreamHandler
        formatter: colorlog
        stream: ext://sys.stderr