# train_config_regression.yaml (forked from wolny/pytorch-3dunet)
# use a fixed random seed to guarantee that when you run the code twice you will get the same outcome
manual_seed: 0
# model configuration
model:
  # model class, e.g. UNet3D, ResidualUNet3D
  name: ResidualUNet3D
  # number of input channels to the model
  in_channels: 1
  # number of output channels
  out_channels: 1
  # determines the order of operators in a single layer (gcr - GroupNorm+Conv3d+ReLU)
  layer_order: gcr
  # feature maps scale factor
  f_maps: 16
  # number of groups in the groupnorm
  num_groups: 8
  # if True, applies the final normalization layer (sigmoid or softmax); otherwise the network returns the output of the final convolution layer; use False for regression problems, e.g. de-noising
  is_segmentation: false
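# Example of how f_maps is usually interpreted (assumed UNet convention, not spelled out in this file):
# with f_maps: 16 the number of feature maps typically doubles at each encoder level,
# e.g. 16 -> 32 -> 64 -> 128 -> ..., up to the model's default number of levels.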
# trainer configuration
trainer:
  # path to the checkpoint directory
  checkpoint_dir: 3dunet
  # path to the latest checkpoint; if provided, the training will be resumed from that checkpoint
  resume: null
  # how many iterations between validations
  validate_after_iters: 20
  # how many iterations between tensorboard logging
  log_after_iters: 20
  # max number of epochs
  epochs: 100
  # max number of iterations
  iters: 100000
  # model with higher eval score is considered better
  eval_score_higher_is_better: true
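# Note (assumed trainer semantics, not stated in this file): with the values above the model is
# validated and logged every 20 iterations, and training stops when either the epoch limit (100)
# or the iteration limit (100000) is reached, whichever comes first.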
# optimizer configuration
optimizer:
  # initial learning rate
  learning_rate: 0.0002
  # weight decay
  weight_decay: 0.0001
# loss function configuration
loss:
  # loss function to be used during training
  name: SmoothL1Loss
  # a target value that is ignored and does not contribute to the input gradient
  ignore_index: null
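# For reference, SmoothL1Loss (as in PyTorch, assuming the default beta = 1) computes, per element,
# 0.5 * (x - y)^2 when |x - y| < 1 and |x - y| - 0.5 otherwise, which makes it less sensitive to
# outliers than plain MSE, a common choice for regression/de-noising targets.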
# evaluation metric configuration
eval_metric:
  # peak signal-to-noise ratio
  name: PSNR
  # a target label that is ignored during metric evaluation
  ignore_index: null
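# For reference, PSNR is typically computed as 10 * log10(MAX^2 / MSE), where MSE is the mean squared
# error between prediction and target and MAX is the data range; higher is better, which is why
# eval_score_higher_is_better is set to true above.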
# learning rate scheduler configuration
lr_scheduler:
  # reduce learning rate when evaluation metric plateaus
  name: ReduceLROnPlateau
  # use 'max' if eval_score_higher_is_better=True, 'min' otherwise
  mode: max
  # factor by which learning rate will be reduced
  factor: 0.5
  # number of *validation runs* with no improvement after which learning rate will be reduced
  patience: 10
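# Worked example: starting from the learning_rate of 0.0002 above, after 10 consecutive validation
# runs without a PSNR improvement the scheduler multiplies the rate by 0.5, giving 0.0001; another
# 10 stagnant validation runs would reduce it to 0.00005, and so on.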
# data loaders configuration
loaders:
  # class of the HDF5 dataset; currently StandardHDF5Dataset and LazyHDF5Dataset are supported.
  # When using LazyHDF5Dataset make sure to set `num_workers: 1`, due to a bug in h5py which corrupts the data
  # when reading from multiple threads.
  dataset: StandardHDF5Dataset
  # batch dimension; if the number of GPUs is N > 1, then an effective batch size of N * batch_size will automatically be used for DataParallel
  batch_size: 1
  # how many subprocesses to use for data loading
  num_workers: 4
  # path to the raw data within the H5
  raw_internal_path: raw
  # path to the label data within the H5
  label_internal_path: random
  # path to the pixel-wise weight map within the H5, if present
  weight_internal_path: null
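  # With the internal paths above, every H5 file listed below is expected to contain a dataset named
  # 'raw' (the network input) and a dataset named 'random' (the regression target); no per-pixel
  # weight map is used since weight_internal_path is null.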
  # configuration of the train loader
  train:
    # absolute paths to the training datasets; if a given path is a directory, all H5 files ('*.h5', '*.hdf', '*.hdf5', '*.hd5')
    # inside that directory will be included as well (non-recursively)
    file_paths:
      - '../resources/random3D.h5'
    # SliceBuilder configuration, i.e. how to iterate over the input volume patch-by-patch
    slice_builder:
      # SliceBuilder class
      name: SliceBuilder
      # train patch size given to the network (adapt it to fit in your GPU memory; generally, the bigger the patch the better)
      patch_shape: [32, 128, 128]
      # train stride between patches
      stride_shape: [16, 100, 100]
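    # Rough patch arithmetic (assuming the SliceBuilder tiles each axis with the usual
    # floor((size - patch) / stride) + 1 rule): with patch 128 and stride 100 in X/Y consecutive
    # patches overlap by 28 voxels, and with patch 32 and stride 16 in Z they overlap by 16 voxels,
    # so training voxels are generally covered by more than one patch.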
    # data transformations/augmentations
    transformer:
      raw:
        # apply min-max scaling and map the input to [-1, 1]
        - name: Normalize
        - name: RandomFlip
        - name: RandomRotate90
        - name: RandomRotate
          # rotate only in the ZY plane since most volumetric data is anisotropic
          axes: [[2, 1]]
          angle_spectrum: 15
          mode: reflect
        - name: ToTensor
          expand_dims: true
      label:
        # apply min-max scaling and map the input to [-1, 1]
        - name: Normalize
        - name: RandomFlip
        - name: RandomRotate90
        - name: RandomRotate
          # rotate only in the ZY plane since most volumetric data is anisotropic
          axes: [[2, 1]]
          angle_spectrum: 15
          mode: reflect
        - name: ToTensor
          expand_dims: true
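  # Note on the transforms above (assumed semantics, consistent with the inline comments): Normalize
  # maps each patch to [-1, 1] via min-max scaling, roughly x' = 2 * (x - min) / (max - min) - 1, and
  # ToTensor with expand_dims: true adds the singleton channel dimension expected by a 1-channel model.
  # The same geometric augmentations are applied to raw and label so input and target stay aligned.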
  # configuration of the validation loaders
  val:
    # paths to the validation datasets; if a given path is a directory, all H5 files ('*.h5', '*.hdf', '*.hdf5', '*.hd5')
    # inside that directory will be included as well (non-recursively)
    file_paths:
      - '../resources/random3D_copy.h5'
    # SliceBuilder configuration
    slice_builder:
      # SliceBuilder class
      name: SliceBuilder
      # validation patch (can be bigger than the train patch since there is no backprop)
      patch_shape: [32, 128, 128]
      # validation stride (validation patches don't need to overlap)
      stride_shape: [32, 128, 128]
    # no data augmentation during validation
    transformer:
      raw:
        # apply min-max scaling and map the input to [-1, 1]
        - name: Normalize
        - name: ToTensor
          expand_dims: true
      label:
        # apply min-max scaling and map the input to [-1, 1]
        - name: Normalize
        - name: ToTensor
          expand_dims: true
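# A minimal usage sketch (assumption: the pytorch-3dunet CLI entry point is named train3dunet; the
# exact name and invocation depend on the installed version):
#   train3dunet --config train_config_regression.yaml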