-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathutils_model.py
357 lines (326 loc) · 17.4 KB
/
utils_model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
import tensorflow as tf
from coord_conv import CoordConv
from tensorflow.keras.layers import Conv2D, UpSampling2D, Activation, Add, Multiply, MaxPooling2D
from tensorflow.keras.layers import SeparableConv2D, BatchNormalization, Dropout
from tensorflow_addons.layers import GroupNormalization
from tensorflow.keras.layers import ReLU, PReLU
from tensorflow.keras.layers import Conv3D, UpSampling3D, MaxPool3D
hn = 'he_normal' #kernel initializer
def conv_block(x_in, filters, batch_norm=False, kernel_size=(3, 3),
               kernel_initializer='glorot_uniform', acti='relu', dropout_rate=None):
    '''
    Dual 2D convolution block: Conv -> Acti -> Conv -> [BatchNorm] -> Acti -> [Dropout].
    :param x_in: Input feature maps
    :param filters: Number of filters for both convolutional layers
    :param batch_norm: If True, apply BatchNormalization after the 2nd convolution only
    :param kernel_size: Kernel size for both convolutions, (3, 3) by default
    :param kernel_initializer: Initializer for kernel weights, 'glorot_uniform' by default
    :param acti: Activation applied after each convolution
    :param dropout_rate: If not None, apply Dropout with this rate at the end
    :return: Feature maps of the same spatial size as the input
    '''
    x = Conv2D(filters, kernel_size, padding='same', kernel_initializer=kernel_initializer)(x_in)
    # NOTE(review): normalization after the 1st conv was deliberately disabled in the
    # original (commented out); only the 2nd conv is optionally normalized.
    x = Activation(acti)(x)
    x = Conv2D(filters, kernel_size, padding='same', kernel_initializer=kernel_initializer)(x)
    if batch_norm:
        x = BatchNormalization()(x)
    x = Activation(acti)(x)
    if dropout_rate is not None:
        x = Dropout(dropout_rate)(x)
    return x
def coordconv_block(x_in, x_dim, y_dim, filters, batch_norm=False, kernel_size=(3, 3),
                    kernel_initializer='glorot_uniform', acti='relu', dropout_rate=None, with_r=False):
    '''
    Dual convolution block ending in CoordConv: Conv -> Acti -> CoordConv -> [BatchNorm] -> Acti -> [Dropout].
    :param x_in: Input feature maps
    :param x_dim: x dimension for CoordConv
    :param y_dim: y dimension for CoordConv
    :param filters: Number of filters for both layers
    :param batch_norm: If True, apply BatchNormalization after the CoordConv
    :param kernel_size: Kernel size for both layers, (3, 3) by default
    :param kernel_initializer: Initializer for kernel weights
    :param acti: Activation applied after each layer
    :param dropout_rate: If not None, apply Dropout with this rate at the end
    :param with_r: If True, CoordConv also adds the radial coordinate channel
    :return: Feature maps of the same spatial size as the input
    '''
    x = Conv2D(filters, kernel_size, padding='same', kernel_initializer=kernel_initializer)(x_in)
    # NOTE(review): normalization after the 1st conv was deliberately disabled in the original.
    x = Activation(acti)(x)
    x = CoordConv(x_dim, y_dim, with_r, filters, kernel_size, padding='same',
                  kernel_initializer=kernel_initializer)(x)
    if batch_norm:
        x = BatchNormalization()(x)
    x = Activation(acti)(x)
    if dropout_rate is not None:
        x = Dropout(dropout_rate)(x)
    return x
def conv_2d(x_in, filters, batch_norm=False, kernel_size=(3, 3), acti='relu',
            kernel_initializer='glorot_uniform', dropout_rate=None):
    '''
    Single 2D convolution: Conv -> [BatchNorm] -> Acti -> [Dropout].
    NOTE(review): this definition is SHADOWED by a later `conv_2d` in this file
    (which takes `layer_name` as its 3rd positional argument); only the later
    definition is visible at runtime. Consider removing or renaming one of them.
    :param x_in: Input feature maps
    :param filters: Number of filters
    :param batch_norm: If True, apply BatchNormalization after the convolution
    :param kernel_size: Kernel size, (3, 3) by default
    :param acti: Activation applied after the (optional) normalization
    :param kernel_initializer: Initializer for kernel weights
    :param dropout_rate: If not None, apply Dropout with this rate at the end
    :return: Feature maps of the same spatial size as the input
    '''
    x = Conv2D(filters, kernel_size, padding='same', kernel_initializer=kernel_initializer)(x_in)
    if batch_norm:
        x = BatchNormalization()(x)
    x = Activation(acti)(x)
    if dropout_rate is not None:
        x = Dropout(dropout_rate)(x)
    return x
def pool(x_in, pool_size=(2, 2), type='Max'):
    '''
    Apply 2D pooling to the input feature maps.
    :param x_in: Input feature maps
    :param pool_size: Pooling window, (2, 2) by default
    :param type: Pooling type; only 'Max' is implemented (param name kept for
                 backward compatibility although it shadows the builtin)
    :return: Pooled feature maps
    :raises ValueError: for any unsupported pooling type
    '''
    if type == 'Max':
        return MaxPooling2D(pool_size)(x_in)
    # BUG FIX: previously fell through and silently returned None
    raise ValueError("Unsupported pooling type: {!r} (only 'Max' is implemented)".format(type))
def up(x_in, filters, merge, batch_norm=False,
       kernel_initializer='glorot_uniform', dropout_rate=None, size=(2, 2)):
    '''
    Upsample, convolve, and concatenate with the skip-connection features.
    :param x_in: Input feature maps from the previous decoder level
    :param filters: Number of filters for the post-upsampling convolution
    :param merge: Feature maps from the skip connection
    :param batch_norm: If True, apply BatchNormalization inside the convolution
    :param kernel_initializer: Initializer for kernel weights
    :param dropout_rate: If not None, apply Dropout inside the convolution
    :param size: Upsampling factor, (2, 2) by default
    :return: Channel-wise concatenation of skip features and upsampled features
    '''
    u = UpSampling2D(size)(x_in)
    # BUG FIX: the surviving `conv_2d` definition in this module takes `layer_name`
    # as its 3rd positional argument; the old call passed `batch_norm` there.
    # Pass everything by keyword so arguments land on the intended parameters.
    conv = conv_2d(u, filters, layer_name=None, batch_norm=batch_norm, acti='relu',
                   kernel_initializer=kernel_initializer, dropout_rate=dropout_rate)
    return tf.concat([merge, conv], axis=-1)
def attention_block(input_signal, gated_signal, filters):
    '''
    Additive attention gate (Attention U-Net style).
    :param input_signal: Skip-connection features (higher resolution)
    :param gated_signal: Gating features from the coarser decoder level
    :param filters: Number of filters for the intermediate 1x1 projections
    :return: Input signal scaled element-wise by the learned attention coefficients
    '''
    # project the input signal, downsampling by 2 to match the gated signal
    is_fm = Conv2D(filters, kernel_size=(1, 1), strides=(2, 2), padding='same')(input_signal)
    # project the gated signal at its native resolution
    gs_fm = Conv2D(filters, kernel_size=(1, 1), strides=(1, 1), padding='same')(gated_signal)
    # BUG FIX: the original asserted the shapes were UNEQUAL (`!=`), which fails
    # exactly when the block is used correctly; the shapes must match for Add.
    assert is_fm.shape == gs_fm.shape, "Feature maps shape doesn't match!"
    # element-wise sum followed by ReLU
    add = Add()([is_fm, gs_fm])
    acti = Activation('relu')(add)
    # 1-channel sigmoid bottleneck -> downsampled attention coefficient
    bottle_neck = Conv2D(1, kernel_size=(1, 1), activation='sigmoid')(acti)
    # bilinear interpolation back to the input signal's resolution
    alpha = UpSampling2D(interpolation='bilinear')(bottle_neck)
    # gate the input signal's features with the attention coefficient
    return Multiply()([input_signal, alpha])
def conv_block_sep(x_in, filters, layer_name, batch_norm=False, kernel_size=(3, 3),
                   kernel_initializer='glorot_uniform', acti='relu', dropout_rate=None):
    '''
    Dual named separable convolution block: SepConv -> [BN] -> Acti -> SepConv -> [BN] -> Acti -> [Dropout].
    :param x_in: Input feature maps
    :param filters: List with the number of filters for the 1st and 2nd layer
    :param layer_name: List with the names for the 1st and 2nd layer
    :param batch_norm: If True, apply BatchNormalization after each convolution
    :param kernel_size: Kernel size for both layers, (3, 3) by default
    :param kernel_initializer: Initializer for kernel weights
    :param acti: Activation applied after each (optionally normalized) convolution
    :param dropout_rate: If not None, apply Dropout with this rate at the end
    :return: Feature maps of the same spatial size with filters[1] channels
    '''
    assert isinstance(filters, list), "Please input filters of type list."
    # BUG FIX: this message previously referred to `filters`
    assert isinstance(layer_name, list), "Please input layer_name of type list."
    x = SeparableConv2D(filters[0], kernel_size, padding='same',
                        kernel_initializer=kernel_initializer, name=layer_name[0])(x_in)
    if batch_norm:
        x = BatchNormalization()(x)
    x = Activation(acti)(x)
    x = SeparableConv2D(filters[1], kernel_size, padding='same',
                        kernel_initializer=kernel_initializer, name=layer_name[1])(x)
    if batch_norm:
        x = BatchNormalization()(x)
    x = Activation(acti)(x)
    if dropout_rate is not None:
        x = Dropout(dropout_rate)(x)
    return x
def conv_2d_sep(x_in, filters, layer_name, batch_norm=False, kernel_size=(3, 3), acti='relu',
                kernel_initializer='glorot_uniform', dropout_rate=None):
    '''
    Single named separable 2D convolution: SepConv -> [BatchNorm] -> Acti -> [Dropout].
    :param x_in: Input feature maps
    :param filters: Number of filters
    :param layer_name: Name for the SeparableConv2D layer
    :param batch_norm: If True, apply BatchNormalization after the convolution
    :param kernel_size: Kernel size, (3, 3) by default
    :param acti: Activation applied after the (optional) normalization
    :param kernel_initializer: Initializer for kernel weights
    :param dropout_rate: If not None, apply Dropout with this rate at the end
    :return: Feature maps of the same spatial size as the input
    '''
    x = SeparableConv2D(filters, kernel_size, padding='same',
                        kernel_initializer=kernel_initializer, name=layer_name)(x_in)
    if batch_norm:
        x = BatchNormalization()(x)
    x = Activation(acti)(x)
    if dropout_rate is not None:
        x = Dropout(dropout_rate)(x)
    return x
def conv_2d(x_in, filters, layer_name, strides=(1, 1), batch_norm=False, kernel_size=(3, 3), acti='relu',
            kernel_initializer='glorot_uniform', dropout_rate=None):
    '''
    Single named 2D convolution: Conv -> [BatchNorm] -> Acti -> [Dropout].
    NOTE(review): this redefinition SHADOWS the earlier `conv_2d` in this file;
    only this signature (with `layer_name`) is visible at runtime.
    :param x_in: Input feature maps
    :param filters: Number of filters
    :param layer_name: Name for the Conv2D layer (None lets Keras auto-name it)
    :param strides: Convolution strides, (1, 1) by default
    :param batch_norm: If True, apply BatchNormalization after the convolution
    :param kernel_size: Kernel size, (3, 3) by default
    :param acti: Activation applied after the (optional) normalization
    :param kernel_initializer: Initializer for kernel weights
    :param dropout_rate: If not None, apply Dropout with this rate at the end
    :return: Feature maps (spatially reduced if strides > 1)
    '''
    x = Conv2D(filters, kernel_size, strides, padding='same',
               kernel_initializer=kernel_initializer, name=layer_name)(x_in)
    if batch_norm:
        x = BatchNormalization()(x)
    x = Activation(acti)(x)
    if dropout_rate is not None:
        x = Dropout(dropout_rate)(x)
    return x
def down_sampling_sep(x_in, filters, layer_name, batch_norm=False, kernel_size=(3, 3), acti='relu',
                      kernel_initializer='glorot_uniform', dropout_rate=None, mode='coord', x_dim=None, y_dim=None):
    '''
    Downsample by 2 with a strided (separable/coord) convolution, then [BatchNorm] -> Acti -> [Dropout].
    :param x_in: Input feature maps
    :param filters: Number of filters for the downsampling convolution
    :param layer_name: Name for the main convolutional layer
    :param batch_norm: If True, apply BatchNormalization after the convolution
    :param kernel_size: Kernel size, (3, 3) by default
    :param acti: Activation applied after the (optional) normalization
    :param kernel_initializer: Initializer for kernel weights
    :param dropout_rate: If not None, apply Dropout with this rate at the end
    :param mode: 'coord' for separable CoordConv, 'normal' for SeparableConv2D
    :param x_dim: x dimension for CoordConv (required when mode == 'coord')
    :param y_dim: y dimension for CoordConv (required when mode == 'coord')
    :return: Feature maps spatially scaled down by 2
    '''
    assert mode == 'coord' or mode == 'normal', "Use 'coord' or 'normal' for mode!"
    if mode == 'coord':
        # separable coordconv: cheap 1-filter strided conv, then a stride-1 CoordConv
        assert x_dim is not None and y_dim is not None, "Please input dimension for CoordConv!"
        x = Conv2D(1, kernel_size, strides=(2, 2), padding='same',
                   kernel_initializer=kernel_initializer)(x_in)
        x = CoordConv(x_dim=x_dim, y_dim=y_dim, with_r=False, filters=filters, strides=(1, 1),
                      kernel_size=3, padding='same', kernel_initializer=kernel_initializer,
                      name=layer_name)(x)
    else:
        # normal mode: strided separable convolution
        x = SeparableConv2D(filters, kernel_size, strides=(2, 2), padding='same',
                            kernel_initializer=kernel_initializer, name=layer_name)(x_in)
    if batch_norm:
        x = BatchNormalization()(x)
    x = Activation(acti)(x)
    if dropout_rate is not None:
        x = Dropout(dropout_rate)(x)
    return x
def res_block_sep(x_in, filters, layer_name, batch_norm=False, kernel_size=(3, 3),
                  kernel_initializer='glorot_uniform', acti='relu', dropout_rate=None):
    '''
    Residual block: separable dual-conv block plus identity shortcut.
    The input channel count must equal filters[1] for the Add to be valid.
    :param x_in: Input feature maps (also used as the shortcut)
    :param filters: List of 2 filter counts for the inner conv block
    :param layer_name: List of 3 names: two conv layers, then the Add layer
    :param batch_norm: Forwarded to conv_block_sep
    :param kernel_size: Kernel size for both convolutions, (3, 3) by default
    :param kernel_initializer: Initializer for kernel weights
    :param acti: Activation for the inner conv block
    :param dropout_rate: Forwarded to conv_block_sep
    :return: conv_block_sep(x_in) + x_in
    '''
    # BUG FIX: message previously said "3 values" although the check is for 2
    assert len(filters) == 2, "Please assure that there is 2 values for filters."
    assert len(layer_name) == 3, "Please assure that there is 3 values for layer name"
    # first two names go to the conv layers, the last names the Add
    output_conv_block = conv_block_sep(x_in, filters, list(layer_name[:-1]), batch_norm=batch_norm,
                                       kernel_size=kernel_size, kernel_initializer=kernel_initializer,
                                       acti=acti, dropout_rate=dropout_rate)
    return Add(name=layer_name[-1])([output_conv_block, x_in])
def conv_block_sep_v2(x, filters, layer_name, norm_fn='bn', kernel_size=(3, 3),
                      kernel_initializer='glorot_uniform', acti_fn='relu', dropout_rate=None):
    '''
    Dual separable convolution block with [full pre-activation]: Norm -> Acti -> Conv.
    :param x: Input features
    :param filters: List with the number of filters for the 1st and 2nd convolutional layer
    :param layer_name: List with the names for the 1st and 2nd convolutional layer
    :param norm_fn: 'bn' for Batch Norm, 'gn' for Group Norm (anything else: no norm)
    :param kernel_size: Kernel size for both convolutional layers, (3, 3) by default
    :param kernel_initializer: Initializer for kernel weights, 'glorot_uniform' by default
    :param acti_fn: 'relu' for ReLU, 'prelu' for PReLU
    :param dropout_rate: If not None, apply Dropout before the 1st convolution only
    :return: Feature maps of the same spatial size with filters[1] channels
    '''
    assert isinstance(filters, list), "Please input filters of type list."
    # BUG FIX: message previously referred to `filters`
    assert isinstance(layer_name, list), "Please input layer_name of type list."
    # BUG FIX: typo "functino" corrected
    assert acti_fn is not None, 'There should be an activation function specified'
    # 1st convolutional block (pre-activation order)
    if norm_fn == 'bn':
        x = BatchNormalization()(x)
    elif norm_fn == 'gn':
        x = GroupNormalization()(x)
    if acti_fn == 'relu':
        x = ReLU()(x)
    elif acti_fn == 'prelu':
        x = PReLU(shared_axes=[1, 2])(x)
    if dropout_rate is not None:
        x = Dropout(dropout_rate)(x)
    x = SeparableConv2D(filters[0], kernel_size, padding='same',
                        kernel_initializer=kernel_initializer, name=layer_name[0])(x)
    # 2nd convolutional block (no dropout here, matching the original design)
    if norm_fn == 'bn':
        x = BatchNormalization()(x)
    elif norm_fn == 'gn':
        x = GroupNormalization()(x)
    if acti_fn == 'relu':
        x = ReLU()(x)
    elif acti_fn == 'prelu':
        x = PReLU(shared_axes=[1, 2])(x)
    x = SeparableConv2D(filters[1], kernel_size, padding='same',
                        kernel_initializer=kernel_initializer, name=layer_name[1])(x)
    return x
def down_sampling_sep_v2(x, filters, layer_name, norm_fn='bn', kernel_size=(3, 3), acti_fn='relu',
                         kernel_initializer='glorot_uniform', dropout_rate=None, mode='coord', x_dim=None, y_dim=None):
    '''
    Down sampling (v2) using a stride-2 convolution, with [full pre-activation]: Norm -> Acti -> Conv.
    :param x: Input features
    :param filters: Number of filters for the stride-2 convolutional layer
    :param layer_name: Name for the main convolutional layer
    :param norm_fn: 'bn' for Batch Norm, 'gn' for Group Norm (anything else: no norm)
    :param kernel_size: Kernel size, (3, 3) by default
    :param acti_fn: 'relu' for ReLU, 'prelu' for PReLU
    :param kernel_initializer: Initializer for kernel weights, 'glorot_uniform' by default
    :param dropout_rate: If not None, apply Dropout before the convolution
    :param mode: 'coord' for separable CoordConv, 'normal' for SeparableConv2D
    :param x_dim: x dimension for CoordConv (required when mode == 'coord')
    :param y_dim: y dimension for CoordConv (required when mode == 'coord')
    :return: Feature maps spatially scaled down by 2 with the specified filters
    '''
    assert mode == 'coord' or mode == 'normal', "Use 'coord' or 'normal' for mode!"
    # BUG FIX: typo "functino" corrected
    assert acti_fn is not None, 'There should be an activation function specified'
    # normalization
    if norm_fn == 'bn':
        x = BatchNormalization()(x)
    elif norm_fn == 'gn':
        x = GroupNormalization()(x)
    # activation
    if acti_fn == 'relu':
        x = ReLU()(x)
    elif acti_fn == 'prelu':
        x = PReLU(shared_axes=[1, 2])(x)
    if dropout_rate is not None:
        x = Dropout(dropout_rate)(x)
    if mode == 'coord':
        # separable coordconv: cheap 1-filter strided conv, then a stride-1 CoordConv
        assert x_dim is not None and y_dim is not None, "Please input dimension for CoordConv!"
        x = Conv2D(1, kernel_size, strides=(2, 2), padding='same',
                   kernel_initializer=kernel_initializer)(x)
        x = CoordConv(x_dim=x_dim, y_dim=y_dim, with_r=False, filters=filters, strides=(1, 1),
                      kernel_size=3, padding='same', kernel_initializer=kernel_initializer,
                      name=layer_name)(x)
    else:
        # normal mode: strided separable convolution
        x = SeparableConv2D(filters, kernel_size, strides=(2, 2), padding='same',
                            kernel_initializer=kernel_initializer, name=layer_name)(x)
    return x
def res_block_sep_v2(x_in, filters, layer_name, norm_fn='gn', kernel_size=(3, 3),
                     kernel_initializer='glorot_uniform', acti_fn='prelu', dropout_rate=None):
    '''
    Residual block (v2, full pre-activation): conv_block_sep_v2 plus identity shortcut.
    The input channel count must equal filters[1] for the Add to be valid.
    :param x_in: Input feature maps (also used as the shortcut)
    :param filters: List of 2 filter counts for the inner conv block
    :param layer_name: List of 3 names: two conv layers, then the Add layer
    :param norm_fn: 'bn' for Batch Norm, 'gn' for Group Norm
    :param kernel_size: Kernel size for both convolutions, (3, 3) by default
    :param kernel_initializer: Initializer for kernel weights
    :param acti_fn: 'relu' for ReLU, 'prelu' for PReLU
    :param dropout_rate: Forwarded to conv_block_sep_v2
    :return: conv_block_sep_v2(x_in) + x_in
    '''
    assert len(filters) == 2, "Please assure that there is 2 values for filters."
    assert len(layer_name) == 3, "Please assure that there is 3 values for layer name"
    # first two names go to the conv layers, the last names the Add
    output_conv_block = conv_block_sep_v2(x_in, filters, list(layer_name[:-1]), norm_fn=norm_fn,
                                          kernel_size=kernel_size, kernel_initializer=kernel_initializer,
                                          acti_fn=acti_fn, dropout_rate=dropout_rate)
    return Add(name=layer_name[-1])([output_conv_block, x_in])
#------------------3D Section------------------------------------------
def conv_block_3D(x, filters, norm_fn='gn', kernel_size=3,
kernel_initializer=hn, acti_fn='prelu', dropout_rate=None):
'''
Dual convolution block with [full pre-activation], Norm -> Acti -> Conv
:param x: Input features
:param filters: A list that contains the number of filters for 1st and 2nd convolutional layer
:param norm_fn: Tensorflow function for normalization, 'bn' for Batch Norm, 'gn' for Group Norm
:param kernel_size: Kernel size for both convolutional layer with 3x3 as default
:param kernel_initializer: Initializer for kernel weights with 'glorot uniform' as default
:param acti_fn: Tensorflow function for activation, 'relu' for ReLU, 'prelu' for PReLU
:param dropout_rate: Specify dropouts for layers
:return: Feature maps of same size as input with number of filters equivalent to the last layer
'''
assert type(filters)==list, "Please input filters of type list."
assert acti_fn!= None, 'There should be an activation function specified'
#1st convolutional block
if norm_fn=='bn':
x = BatchNormalization()(x)
elif norm_fn=='gn':
x = GroupNormalization()(x)
if acti_fn=='relu':
x = ReLU()(x)
elif acti_fn=='prelu':
x = PReLU(shared_axes=[1,2,3])(x)
if dropout_rate != None:
x = Dropout(dropout_rate)(x)
x = Conv3D(filters[0], kernel_size, padding='same', kernel_initializer=kernel_initializer)(x)
#2nd convolutional block
if norm_fn=='bn':
x = BatchNormalization()(x)
elif norm_fn=='gn':
x = GroupNormalization()(x)
if acti_fn=='relu':
x = ReLU()(x)
elif acti_fn=='prelu':
x = PReLU(shared_axes=[1,2,3])(x)
x = Conv3D(filters[1], kernel_size, padding='same', kernel_initializer=kernel_initializer)(x)
return x
def down_sampling_3D(x, filters, norm_fn='gn', kernel_size=3, acti_fn='relu',
kernel_initializer=hn, dropout_rate=None):
'''
Down sampling function version 2 with Convolutional layer of stride 2 as downsampling operation, with
[full pre-activation], Norm -> Acti -> Conv
:param x: Input features
:param filters: Number of filters for Convolutional layer of stride 2
:param norm_fn: Tensorflow function for normalization, 'bn' for Batch Norm, 'gn' for Group Norm
:param kernel_size: Kernel size for both convolutional layer with 3x3 as default
:param acti_fn: Tensorflow function for activation, 'relu' for ReLU, 'prelu' for PReLU
:param kernel_initializer: Initializer for kernel weights with 'glorot uniform' as default
:param dropout_rate: Specify dropouts for layers
:return: Feature maps of size scaled down by 2 with number of filters specified
'''
assert acti_fn!= None, 'There should be an activation function specified'
#normalization
if norm_fn=='bn':
x = BatchNormalization()(x)
elif norm_fn=='gn':
x = GroupNormalization()(x)
if acti_fn=='relu':
x = ReLU()(x)
#activation
elif acti_fn=='prelu':
x = PReLU(shared_axes=[1,2,3])(x)
if dropout_rate != None:
x = Dropout(dropout_rate)(x)
#normal mode
x = Conv3D(filters, kernel_size, strides=(2,2,2), padding='same', kernel_initializer=kernel_initializer)(x)
return x
def res_block_3D(x_in, filters, norm_fn='gn', kernel_size=3,
kernel_initializer=hn, acti_fn='prelu', dropout_rate=None):
'''
This function construct the residual block in 3D by input->conv_block_3D->concat([input,conv_output])
:param x: Input features
:param filters: A list that contains the number of filters for 1st and 2nd convolutional layer
:param norm_fn: Tensorflow function for normalization, 'bn' for Batch Norm, 'gn' for Group Norm
:param kernel_size: Kernel size for both convolutional layer with 3x3 as default
:param kernel_initializer: Initializer for kernel weights with 'glorot uniform' as default
:param acti_fn: Tensorflow function for activation, 'relu' for ReLU, 'prelu' for PReLU
:param dropout_rate: Specify dropouts for layers
:return: Resblock output => concatenating input with 2*convlutional output
'''
assert len(filters)==2, "Please assure that there is 2 values for filters."
output_conv_block = conv_block_3D(x_in, filters, norm_fn=norm_fn, kernel_size=kernel_size,
kernel_initializer = kernel_initializer, acti_fn = acti_fn, dropout_rate=dropout_rate)
output_add = Add()([output_conv_block, x_in])
return output_add
def up_3D(x_in, filters, merge, kernel_initializer=hn, size=(2, 2, 2)):
    '''
    This function carry out the operation of deconvolution => upsampling + convolution, and
    concatenating feature maps from the skip connection with the deconv feature maps
    @param x_in: input feature
    @param filters: Number of filters
    @param merge: feature maps from the skip connection
    @param kernel_initializer: Initializer for kernel weights, 'he_normal' (hn) by default
    @param size: Upsampling size, by default (2, 2, 2)
    @return: concatenated feature maps of skip connection output and upsampled feature maps from previous output
    '''
    u = UpSampling3D(size)(x_in)
    conv = Conv3D(filters=filters, kernel_size=3, padding='same',
                  kernel_initializer=kernel_initializer)(u)
    # PReLU shared across the three spatial axes
    conv = PReLU(shared_axes=[1, 2, 3])(conv)
    concat = tf.concat([merge, conv], axis=-1)
    return concat