Commit 28138819 authored by Felix Bragman

multi-task vgg

parent f1da4497
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function

from six.moves import range

from niftynet.layer.convolution import ConvolutionalLayer
from niftynet.layer.downsample import DownSampleLayer
from niftynet.layer.fully_connected import FullyConnectedLayer
from niftynet.network.base_net import BaseNet


class VGG16Net(BaseNet):
"""
Implementation of VGG16-Net:
Simonyan and Zisserman, "Very Deep Convolutional Networks for
Large-Scale Image Recogntion", ICLR 2015
- Original paper trained on 224 x 224 RGB image
- No batch-norm in original paper and no drop-out
- Batch-norm is default in this implementation
- Preprocessing in paper: RGB image de-meaned based on training data
e.g. I[:, :, 0] = I[:, :, 0] = meanRed
I[:, :, 1] = I[:, :, 1] = meanGreen
I[:, :, 2] = I[:, :, 2] = meanBlue
- Conv layers: 'padding' = SAME, 'stride' = 1
- Max pool: 'window' = 2x2, 'stride' = 2
"""

    def __init__(self,
                 num_classes,
                 w_initializer=None,
                 w_regularizer=None,
                 b_initializer=None,
                 b_regularizer=None,
                 acti_func='relu',
                 name='VGG16Net'):
        super(VGG16Net, self).__init__(
            num_classes=num_classes,
            w_initializer=w_initializer,
            w_regularizer=w_regularizer,
            b_initializer=b_initializer,
            b_regularizer=b_regularizer,
            acti_func=acti_func,
            name=name)

        self.layers = [
            {'name': 'layer_1', 'n_features': 64, 'kernel_size': 3, 'repeat': 2},
            {'name': 'maxpool_1'},
            {'name': 'layer_2', 'n_features': 128, 'kernel_size': 3, 'repeat': 2},
            {'name': 'maxpool_2'},
            {'name': 'layer_3', 'n_features': 256, 'kernel_size': 3, 'repeat': 3},
            {'name': 'maxpool_3'},
            {'name': 'layer_4', 'n_features': 512, 'kernel_size': 3, 'repeat': 3},
            {'name': 'maxpool_4'},
            {'name': 'layer_5', 'n_features': 512, 'kernel_size': 3, 'repeat': 3},
            {'name': 'maxpool_5'},
            {'name': 'fc_1', 'n_features': 4096},
            {'name': 'fc_2', 'n_features': 4096},
            {'name': 'fc_3', 'n_features': num_classes}]
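        # Note: the spec above follows VGG16 configuration "D" from the paper:
        # 13 3x3 convolutions (2 + 2 + 3 + 3 + 3) in five blocks, separated by
        # 2x2 max pooling, followed by three fully connected layers.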

    def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
        # assert layer_util.check_spatial_dims(
        #     images, lambda x: x % 224 == 0)
        layer_instances = []
        for layer_iter, layer in enumerate(self.layers):
            # Get layer type
            layer_type = self._get_layer_type(layer['name'])

            if 'repeat' in layer:
                repeat_conv = layer['repeat']
            else:
                repeat_conv = 1

            # first layer: apply the first conv block directly to the inputs,
            # including any repeated convolutions before the first maxpool
            if layer_iter == 0:
                flow = images
                for _ in range(repeat_conv):
                    conv_layer = ConvolutionalLayer(
                        n_output_chns=layer['n_features'],
                        kernel_size=layer['kernel_size'],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name=layer['name'])
                    flow = conv_layer(flow, is_training)
                    layer_instances.append((conv_layer, flow))
            # last layer
            elif layer_iter == len(self.layers) - 1:
                fc_layer = FullyConnectedLayer(
                    n_output_chns=layer['n_features'],
                    acti_func=self.acti_func,
                    w_initializer=self.initializers['w'],
                    w_regularizer=self.regularizers['w'])
                flow = fc_layer(flow)
                layer_instances.append((fc_layer, flow))
            # all other layers
            else:
                if layer_type == 'maxpool':
                    downsample_layer = DownSampleLayer(
                        kernel_size=2,
                        func='MAX',
                        stride=2)
                    flow = downsample_layer(flow)
                    layer_instances.append((downsample_layer, flow))
                elif layer_type == 'layer':
                    # repeated convolutions within a conv block
                    for _ in range(repeat_conv):
                        conv_layer = ConvolutionalLayer(
                            n_output_chns=layer['n_features'],
                            kernel_size=layer['kernel_size'],
                            acti_func=self.acti_func,
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            name=layer['name'])
                        flow = conv_layer(flow, is_training)
                        layer_instances.append((conv_layer, flow))
                elif layer_type == 'fc':
                    fc_layer = FullyConnectedLayer(
                        n_output_chns=layer['n_features'],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'])
                    flow = fc_layer(flow, keep_prob=0.5)
                    layer_instances.append((fc_layer, flow))

        if is_training:
            self._print(layer_instances)
            return flow
        return layer_instances[layer_id][1]

    @staticmethod
    def _print(list_of_layers):
        for (op, _) in list_of_layers:
            print(op)

    @staticmethod
    def _get_layer_type(layer_name):
        return layer_name.split('_')[0]
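

if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original file): build the
    # graph for a single forward pass on a dummy 2D RGB batch. This assumes
    # the TensorFlow 1.x API used by NiftyNet; the input shape and the number
    # of classes below are hypothetical example values.
    import tensorflow as tf

    dummy_images = tf.placeholder(tf.float32, shape=(1, 224, 224, 3))
    vgg16 = VGG16Net(num_classes=1000)
    logits = vgg16(dummy_images, is_training=True)
    print(logits)  # expected static shape: (1, num_classes)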