...
 
Commits (14)
This diff is collapsed.
......@@ -119,7 +119,7 @@ within each section.
Name | Type | Example | Default
---- | ---- | ------- | -------
[csv_path_file](#csv-path-file) | `string` | `csv_path_file=file_list.csv` | `''`
[path_to_search](#path-to-search) | `string` | `path_to_search=my_data/fold_1` | NiftyNet home folder
[filename_contains](#filename-contains) | `string` or `string array` | `filename_contains=foo, bar` | `''`
[filename_not_contains](#filename-not-contains) | `string` or `string array` | `filename_not_contains=foo` | `''`
......@@ -130,7 +130,7 @@ within each section.
[spatial_window_size](#spatial-window-size) | `integer array` | `spatial_window_size=64, 64, 64` | `''`
[loader](#loader) | `string` | `loader=simpleitk` | `None`
###### `csv_path_file`
A file path to a list of input images. If the file exists, the input image name
list will be loaded from the file; the filename-based input image search will
be disabled; [path_to_search](#path-to-search),
......@@ -211,8 +211,8 @@ with an interpolation order of `3`.
A CSV file with the matched filenames and extracted subject names will be
generated to `T1Image.csv` in [`model_dir`](#model-dir) (by default; the CSV
file location can be specified by setting [csv_path_file](#csv-path-file)). To exclude
particular images, the [csv_path_file](#csv-path-file) can be edited manually.
This input source can be used alone, as a `T1` MRI input to an application.
It can also be used along with other modalities, a multi-modality example
......
......@@ -212,10 +212,8 @@ class ClassificationApplication(BaseApplication):
labels = tf.reshape(tf.cast(data_dict['label'], tf.int64), [-1])
prediction = tf.reshape(tf.argmax(net_out, -1), [-1])
num_classes = self.classification_param.num_classes
conf_mat = tf.contrib.metrics.confusion_matrix(labels,
prediction,
num_classes)
conf_mat = tf.to_float(conf_mat) / float(self.net_param.batch_size)
conf_mat = tf.metrics.confusion_matrix(labels, prediction, num_classes)
conf_mat = tf.to_float(conf_mat)
if self.classification_param.num_classes == 2:
outputs_collector.add_to_collection(
var=conf_mat[1][1], name='true_positives',
......
This diff is collapsed.
This diff is collapsed.
############################ input configuration sections
# Image input source read from ./example_volumes/csv_data; files are matched
# by substring and the subject id is derived by stripping the matched prefix.
[modality1]
path_to_search = ./example_volumes/csv_data
filename_contains = MahalT1Mask_
filename_removefromid = MahalT1Mask_
filename_not_contains =
spatial_window_size = (35, 35, 35)
interp_order = 3
pixdim=(1.0, 1.0, 1.0)
axcodes=(A, R, S)
# Label input source; interp_order = 0 avoids interpolating label values
# when resampling (presumably nearest-neighbour — confirm in reader docs).
[label]
path_to_search = ./example_volumes/csv_data/
filename_contains = BinLabel
filename_not_contains =
filename_removefromid = BinLabel_
spatial_window_size = (35, 35, 35)
interp_order = 0
pixdim=(1.0, 1.0, 1.0)
axcodes=(A, R, S)
# CSV-driven sampler section: csv_data_file points at per-subject data
# (labels/coordinates) consumed by the patch-based sampler.
[sampler]
csv_data_file = ./example_volumes/csv_data/PlacesLabels.csv
############################## system configuration sections
[SYSTEM]
# empty cuda_devices string leaves device selection to the framework/CPU
cuda_devices = ""
num_threads = 5
num_gpus = 1
model_dir = ./models/model_monomodal_toy
[NETWORK]
name = toynet
activation_function = prelu
batch_size = 1
decay = 0.1
reg_type = L2
# volume level preprocessing
volume_padding_size = 21
# histogram normalisation
histogram_ref_file = ./example_volumes/monomodal_parcellation/standardisation_models.txt
norm_type = percentile
cutoff = (0.01, 0.99)
normalisation = False
whitening = False
normalise_foreground_only=False
foreground_type = otsu_plus
multimod_foreground_type = and
# 'patch' sampling pairs with the [sampler] csv_data_file section above
window_sampling=patch
queue_length = 20
[TRAINING]
sample_per_volume = 1
rotation_angle = (-10.0, 10.0)
scaling_percentage = (-10.0, 10.0)
random_flipping_axes= 1
lr = 0.01
loss_type = Dice
starting_iter = 0
# NOTE(review): save_every_n (100) exceeds max_iter (10), so no intermediate
# checkpoint is written during this short toy run — intentional for a demo.
save_every_n = 100
max_iter = 10
max_checkpoints = 20
[INFERENCE]
border = (0, 0, 1)
#inference_iter = 10
save_seg_dir = ./output/toy
output_interp_order = 0
spatial_window_size = (0, 0, 3)
[EVALUATION]
evaluations=Dice
############################ custom configuration sections
# Binds the input sections above to the segmentation application roles.
[SEGMENTATION]
image = modality1
label = label
sampler = sampler
output_prob = False
num_classes = 160
label_normalisation = True
import numpy as np
from niftynet.engine.image_window_dataset import ImageWindowDataset
from niftynet.engine.image_window import N_SPATIAL, LOCATION_FORMAT
......@@ -15,6 +16,7 @@ class ImageWindowDatasetCSV(ImageWindowDataset):
windows_per_image=1,
shuffle=True,
queue_length=10,
num_threads=4,
epoch=-1,
smaller_final_batch_mode='pad',
name='random_vector_sampler'):
......@@ -30,6 +32,7 @@ class ImageWindowDatasetCSV(ImageWindowDataset):
epoch=epoch,
smaller_final_batch_mode=smaller_final_batch_mode,
name=name)
self.set_num_threads(num_threads)
def layer_op(self, idx=None):
"""
......@@ -83,9 +86,10 @@ class ImageWindowDatasetCSV(ImageWindowDataset):
image_data[LOCATION_FORMAT.format(mod)] = coords
image_data[mod] = image_data[mod][np.newaxis, ...]
if self.csv_reader is not None:
_, label_data, _ = self.csv_reader(idx=image_id)
image_data['label'] = label_data['label']
image_data['label_location'] = image_data['image_location']
_, label_dict, _ = self.csv_reader(subject_id=image_id)
image_data.update(label_dict)
for name in self.csv_reader.names:
image_data[name + '_location'] = image_data['image_location']
return image_data
@property
......
This diff is collapsed.
......@@ -29,6 +29,7 @@ class ResizeSamplerCSV(ImageWindowDatasetCSV):
windows_per_image=1,
shuffle=True,
queue_length=10,
num_threads=4,
smaller_final_batch_mode='pad',
name='resize_sampler_v2'):
tf.logging.info('reading size of preprocessed images')
......@@ -41,6 +42,7 @@ class ResizeSamplerCSV(ImageWindowDatasetCSV):
batch_size=batch_size,
windows_per_image=windows_per_image,
queue_length=queue_length,
num_threads=num_threads,
shuffle=shuffle,
epoch=-1 if shuffle else 1,
smaller_final_batch_mode=smaller_final_batch_mode,
......@@ -65,55 +67,54 @@ class ResizeSamplerCSV(ImageWindowDatasetCSV):
:return: output data dictionary ``{'image_modality': data_array}``
"""
while True:
image_id, data, interp_orders = self.reader(idx=idx)
image_shapes = \
dict((name, data[name].shape) for name in self.window.names)
# window shapes can be dynamic, here they
# are converted to static ones
# as now we know the image shapes
static_window_shapes = self.window.match_image_shapes(image_shapes)
image_id, data, interp_orders = self.reader(idx=idx)
image_shapes = \
dict((name, data[name].shape) for name in self.window.names)
# window shapes can be dynamic, here they
# are converted to static ones
# as now we know the image shapes
static_window_shapes = self.window.match_image_shapes(image_shapes)
# for resize sampler the coordinates are not used
# simply use the spatial dims of the input image
output_dict = {}
for name in list(data):
# prepare output dictionary keys
coordinates_key = LOCATION_FORMAT.format(name)
image_data_key = name
# for resize sampler the coordinates are not used
# simply use the spatial dims of the input image
output_dict = {}
for name in list(data):
# prepare output dictionary keys
coordinates_key = LOCATION_FORMAT.format(name)
image_data_key = name
output_dict[coordinates_key] = self.dummy_coordinates(
image_id, static_window_shapes[name], self.window.n_samples)
image_array = []
for _ in range(self.window.n_samples):
# prepare image data
image_shape = image_shapes[name]
window_shape = static_window_shapes[name]
output_dict[coordinates_key] = np.squeeze(self.dummy_coordinates(
image_id, static_window_shapes[name], self.window.n_samples), axis=0)
image_array = []
for _ in range(self.window.n_samples):
# prepare image data
image_shape = image_shapes[name]
window_shape = static_window_shapes[name]
if image_shape == window_shape or interp_orders[name][0] < 0:
# already in the same shape
image_window = data[name]
else:
zoom_ratio = [float(p) / float(d) for p, d in
zip(window_shape, image_shape)]
image_window = zoom_3d(image=data[name],
ratio=zoom_ratio, interp_order=
interp_orders[name][0])
image_array.append(image_window[np.newaxis, ...])
if len(image_array) > 1:
output_dict[image_data_key] = \
np.concatenate(image_array, axis=0)
if image_shape == window_shape or interp_orders[name][0] < 0:
# already in the same shape
image_window = data[name]
else:
output_dict[image_data_key] = np.squeeze(image_array[0], axis=0)
# the output image shape should be
# [enqueue_batch_size, x, y, z, time, modality]
# here enqueue_batch_size = 1 as we only have one sample
# per image
if self.csv_reader is not None:
_, label_dict, _ = self.csv_reader(idx=image_id)
output_dict['label'] = np.squeeze(label_dict['label'], axis=0)
output_dict['label_location'] = output_dict['image_location']
yield output_dict
zoom_ratio = [float(p) / float(d) for p, d in
zip(window_shape, image_shape)]
image_window = zoom_3d(image=data[name],
ratio=zoom_ratio, interp_order=
interp_orders[name][0])
image_array.append(image_window[np.newaxis, ...])
if len(image_array) > 1:
output_dict[image_data_key] = \
np.concatenate(image_array, axis=0)
else:
output_dict[image_data_key] = image_array[0]
# the output image shape should be
# [enqueue_batch_size, x, y, z, time, modality]
# here enqueue_batch_size = 1 as we only have one sample
# per image
if self.csv_reader is not None:
_, label_dict, _ = self.csv_reader(idx=image_id)
output_dict.update(label_dict)
for name in self.csv_reader.names:
output_dict[name + '_location'] = output_dict['image_location']
return output_dict
def zoom_3d(image, ratio, interp_order):
......
This diff is collapsed.
......@@ -3,7 +3,6 @@
from __future__ import absolute_import, division, print_function
from copy import deepcopy
import argparse
import numpy as np
import pandas
......@@ -21,8 +20,8 @@ from niftynet.utilities.util_common import look_up_operations
DEFAULT_INTERP_ORDER = 1
SUPPORTED_DATA_SPEC = {
'csv_file', 'path_to_search',
'filename_contains', 'filename_not_contains', 'filename_removefromid',
'csv_path_file', 'path_to_search', 'csv_data_file', 'filename_removefromid',
'filename_contains', 'filename_not_contains', 'to_ohe',
'interp_order', 'loader', 'pixdim', 'axcodes', 'spatial_window_size'}
......@@ -407,7 +406,7 @@ def _filename_to_image_list(file_list, mod_dict, data_param, num_threads=10):
if not volume_list:
tf.logging.fatal(
"Empty filename lists, please check the csv "
"files. (removing csv_file keyword if it is in the config file "
"files. (removing csv_path_file keyword if it is in the config file "
"to automatically search folders and generate new csv "
"files again)\n\n"
"Please note in the matched file names, each subject id are "
......@@ -473,7 +472,6 @@ def _create_image(file_list, idx, modalities, data_param):
'loader': loader}
return ImageFactory.create_instance(**image_properties)
def param_to_dict(input_data_param):
"""
Validate the user input ``input_data_param``
......@@ -503,4 +501,4 @@ def param_to_dict(input_data_param):
for data_key in dict_param:
look_up_operations(data_key, SUPPORTED_DATA_SPEC)
data_param[mod] = dict_param
return data_param
return data_param
\ No newline at end of file
......@@ -180,7 +180,7 @@ class ImageSetsPartitioner(object):
return subset[section_names]
return subset
def load_data_sections_by_subject(self):
def load_data_sections_by_subject(self, merge_multi=False):
"""
Go through all input data sections, converting each section
to a list of file names.
......@@ -194,14 +194,34 @@ class ImageSetsPartitioner(object):
'Nothing to load, please check input sections in the config.')
raise ValueError
self._file_list = None
for section_name in self.data_param:
section_first = [section_name for section_name in
self.data_param if
self.data_param[section_name].csv_data_file =='']
section_second = [section_name for section_name in self.data_param if
section_name not in section_first]
usable_section = section_first + section_second
for section_name in usable_section:
modality_file_list = self.grep_files_by_data_section(section_name)
if self._file_list is None:
# adding all rows of the first modality
self._file_list = modality_file_list
continue
n_rows = self._file_list[COLUMN_UNIQ_ID].count()
self._file_list = pandas.merge(self._file_list,
if len(modality_file_list.index) > n_rows and set(
modality_file_list.index) == set(self._file_list[
COLUMN_UNIQ_ID]):
tf.logging.warning('The data file has multiple entries for '
'each subject')
if merge_multi:
modality_file_list[COLUMN_UNIQ_ID] = modality_file_list.index
self._file_list = pandas.merge(self._file_list,
modality_file_list,
on=COLUMN_UNIQ_ID,
how='outer')
else:
self._file_list = pandas.merge(self._file_list,
modality_file_list,
how='outer',
on=COLUMN_UNIQ_ID)
......@@ -212,7 +232,7 @@ class ImageSetsPartitioner(object):
if self._file_list is None or self._file_list.size == 0:
tf.logging.fatal(
"Empty filename lists, please check the csv "
"files (removing csv_file keyword if it is in the config file "
"files (removing csv_data_file keyword if it is in the config file "
"to automatically search folders and generate new csv "
"files again).\n\n"
"Please note in the matched file names, each subject id are "
......@@ -226,10 +246,10 @@ class ImageSetsPartitioner(object):
def grep_files_by_data_section(self, modality_name):
"""
list all files by a given input data section::
if the ``csv_file`` property of ``data_param[modality_name]``
if the ``csv_data_file`` property of ``data_param[modality_name]``
corresponds to a file, read the list from the file;
otherwise
write the list to ``csv_file``.
write the list to ``csv_data_file``.
:return: a table with two columns,
the column names are ``(COLUMN_UNIQ_ID, modality_name)``.
......@@ -240,7 +260,7 @@ class ImageSetsPartitioner(object):
modality_name, list(self.data_param))
raise ValueError
# input data section must have a ``csv_file`` section for loading
# input data section must have a ``csv_data_file`` section for loading
# or writing filename lists
if isinstance(self.data_param[modality_name], dict):
mod_spec = self.data_param[modality_name]
......@@ -248,45 +268,45 @@ class ImageSetsPartitioner(object):
mod_spec = vars(self.data_param[modality_name])
#########################
# guess the csv_file path
# guess the csv_data_file path
#########################
temp_csv_file = None
temp_csv_data_file = None
try:
csv_file = os.path.expanduser(mod_spec.get('csv_file', None))
if not os.path.isfile(csv_file):
csv_data_file = os.path.expanduser(mod_spec.get('csv_data_file', None))
if not os.path.isfile(csv_data_file):
# writing to the same folder as data_split_file
default_csv_file = os.path.join(
default_csv_data_file = os.path.join(
os.path.dirname(self.data_split_file),
'{}.csv'.format(modality_name))
tf.logging.info('`csv_file = %s` not found, '
tf.logging.info('`csv_data_file = %s` not found, '
'writing to "%s" instead.',
csv_file, default_csv_file)
csv_file = default_csv_file
if os.path.isfile(csv_file):
tf.logging.info('Overwriting existing: "%s".', csv_file)
csv_file = os.path.abspath(csv_file)
csv_data_file, default_csv_data_file)
csv_data_file = default_csv_data_file
if os.path.isfile(csv_data_file):
tf.logging.info('Overwriting existing: "%s".', csv_data_file)
csv_data_file = os.path.abspath(csv_data_file)
except (AttributeError, KeyError, TypeError):
tf.logging.debug('`csv_file` not specified, writing the list of '
tf.logging.debug('`csv_data_file` not specified, writing the list of '
'filenames to a temporary file.')
import tempfile
temp_csv_file = os.path.join(
temp_csv_data_file = os.path.join(
tempfile.mkdtemp(), '{}.csv'.format(modality_name))
csv_file = temp_csv_file
csv_data_file = temp_csv_data_file
#############################################
# writing csv file if path_to_search specified
##############################################
if mod_spec.get('path_to_search', None):
if not temp_csv_file:
if not temp_csv_data_file:
tf.logging.info(
'[%s] search file folders, writing csv file %s',
modality_name, csv_file)
modality_name, csv_data_file)
# grep files by section properties and write csv
try:
matcher = KeywordsMatching.from_dict(
input_dict=mod_spec,
default_folder=self.default_image_file_location)
match_and_write_filenames_to_csv([matcher], csv_file)
match_and_write_filenames_to_csv([matcher], csv_data_file)
except (IOError, ValueError) as reading_error:
tf.logging.warning('Ignoring input section: [%s], '
'due to the following error:',
......@@ -297,28 +317,37 @@ class ImageSetsPartitioner(object):
else:
tf.logging.info(
'[%s] using existing csv file %s, skipped filenames search',
modality_name, csv_file)
modality_name, csv_data_file)
if not os.path.isfile(csv_file):
if not os.path.isfile(csv_data_file):
tf.logging.fatal(
'[%s] csv file %s not found.', modality_name, csv_file)
'[%s] csv file %s not found.', modality_name, csv_data_file)
raise IOError
###############################
# loading the file as dataframe
###############################
try:
csv_list = pandas.read_csv(
csv_file,
header=None,
dtype=(str, str),
names=[COLUMN_UNIQ_ID, modality_name],
skipinitialspace=True)
if self.data_param[modality_name].csv_data_file == '':
csv_list = pandas.read_csv(
csv_data_file,
header=None,
dtype=(str, str),
names=[COLUMN_UNIQ_ID, modality_name],
skipinitialspace=True)
else:
csv_list = pandas.read_csv(
csv_data_file,
header=None,
index_col=0,
)
csv_list.index = csv_list.index.map(str)
except Exception as csv_error:
tf.logging.fatal(repr(csv_error))
raise
if temp_csv_file:
shutil.rmtree(os.path.dirname(temp_csv_file), ignore_errors=True)
if temp_csv_data_file:
shutil.rmtree(os.path.dirname(temp_csv_data_file), ignore_errors=True)
return csv_list
......
......@@ -135,6 +135,6 @@ class InstanceNormLayer(TrainableLayer):
variables_collections=None,
outputs_collections=None,
trainable=True,
data_format='NHWC',
data_format='NWC',
scope=None)
......@@ -79,5 +79,5 @@ def cross_entropy(prediction,
:return: the loss
"""
ground_truth = tf.to_int64(ground_truth)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=ground_truth)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prediction, labels=ground_truth)
return loss
\ No newline at end of file
......@@ -78,7 +78,6 @@ class BSplineFieldImageGridWarperLayer(GridWarperLayer):
for d in [0, 1, 2]]
resampled=tf.stack(resampled_list,5)
permuted_shape=[batch_size]+[f-3 for f in self._coeff_shape]+self._knot_spacing+[spatial_rank]
print(permuted_shape)
permuted=tf.transpose(tf.reshape(resampled,permuted_shape),[0,1,4,2,5,3,6,7])
valid_size=[(f-3)*k for f,k in zip(self._coeff_shape,self._knot_spacing)]
reshaped=tf.reshape(permuted,[batch_size]+valid_size+[spatial_rank])
......
......@@ -68,7 +68,7 @@ class ResNet(BaseNet):
out = layers.conv1(images, is_training)
for block in layers.blocks:
out = block(out, is_training)
out = tf.reduce_mean(tf.nn.relu(layers.bn(out, is_training)),axis=[1,2,3])
out = tf.nn.relu(layers.bn(out, is_training))
return layers.fc(out)
......@@ -114,7 +114,6 @@ class BottleneckBlock(TrainableLayer):
out=layers.conv[1](out, is_training)
out=layers.conv[2](out, is_training)
out = layers.conv_shortcut(tmp, is_training) + out
print(out.shape)
return out
DownResBlockDesc = namedtuple('DownResBlockDesc', ['blocks'])
......
......@@ -181,12 +181,27 @@ def add_input_data_args(parser):
:return:
"""
parser.add_argument(
"--csv_file",
"--csv_path_file",
metavar='',
type=str,
help="Input list of subjects in csv files",
default='')
parser.add_argument(
"--csv_data_file",
metavar='',
type=str,
help="Path to a csv with data; labels, features or coordinates for"
"the patch based sampler",
default='')
parser.add_argument(
"--to_ohe",
help="Indicates if the data provided in the csv should be one-hot-encoded."
"This is only valid when the csv_data_file has 2 columns",
type=str2boolean,
default=False)
parser.add_argument(
"--path_to_search",
metavar='',
......@@ -329,7 +344,7 @@ def add_network_args(parser):
help="How to sample patches from each loaded image:"
" 'uniform': fixed size uniformly distributed,"
" 'resize': resize image to the patch size.",
choices=['uniform', 'resize', 'balanced', 'weighted'],
choices=['uniform', 'resize', 'balanced', 'weighted','patch'],
default='uniform')
parser.add_argument(
......
......@@ -189,16 +189,16 @@ def run():
# set the output path of csv list if not exists
try:
csv_path = resolve_file_name(
input_data_args[section].csv_file,
input_data_args[section].csv_path_file,
(os.path.dirname(config_file_name), NIFTYNET_HOME))
input_data_args[section].csv_file = csv_path
input_data_args[section].csv_path_file = csv_path
# don't search files if csv specified in config
try:
delattr(input_data_args[section], 'path_to_search')
except AttributeError:
pass
except (IOError, TypeError):
input_data_args[section].csv_file = ''
input_data_args[section].csv_path_file = ''
# preserve ``config_file`` and ``action parameter`` from the meta_args
system_args['CONFIG_FILE'] = argparse.Namespace(path=config_file_name)
......
......@@ -18,7 +18,7 @@ generate_2d_images()
# test multiple modalities
MULTI_MOD_DATA = {
'T1': ParserNamespace(
csv_file=os.path.join('testing_data', 'T1reader.csv'),
csv_path_file=os.path.join('testing_data', 'T1reader.csv'),
path_to_search='testing_data',
filename_contains=('_o_T1_time',),
filename_not_contains=('Parcellation',),
......@@ -28,7 +28,7 @@ MULTI_MOD_DATA = {
loader=None
),
'FLAIR': ParserNamespace(
csv_file=os.path.join('testing_data', 'FLAIRreader.csv'),
csv_path_file=os.path.join('testing_data', 'FLAIRreader.csv'),
path_to_search='testing_data',
filename_contains=('FLAIR_',),
filename_not_contains=('Parcellation',),
......@@ -43,7 +43,7 @@ MULTI_MOD_TASK = ParserNamespace(image=('T1', 'FLAIR'))
# test single modalities
SINGLE_MOD_DATA = {
'lesion': ParserNamespace(
csv_file=os.path.join('testing_data', 'lesion.csv'),
csv_path_file=os.path.join('testing_data', 'lesion.csv'),
path_to_search='testing_data',
filename_contains=('Lesion',),
filename_not_contains=('Parcellation',),
......@@ -57,7 +57,7 @@ SINGLE_MOD_TASK = ParserNamespace(image=('lesion',))
EXISTING_DATA = {
'lesion': ParserNamespace(
csv_file=os.path.join('testing_data', 'lesion.csv'),
csv_path_file=os.path.join('testing_data', 'lesion.csv'),
interp_order=3,
pixdim=None,
axcodes=None,
......@@ -68,7 +68,7 @@ EXISTING_DATA = {
# test labels
LABEL_DATA = {
'parcellation': ParserNamespace(
csv_file=os.path.join('testing_data', 'labels.csv'),
csv_path_file=os.path.join('testing_data', 'labels.csv'),
path_to_search='testing_data',
filename_contains=('Parcellation',),
filename_not_contains=('Lesion',),
......@@ -82,7 +82,7 @@ LABEL_TASK = ParserNamespace(label=('parcellation',))
BAD_DATA = {
'lesion': ParserNamespace(
csv_file=os.path.join('testing_data', 'lesion.csv'),
csv_path_file=os.path.join('testing_data', 'lesion.csv'),
path_to_search='testing_data',
filename_contains=('Lesion',),
filename_not_contains=('Parcellation',),
......@@ -96,7 +96,7 @@ BAD_TASK = ParserNamespace(image=('test',))
IMAGE_2D_DATA = {
'color_images': ParserNamespace(
csv_file=os.path.join('testing_data', 'images_2d_u.csv'),
csv_path_file=os.path.join('testing_data', 'images_2d_u.csv'),
path_to_search=os.path.join('testing_data', 'images2d'),
filename_contains=('_u.png',),
interp_order=1,
......@@ -105,7 +105,7 @@ IMAGE_2D_DATA = {
loader=None
),
'gray_images': ParserNamespace(
csv_file=os.path.join('testing_data', 'images_2d_g.csv'),
csv_path_file=os.path.join('testing_data', 'images_2d_g.csv'),
path_to_search=os.path.join('testing_data', 'images2d'),
filename_contains=('_g.png',),
interp_order=1,
......@@ -114,7 +114,7 @@ IMAGE_2D_DATA = {
loader=None
),
'seg_masks': ParserNamespace(
csv_file=os.path.join('testing_data', 'images_2d_m.csv'),
csv_path_file=os.path.join('testing_data', 'images_2d_m.csv'),
path_to_search=os.path.join('testing_data', 'images2d'),
filename_contains=('_m.png',),
interp_order=0,
......
......@@ -138,7 +138,7 @@ class Read2DTest(tf.test.TestCase):
def test_input_properties(self):
data_param = {'mr': {'path_to_search': IMAGE_PATH_2D,
'csv_file': '2d_test.csv'}}
'csv_path_file': '2d_test.csv'}}
reader = ImageReader().initialise(data_param)
self.default_property_asserts(reader)
idx, data, interp = reader()
......@@ -151,7 +151,7 @@ class Read2DTest(tf.test.TestCase):
def test_no_2d_resampling_properties(self):
data_param = {'mr': {'path_to_search': IMAGE_PATH_2D,
'csv_file': '2d_test.csv',
'csv_path_file': '2d_test.csv',
'pixdim': (2, 2, 2),
'axcodes': 'RAS'}}
reader = ImageReader().initialise(data_param)
......@@ -199,7 +199,7 @@ class Read2D_1DTest(tf.test.TestCase):
# loading 2d images of rank 3: [x, y, 1]
def test_no_2d_resampling_properties(self):
data_param = {'mr': {'path_to_search': IMAGE_PATH_2D_1,
'csv_file': '2d_test.csv',
'csv_path_file': '2d_test.csv',
'filename_contains': '_img',
'pixdim': (2, 2, 2),
'axcodes': 'RAS'}}
......@@ -282,7 +282,7 @@ class Read2D_colorTest(tf.test.TestCase):
# loading 2d images of rank 3: [x, y, 3] or [x, y, 4]
def test_no_2d_resampling_properties(self):
data_param = {'mr': {'path_to_search': IMAGE_PATH_2D,
'csv_file': '2d_test.csv',
'csv_path_file': '2d_test.csv',
'filename_contains': '_u',
'pixdim': (2, 2, 2),
'axcodes': 'RAS'}}
......
# Image input source.
# NOTE(review): hard-coded absolute user path — not portable; should point at
# a relative/example location like the other configs in this repo.
[image]
path_to_search = /home/tom/data/BRATS_18_SPLITS/train
filename_contains =
filename_not_contains =seg
spatial_window_size = (32, 32, 1)
axcodes=(A, R, S)
interp_order = 1
# Labels come from a CSV (csv_data_file) rather than a filename search;
# to_ohe = False keeps the label column as-is instead of one-hot-encoding.
[label]
csv_data_file = ./modality_labels.csv
to_ohe = False
############################## system configuration sections
[SYSTEM]
# empty cuda_devices string leaves device selection to the framework/CPU
cuda_devices = ""
num_threads = 2
num_gpus = 1
model_dir = ./models/model_highres3dnet
[NETWORK]
name = resnet
activation_function = relu
batch_size = 1
decay = 0
reg_type = L2
# volume level preprocessing
volume_padding_size = 21
# histogram normalisation
histogram_ref_file = ./example_volumes/monomodal_parcellation/standardisation_models.txt
norm_type = percentile
cutoff = (0.01, 0.99)
normalisation = True
whitening = True
normalise_foreground_only=True
foreground_type = otsu_plus
multimod_foreground_type = and
queue_length = 1
window_sampling = resize
[TRAINING]
sample_per_volume = 32
rotation_angle = (-10.0, 10.0)
scaling_percentage = (-10.0, 10.0)
lr = 0.0001
loss_type = CrossEntropy
starting_iter = 0
save_every_n = 5
max_iter = 6
max_checkpoints = 20
[INFERENCE]
border = (5, 5, 5)
#inference_iter = 10
save_seg_dir = ./output/highres3dnet
output_interp_order = 0
spatial_window_size = (0, 0, 3)
############################ custom configuration sections
# Binds the input sections above to the classification application roles.
[CLASSIFICATION]
image = image
label = label
output_prob = False
num_classes = 4
label_normalisation = False