Commit 55b17ebe by Wenqi Li

update versioning and source code encoding

parents 1660a332 866e74ff
Pipeline #9501 passed with stages
in 2 minutes 53 seconds
......@@ -13,7 +13,7 @@ zip_dir = '.'
target_dir = '.'
for zip_filename in {'TrainingData_Part1.zip', 'TrainingData_Part2.zip',
'TrainingData_Part3.zip'}:
print('Extracting', zip_filename , '...')
print('Extracting', zip_filename, '...')
zip_ref = zipfile.ZipFile(os.path.join(zip_dir, zip_filename), 'r')
zip_ref.extractall(os.path.basename(zip_filename.replace('.zip', '')))
zip_ref.close()
# -*- coding: utf-8 -*-
import sys,glob,csv
import tensorflow as tf
def rename_checkpoint_to_partial(source,target,transform):
......@@ -55,6 +56,6 @@ def main(argv):
return 2
rename_checkpoint_to_partial(argv[0],argv[1],argv[2])
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
\ No newline at end of file
sys.exit(main(sys.argv[1:]))
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import numpy as np
......
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import numpy as np
......
# -*- coding: utf-8 -*-
import SimpleITK as sitk
import nibabel
import numpy as np
......
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import tensorflow as tf
......@@ -9,7 +10,7 @@ import numpy
Re-implementation of [1] for volumetric image processing.
[1] Zheng, Shuai, et al. "Conditional Random Fields as Recurrent Neural Networks."
[1] Zheng, Shuai, et al. "Conditional Random Fields as Recurrent Neural Networks."
CVPR 2015.
"""
......@@ -22,7 +23,7 @@ def permutohedral_prepare(position_vectors):
## Generate position vectors in lattice space
x=position_vectors/(numpy.sqrt(2./3.)*(nCh+1))
# Embed in lattice space using black magic from the permutohedral paper
alpha=lambda i:numpy.sqrt(float(i)/(float(i)+1.))
Ex=[None]*(nCh+1)
......@@ -40,10 +41,10 @@ def permutohedral_prepare(position_vectors):
# Find the simplex we are in and store it in rank (where rank describes what position coordinate i has
# in the sorted order of the feature values)
di=Ex-tf.to_float(rem0)
_,index=tf.nn.top_k(di,nCh+1,sorted=True)
_,index=tf.nn.top_k(di,nCh+1,sorted=True)
_,rank=tf.nn.top_k(-index,nCh+1,sorted=True) # This can be done more efficiently if necessary following the permutohedral paper
# if the point doesn't lie on the plane (sum != 0) bring it back
# if the point doesn't lie on the plane (sum != 0) bring it back
rank=tf.to_int32(rank)+sumV
addMinusSub=tf.to_int32(rank<0)*(nCh+1)-tf.to_int32(rank>=nCh+1)*(nCh+1)
rank=rank+addMinusSub
......@@ -81,7 +82,7 @@ def permutohedral_prepare(position_vectors):
fusedKeys=tf.boolean_mask(fusedKeys,tf.not_equal(fusedI64Keys,-1))
fusedI64Keys=tf.boolean_mask(fusedI64Keys,tf.not_equal(fusedI64Keys,-1))
insertIndices = indextable.insert(fusedI64Keys,tf.expand_dims(tf.transpose(tf.range(1,tf.to_int64(tf.size(fusedI64Keys)+1),dtype=tf.int64)),1))
insertIndices = indextable.insert(fusedI64Keys,tf.expand_dims(tf.transpose(tf.range(1,tf.to_int64(tf.size(fusedI64Keys)+1),dtype=tf.int64)),1))
blurNeighbours1=[None]*(nCh+1)
blurNeighbours2=[None]*(nCh+1)
indices=[None]*(nCh+1)
......@@ -93,7 +94,7 @@ def permutohedral_prepare(position_vectors):
batch_index=tf.reshape(tf.meshgrid(tf.range(batch_size),tf.zeros([nVoxels],dtype=tf.int32))[0],[-1,1])
indices[dit] = tf.stack([tf.to_int32(indextable.lookup(i64keys[dit])),batch_index[:,0]],1) # where in the splat variable each simplex vertex is
return barycentric,blurNeighbours1,blurNeighbours2,indices
def permutohedral_compute(data_vectors,barycentric,blurNeighbours1,blurNeighbours2,indices,name,reverse):
batch_size=tf.shape(data_vectors)[0]
numSimplexCorners=int(barycentric.get_shape()[-1])
......@@ -139,22 +140,22 @@ def permutohedral_compute(data_vectors,barycentric,blurNeighbours1,blurNeighbour
return sliced
# Differentiation can be done using the permutohedral lattice with the Gaussian filter order reversed
# To get this to work with automatic differentiation we use a hack attributed to Sergey Ioffe
# To get this to work with automatic differentiation we use a hack attributed to Sergey Ioffe
# mentioned here: http://stackoverflow.com/questions/36456436/how-can-i-define-only-the-gradient-for-a-tensorflow-subgraph/36480182
# Define custom py_func which takes also a grad op as argument: from https://gist.github.com/harpone/3453185b41d8d985356cbe5e57d67342
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
    """Wrap ``tf.py_func`` so that a custom gradient function can be attached.

    Registers ``grad`` under a freshly generated gradient name and remaps the
    "PyFunc" op to it via ``gradient_override_map`` (the trick attributed to
    Sergey Ioffe; see the Stack Overflow / gist links in the comments above).

    Parameters mirror ``tf.py_func``; ``grad`` is the gradient op to register.
    """
    # A random suffix keeps each registration unique, avoiding name clashes
    # with gradients registered by earlier calls.
    override_name = 'PyFuncGrad{}'.format(numpy.random.randint(0, int(1E+8)))
    tf.RegisterGradient(override_name)(grad)  # see _MySquareGrad for grad example
    graph = tf.get_default_graph()
    with graph.gradient_override_map({"PyFunc": override_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
def gradientStub(data_vectors,barycentric,blurNeighbours1,blurNeighbours2,indices,name):
# This is a stub operator whose purpose is to allow us to overwrite the gradient.
# This is a stub operator whose purpose is to allow us to overwrite the gradient.
# The forward pass gives zeros and the backward pass gives the correct gradients for the permutohedral_compute function
return py_func(lambda data_vectors,barycentric,blurNeighbours1,blurNeighbours2,indices: data_vectors*0,
[data_vectors,barycentric,blurNeighbours1,blurNeighbours2,indices],
......@@ -166,8 +167,8 @@ def gradientStub(data_vectors,barycentric,blurNeighbours1,blurNeighbours2,indice
def permutohedral_gen(permutohedral, data_vectors, name):
    """Apply permutohedral-lattice filtering with a custom backward pass.

    The forward value is produced by ``permutohedral_compute`` but wrapped in
    ``tf.stop_gradient``; ``gradientStub`` contributes zeros on the forward
    pass while carrying the correct gradient on the backward pass.
    """
    barycentric, blur_nbr1, blur_nbr2, indices = permutohedral
    stub = gradientStub(
        data_vectors, barycentric, blur_nbr1, blur_nbr2, indices, name)
    filtered = permutohedral_compute(
        data_vectors, barycentric, blur_nbr1, blur_nbr2, indices,
        name, reverse=False)
    filtered = tf.reshape(filtered, data_vectors.get_shape())
    return stub + tf.stop_gradient(filtered)
def ftheta(U,H1,permutohedrals,mu,kernel_weights, aspect_ratio,name):
nCh=U.get_shape().as_list()[-1]
batch_size=int(U.get_shape()[0])
......@@ -188,7 +189,7 @@ def ftheta(U,H1,permutohedrals,mu,kernel_weights, aspect_ratio,name):
class CRFAsRNNLayer(TrainableLayer):
"""
This class defines a layer implementing CRFAsRNN described in [1] using
This class defines a layer implementing CRFAsRNN described in [1] using
a bilateral and a spatial kernel as in [2].
Essentially, this layer smooths its input based on a distance in a feature
space comprising spatial and feature dimensions.
......@@ -197,8 +198,8 @@ class CRFAsRNNLayer(TrainableLayer):
"""
def __init__(self,alpha=5.,beta=5.,gamma=5.,T=5,aspect_ratio=[1.,1.,1.], name="crf_as_rnn"):
"""
Parameters:
alpha: bandwidth for spatial coordinates in bilateral kernel.
Parameters:
alpha: bandwidth for spatial coordinates in bilateral kernel.
Higher values cause more spatial blurring
beta: bandwidth for feature coordinates in bilateral kernel
Higher values cause more feature blurring
......@@ -220,7 +221,7 @@ class CRFAsRNNLayer(TrainableLayer):
Parameters:
I: feature maps defining the non-spatial dimensions within which smoothing is performed
For example, to smooth U within regions of similar intensity this would be the
image intensity
image intensity
U: activation maps to smooth
"""
batch_size=int(U.get_shape()[0])
......@@ -235,11 +236,11 @@ class CRFAsRNNLayer(TrainableLayer):
nCh=U.get_shape()[-1]
mu = tf.get_variable('Compatibility',initializer=tf.constant(numpy.reshape(numpy.eye(nCh),[1,1,1,nCh,nCh]),dtype=tf.float32))
kernel_weights = [tf.get_variable("FilterWeights"+str(idx), shape=[1,1,1,1,nCh], initializer=tf.zeros_initializer()) for idx,k in enumerate(permutohedrals)]
for t in range(self._T):
H1.append(ftheta(U,H1[-1],permutohedrals,mu,kernel_weights, aspect_ratio=self._aspect_ratio,name=self._name+str(t)))
return H1[-1]
# -*- coding: utf-8 -*-
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
# Modifications copyright 2017 The NiftyNet Authors.
#
......
# -*- coding: utf-8 -*-
"""
Resampler layer initially implemented in
https://cmiclab.cs.ucl.ac.uk/CMIC/NiftyNet/blob/v0.2.0.post1/niftynet/layer/spatial_transformer.py
......
# -*- coding: utf-8 -*-
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
# Modifications copyright 2017 The NiftyNet Authors.
#
......
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
from functools import wraps
......
#!/usr/bin/python
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import argparse
import math
......
# -*- coding: utf-8 -*-
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
# Modifications copyright 2017 The NiftyNet Authors. All Rights Reserved.
#
......
# -*- coding: utf-8 -*-
# regular expressions to match tuples from user inputs
# kindly provided by
# Luis Carlos Garcia Peraza Herrera <luis.herrera.14@ucl.ac.uk>
......
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
def get_niftynet_version_string():
......@@ -40,20 +42,35 @@ def get_niftynet_version():
def get_niftynet_git_version():
"""Return a version string based on the git repository, conforming to PEP440"""
"""
Return a version string based on the git repository,
conforming to PEP440
"""
from subprocess import check_output
# Describe the version relative to last tag
command_git = ['git', 'describe', '--match', 'v[0-9]*']
version_buf = check_output(command_git, stderr=open('/dev/null', 'w')).rstrip()
version_buf = check_output(command_git,
stderr=open('/dev/null', 'w')).rstrip()
# Exclude the 'v' for PEP440 conformity, see
# https://www.python.org/dev/peps/pep-0440/#public-version-identifiers
version_buf = version_buf[1:]
# Split the git describe output, as it may not be a tagged commit
tokens = version_buf.split('-')
try:
# converting if string returned as bytes object
# (not Unicode str object)
version_buf = version_buf.decode('utf-8')
except AttributeError:
pass
try:
tokens = version_buf.split('-')
except TypeError:
tokens = ['unknown token']
if len(tokens) > 1: # not a tagged commit
# Format a developmental release identifier according to PEP440, see:
# https://www.python.org/dev/peps/pep-0440/#developmental-releases
......
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from packaging import version
import re
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment