Commit 9374592f authored by Marc Modat

Change in the pyramidal approach (downsampling). Image dimensions can now be smaller than 32 voxels.

parent be1a5ce8
......@@ -117,7 +117,7 @@ void Usage(char *exec)
printf("\t-affFlirt <filename>\tFilename which contains an input affine transformation from Flirt [none]\n");
printf("\t-tmask <filename>\tFilename of a mask image in the target space\n");
printf("\t-result <filename>\tFilename of the resampled image [outputResult.nii]\n");
printf("\t-maxit <int>\t\tNumber of iteration per level [3]\n");
printf("\t-maxit <int>\t\tNumber of iteration per level [5]\n");
printf("\t-smooT <float>\t\tSmooth the target image using the specified sigma (mm) [0]\n");
printf("\t-smooS <float>\t\tSmooth the source image using the specified sigma (mm) [0]\n");
printf("\t-ln <int>\t\tNumber of level to perform [3]\n");
......@@ -127,7 +127,7 @@ void Usage(char *exec)
printf("\t-bgi <int> <int> <int>\tForce the background value during\n\t\t\t\tresampling to have the same value as this voxel in the source image [none]\n");
printf("\t-%%v <int>\t\tPercentage of block to use [20]\n");
printf("\t-%%v <int>\t\tPercentage of block to use [50]\n");
printf("\t-%%i <int>\t\tPercentage of inlier for the LTS [50]\n");
#ifdef _USE_CUDA
printf("\t-gpu \t\t\tTo use the GPU implementation [no]\n");
......@@ -167,7 +167,7 @@ int main(int argc, char **argv)
flag->affineFlag=1;
flag->rigidFlag=1;
param->block_percent_to_use=20;
param->block_percent_to_use=50;
param->inlier_lts=50;
flag->alignCenterFlag=1;
......@@ -270,7 +270,7 @@ int main(int argc, char **argv)
if(!flag->levelNumberFlag) param->levelNumber=3;
/* Read the maximum number of iteration */
if(!flag->maxIterationFlag) param->maxIteration=3;
if(!flag->maxIterationFlag) param->maxIteration=5;
if(!flag->level2PerformFlag) param->level2Perform=param->levelNumber;
......@@ -423,6 +423,7 @@ int main(int argc, char **argv)
reg_downsampleImage<PrecisionTYPE>(tempMaskImage, 0);
}
}
targetMask = (int *)malloc(targetImage->nvox*sizeof(int));
if(flag->targetMaskFlag){
reg_tool_binaryImage2int(tempMaskImage, targetMask, activeVoxelNumber);
......@@ -434,10 +435,14 @@ int main(int argc, char **argv)
}
/* smooth the input image if appropriate */
if(flag->targetSigmaFlag)
reg_gaussianSmoothing<PrecisionTYPE>(targetImage, param->targetSigmaValue);
if(flag->sourceSigmaFlag)
reg_gaussianSmoothing<PrecisionTYPE>(sourceImage, param->sourceSigmaValue);
if(flag->targetSigmaFlag){
bool smoothAxis[8]={true,true,true,true,true,true,true,true};
reg_gaussianSmoothing<PrecisionTYPE>(targetImage, param->targetSigmaValue, smoothAxis);
}
if(flag->sourceSigmaFlag){
bool smoothAxis[8]={true,true,true,true,true,true,true,true};
reg_gaussianSmoothing<PrecisionTYPE>(sourceImage, param->sourceSigmaValue, smoothAxis);
}
/* allocate the deformation Field image */
nifti_image *positionFieldImage = nifti_copy_nim_info(targetImage);
......
......@@ -86,9 +86,6 @@ int main(int argc, char **argv)
maskImage[i]=i;
}
nifti_set_filenames(targetImage, "temp.nii", 0, 0);
nifti_image_write(targetImage);
// Deformation field image is created
dim_img[0]=5;
dim_img[1]=dimension;
......@@ -167,51 +164,18 @@ nifti_image_write(targetImage);
nodeNMIGradientImage->nbyper = sizeof(float);
nodeNMIGradientImage->data = (void *)calloc(nodeNMIGradientImage->nvox, nodeNMIGradientImage->nbyper);
_reg_blockMatchingParam blockMatchingParams;
initialise_block_matching_method( targetImage,
&blockMatchingParams,
100, // percentage of block kept
50, // percentage of inlier in the optimisation process
maskImage);
// GPU VARIABLE INITIALISATION AND ALLOCATION
float *targetImageArray_d;
if(cudaCommon_allocateArrayToDevice<float>(&targetImageArray_d, targetImage->dim)) return 1;
if(cudaCommon_transferNiftiToArrayOnDevice<float>(&targetImageArray_d, targetImage)) return 1;
cudaArray *sourceImageArray_d;
if(cudaCommon_allocateArrayToDevice<float>(&sourceImageArray_d, sourceImage->dim)) return 1;
if(cudaCommon_transferNiftiToArrayOnDevice<float>(&sourceImageArray_d,sourceImage)) return 1;
float *resultImageArray_d;
if(cudaCommon_allocateArrayToDevice<float>(&resultImageArray_d, targetImage->dim)) return 1;
float4 *controlPointImageArray_d;
if(cudaCommon_allocateArrayToDevice<float4>(&controlPointImageArray_d, controlPointImage->dim)) return 1;
if(cudaCommon_transferNiftiToArrayOnDevice<float4>(&controlPointImageArray_d,controlPointImage)) return 1;
float4 *bestControlPointPosition_d;
if(cudaCommon_allocateArrayToDevice<float4>(&bestControlPointPosition_d, controlPointImage->dim)) return 1;
if(cudaCommon_transferNiftiToArrayOnDevice<float4>(&bestControlPointPosition_d,controlPointImage)) return 1;
float *logJointHistogram_d;
CUDA_SAFE_CALL(cudaMalloc((void **)&logJointHistogram_d, binning*(binning+2)*sizeof(float)));
float4 *voxelNMIGradientArray_d;
if(cudaCommon_allocateArrayToDevice(&voxelNMIGradientArray_d, resultImage->dim)) return 1;
float4 *nodeNMIGradientArray_d;
if(cudaCommon_allocateArrayToDevice(&nodeNMIGradientArray_d, controlPointImage->dim)) return 1;
int *targetMask_d;
CUDA_SAFE_CALL(cudaMalloc((void **)&targetMask_d, targetImage->nvox*sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpy(targetMask_d, maskImage, targetImage->nvox*sizeof(int), cudaMemcpyHostToDevice));
float4 *positionFieldImageArray_d;
CUDA_SAFE_CALL(cudaMalloc((void **)&positionFieldImageArray_d, targetImage->nvox*sizeof(float4)));
float4 *resultGradientArray_d;
CUDA_SAFE_CALL(cudaMalloc((void **)&resultGradientArray_d, targetImage->nvox*sizeof(float4)));
int *activeBlock_d;
CUDA_SAFE_CALL(cudaMalloc((void **)&activeBlock_d,
blockMatchingParams.blockNumber[0]*blockMatchingParams.blockNumber[1]*blockMatchingParams.blockNumber[2]*sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpy(activeBlock_d, blockMatchingParams.activeBlock,
blockMatchingParams.blockNumber[0]*blockMatchingParams.blockNumber[1]*blockMatchingParams.blockNumber[2]*sizeof(int),
cudaMemcpyHostToDevice));
float *targetPosition_d;
CUDA_SAFE_CALL(cudaMalloc((void **)&targetPosition_d, blockMatchingParams.activeBlockNumber*3*sizeof(float)));
float *resultPosition_d;
CUDA_SAFE_CALL(cudaMalloc((void **)&resultPosition_d, blockMatchingParams.activeBlockNumber*3*sizeof(float)));
float4 *deformationFieldImageArray_d;
CUDA_SAFE_CALL(cudaMalloc((void **)&deformationFieldImageArray_d, targetImage->nvox*sizeof(float4)));
time_t start,end;
int minutes, seconds, cpuTime, gpuTime, maxIt;
......@@ -248,7 +212,7 @@ nifti_image_write(targetImage);
for(int i=0; i<maxIt; ++i){
reg_affine_positionField_gpu( affineTransformation,
targetImage,
&positionFieldImageArray_d);
&deformationFieldImageArray_d);
}
time(&end);
gpuTime=(end-start);
......@@ -260,6 +224,9 @@ nifti_image_write(targetImage);
// SPLINE DEFORMATION FIELD CREATION
float4 *controlPointImageArray_d;
if(cudaCommon_allocateArrayToDevice<float4>(&controlPointImageArray_d, controlPointImage->dim)) return 1;
if(cudaCommon_transferNiftiToArrayOnDevice<float4>(&controlPointImageArray_d,controlPointImage)) return 1;
maxIt=50000 / dimension;
time(&start);
for(int i=0; i<maxIt; ++i){
......@@ -279,7 +246,7 @@ nifti_image_write(targetImage);
reg_bspline_gpu(controlPointImage,
targetImage,
&controlPointImageArray_d,
&positionFieldImageArray_d,
&deformationFieldImageArray_d,
&targetMask_d,
targetImage->nvox);
}
......@@ -292,6 +259,8 @@ nifti_image_write(targetImage);
printf("Spline deformation done\n");
// LINEAR INTERPOLATION
float *resultImageArray_d;
if(cudaCommon_allocateArrayToDevice<float>(&resultImageArray_d, targetImage->dim)) return 1;
maxIt=100000 / dimension;
time(&start);
for(int i=0; i<maxIt; ++i){
......@@ -314,7 +283,7 @@ nifti_image_write(targetImage);
sourceImage,
&resultImageArray_d,
&sourceImageArray_d,
&positionFieldImageArray_d,
&deformationFieldImageArray_d,
&targetMask_d,
targetImage->nvox,
0);
......@@ -327,40 +296,9 @@ nifti_image_write(targetImage);
fprintf(outputFile, "Linear interpolation ratio - %g time(s)\n\n", (float)cpuTime/(float)gpuTime);
printf("Linear interpolation done\n");
// BLOCK MATCHING
maxIt=2000 / dimension;
time(&start);
for(int i=0; i<maxIt; ++i){
block_matching_method<float>( targetImage,
resultImage,
&blockMatchingParams,
maskImage);
}
time(&end);
cpuTime=(end-start);
minutes = (int)floorf(float(cpuTime)/60.0f);
seconds = (int)(cpuTime - 60*minutes);
fprintf(outputFile, "CPU - %i block matching computations - %i min %i sec\n", maxIt, minutes, seconds);
time(&start);
for(int i=0; i<maxIt; ++i){
block_matching_method_gpu( targetImage,
resultImage,
&blockMatchingParams,
&targetImageArray_d,
&resultImageArray_d,
&targetPosition_d,
&resultPosition_d,
&activeBlock_d);
}
time(&end);
gpuTime=(end-start);
minutes = (int)floorf(float(gpuTime)/60.0f);
seconds = (int)(gpuTime - 60*minutes);
fprintf(outputFile, "GPU - %i block matching computations - %i min %i sec\n", maxIt, minutes, seconds);
fprintf(outputFile, "Block-Matching ratio - %g time(s)\n\n", (float)cpuTime/(float)gpuTime);
printf("Block-matching done\n");
// SPATIAL GRADIENT COMPUTATION
float4 *resultGradientArray_d;
CUDA_SAFE_CALL(cudaMalloc((void **)&resultGradientArray_d, targetImage->nvox*sizeof(float4)));
maxIt=100000 / dimension;
time(&start);
for(int i=0; i<maxIt; ++i){
......@@ -381,7 +319,7 @@ nifti_image_write(targetImage);
reg_getSourceImageGradient_gpu( targetImage,
sourceImage,
&sourceImageArray_d,
&positionFieldImageArray_d,
&deformationFieldImageArray_d,
&resultGradientArray_d,
targetImage->nvox);
}
......@@ -391,6 +329,9 @@ nifti_image_write(targetImage);
seconds = (int)(gpuTime - 60*minutes);
fprintf(outputFile, "GPU - %i spatial gradient computations - %i min %i sec\n", maxIt, minutes, seconds);
fprintf(outputFile, "Spatial gradient ratio - %g time(s)\n\n", (float)cpuTime/(float)gpuTime);
cudaCommon_free( &sourceImageArray_d );
nifti_image_free(sourceImage);
cudaCommon_free( (void **)&deformationFieldImageArray_d );
printf("Spatial gradient done\n");
// JOINT HISTOGRAM COMPUTATION
......@@ -406,6 +347,8 @@ nifti_image_write(targetImage);
// VOXEL-BASED NMI GRADIENT COMPUTATION
float4 *voxelNMIGradientArray_d;
if(cudaCommon_allocateArrayToDevice(&voxelNMIGradientArray_d, resultImage->dim)) return 1;
maxIt=100000 / dimension;
time(&start);
for(int i=0; i<maxIt; ++i){
......@@ -424,6 +367,14 @@ nifti_image_write(targetImage);
minutes = (int)floorf(float(cpuTime)/60.0f);
seconds = (int)(cpuTime - 60*minutes);
fprintf(outputFile, "CPU - %i voxel-based NMI gradient computations - %i min %i sec\n", maxIt, minutes, seconds);
float *logJointHistogram_d;
CUDA_SAFE_CALL(cudaMalloc((void **)&logJointHistogram_d, binning*(binning+2)*sizeof(float)));
float *tempB=(float *)malloc(binning*(binning+2)*sizeof(float));
for(int i=0; i<binning*(binning+2);i++){
tempB[i]=(float)logJointHistogram[i];
}
CUDA_SAFE_CALL(cudaMemcpy(logJointHistogram_d, tempB, binning*(binning+2)*sizeof(float), cudaMemcpyHostToDevice));
free(tempB);
time(&start);
for(int i=0; i<maxIt; ++i){
reg_getVoxelBasedNMIGradientUsingPW_gpu(targetImage,
......@@ -444,9 +395,14 @@ nifti_image_write(targetImage);
seconds = (int)(gpuTime - 60*minutes);
fprintf(outputFile, "GPU - %i voxel-based NMI gradient computations - %i min %i sec\n", maxIt, minutes, seconds);
fprintf(outputFile, "Voxel-based NMI gradient ratio - %g time(s)\n\n", (float)cpuTime/(float)gpuTime);
cudaCommon_free((void **)&logJointHistogram_d);
cudaCommon_free((void **)&resultGradientArray_d);
CUDA_SAFE_CALL(cudaFree(targetMask_d));
printf("Voxel-based NMI gradient done\n");
// NODE-BASED NMI GRADIENT COMPUTATION
float4 *nodeNMIGradientArray_d;
if(cudaCommon_allocateArrayToDevice(&nodeNMIGradientArray_d, controlPointImage->dim)) return 1;
maxIt=10000 / dimension;
int smoothingRadius[3];
smoothingRadius[0] = (int)floor( 2.0*controlPointImage->dx/targetImage->dx );
......@@ -478,6 +434,8 @@ nifti_image_write(targetImage);
seconds = (int)(gpuTime - 60*minutes);
fprintf(outputFile, "GPU - %i node-based NMI gradient computations - %i min %i sec\n", maxIt, minutes, seconds);
fprintf(outputFile, "Node-based NMI gradient ratio - %g time(s)\n\n", (float)cpuTime/(float)gpuTime);
cudaCommon_free((void **)&voxelNMIGradientArray_d);
cudaCommon_free((void **)&nodeNMIGradientArray_d);
printf("Node-based NMI gradient done\n");
// BENDING ENERGY COMPUTATION
......@@ -531,8 +489,61 @@ nifti_image_write(targetImage);
seconds = (int)(gpuTime - 60*minutes);
fprintf(outputFile, "GPU - %i BE gradient computations - %i min %i sec\n", maxIt, minutes, seconds);
fprintf(outputFile, "BE gradient ratio - %g time(s)\n\n", (float)cpuTime/(float)gpuTime);
cudaCommon_free( (void **)&controlPointImageArray_d );
printf("BE gradient done\n");
// BLOCK MATCHING
_reg_blockMatchingParam blockMatchingParams;
initialise_block_matching_method( targetImage,
&blockMatchingParams,
100, // percentage of block kept
50, // percentage of inlier in the optimisation process
maskImage);
int *activeBlock_d;
CUDA_SAFE_CALL(cudaMalloc((void **)&activeBlock_d,
blockMatchingParams.blockNumber[0]*blockMatchingParams.blockNumber[1]*blockMatchingParams.blockNumber[2]*sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpy(activeBlock_d, blockMatchingParams.activeBlock,
blockMatchingParams.blockNumber[0]*blockMatchingParams.blockNumber[1]*blockMatchingParams.blockNumber[2]*sizeof(int),
cudaMemcpyHostToDevice));
float *targetPosition_d;
CUDA_SAFE_CALL(cudaMalloc((void **)&targetPosition_d, blockMatchingParams.activeBlockNumber*3*sizeof(float)));
float *resultPosition_d;
CUDA_SAFE_CALL(cudaMalloc((void **)&resultPosition_d, blockMatchingParams.activeBlockNumber*3*sizeof(float)));
maxIt=2000 / dimension;
time(&start);
for(int i=0; i<maxIt; ++i){
block_matching_method<float>( targetImage,
resultImage,
&blockMatchingParams,
maskImage);
}
time(&end);
cpuTime=(end-start);
minutes = (int)floorf(float(cpuTime)/60.0f);
seconds = (int)(cpuTime - 60*minutes);
fprintf(outputFile, "CPU - %i block matching computations - %i min %i sec\n", maxIt, minutes, seconds);
time(&start);
for(int i=0; i<maxIt; ++i){
block_matching_method_gpu( targetImage,
resultImage,
&blockMatchingParams,
&targetImageArray_d,
&resultImageArray_d,
&targetPosition_d,
&resultPosition_d,
&activeBlock_d);
}
time(&end);
gpuTime=(end-start);
minutes = (int)floorf(float(gpuTime)/60.0f);
seconds = (int)(gpuTime - 60*minutes);
fprintf(outputFile, "GPU - %i block matching computations - %i min %i sec\n", maxIt, minutes, seconds);
fprintf(outputFile, "Block-Matching ratio - %g time(s)\n\n", (float)cpuTime/(float)gpuTime);
cudaCommon_free((void **)activeBlock_d);
cudaCommon_free((void **)targetPosition_d);
cudaCommon_free((void **)resultPosition_d);
printf("Block-matching done\n");
fclose(outputFile);
/* Monsieur Propre */
......@@ -549,16 +560,7 @@ nifti_image_write(targetImage);
free(logJointHistogram);
cudaCommon_free( (void **)&targetImageArray_d );
cudaCommon_free( &sourceImageArray_d );
cudaCommon_free( (void **)&controlPointImageArray_d );
cudaCommon_free( (void **)&resultImageArray_d );
cudaCommon_free( (void **)&positionFieldImageArray_d );
CUDA_SAFE_CALL(cudaFree(targetMask_d));
cudaCommon_free((void **)&resultGradientArray_d);
cudaCommon_free((void **)&voxelNMIGradientArray_d);
cudaCommon_free((void **)&nodeNMIGradientArray_d);
cudaCommon_free((void **)&bestControlPointPosition_d);
cudaCommon_free((void **)&logJointHistogram_d);
return 0;
}
......
......@@ -575,10 +575,14 @@ int main(int argc, char **argv)
/* smooth the input image if appropriate */
if(flag->targetSigmaFlag)
reg_gaussianSmoothing<PrecisionTYPE>( targetImage, param->targetSigmaValue);
if(flag->sourceSigmaFlag)
reg_gaussianSmoothing<PrecisionTYPE>( sourceImage, param->sourceSigmaValue);
if(flag->targetSigmaFlag){
bool smoothAxis[8]={true,true,true,true,true,true,true,true};
reg_gaussianSmoothing<PrecisionTYPE>(targetImage, param->targetSigmaValue, smoothAxis);
}
if(flag->sourceSigmaFlag){
bool smoothAxis[8]={true,true,true,true,true,true,true,true};
reg_gaussianSmoothing<PrecisionTYPE>(sourceImage, param->sourceSigmaValue, smoothAxis);
}
if(level==0){
if(!flag->inputCPPFlag){
......
......@@ -857,116 +857,125 @@ void reg_tools_addSubMulDivValue( nifti_image *img1,
/* *************************************************************** */
/* *************************************************************** */
template <class PrecisionTYPE, class ImageTYPE>
void reg_gaussianSmoothing1( nifti_image *image,
float sigma)
void reg_gaussianSmoothing1(nifti_image *image,
float sigma,
bool downXYZ[8])
{
ImageTYPE *imagePtr = static_cast<ImageTYPE *>(image->data);
PrecisionTYPE *resultValue=(PrecisionTYPE *)malloc(image->nvox * sizeof(PrecisionTYPE));
for(int n=1; n<=image->dim[0]; n++){
float currentSigma;
if(sigma>0) currentSigma=sigma/image->pixdim[n];
else currentSigma=fabs(sigma); // voxel based if negative value
int radius=(int)ceil(currentSigma*3.0f);
PrecisionTYPE *kernel = new PrecisionTYPE[2*radius+1];
PrecisionTYPE kernelSum=0;
for(int i=-radius; i<=radius; i++){
kernel[radius+i]=(PrecisionTYPE)(exp( -(i*i)/(2.0*currentSigma*currentSigma)) / (currentSigma*2.506628274631)); // 2.506... = sqrt(2*pi)
kernelSum += kernel[radius+i];
}
for(int i=-radius; i<=radius; i++) kernel[radius+i] /= kernelSum;
if(downXYZ[n]==true){
float currentSigma;
if(sigma>0) currentSigma=sigma/image->pixdim[n];
else currentSigma=fabs(sigma); // voxel based if negative value
int radius=(int)ceil(currentSigma*3.0f);
PrecisionTYPE *kernel = new PrecisionTYPE[2*radius+1];
PrecisionTYPE kernelSum=0;
for(int i=-radius; i<=radius; i++){
kernel[radius+i]=(PrecisionTYPE)(exp( -(i*i)/(2.0*currentSigma*currentSigma)) / (currentSigma*2.506628274631)); // 2.506... = sqrt(2*pi)
kernelSum += kernel[radius+i];
}
for(int i=-radius; i<=radius; i++) kernel[radius+i] /= kernelSum;
#ifdef _VERBOSE
printf("[VERBOSE]smoothing dim[%i] radius[%i] kernelSum[%g]\n", n, radius, kernelSum);
printf("[VERBOSE]smoothing dim[%i] radius[%i] kernelSum[%g]\n", n, radius, kernelSum);
#endif
int increment=1;
switch(n){
case 1: increment=1;break;
case 2: increment=image->nx;break;
case 3: increment=image->nx*image->ny;break;
case 4: increment=image->nx*image->ny*image->nz;break;
case 5: increment=image->nx*image->ny*image->nz*image->nt;break;
case 6: increment=image->nx*image->ny*image->nz*image->nt*image->nu;break;
case 7: increment=image->nx*image->ny*image->nz*image->nt*image->nu*image->nv;break;
}
unsigned int index=0;
while(index<image->nvox){
for(int x=0; x<image->dim[n]; x++){
int current = index - increment*radius;
PrecisionTYPE value=0;
for(int j=-radius; j<=radius; j++){
if(-1<current && current<(int)image->nvox){
value += (PrecisionTYPE)(imagePtr[current]*kernel[j+radius]);
}
current += increment;
}
resultValue[index]=value;
index++;
}
}
for(unsigned int i=0; i<image->nvox; i++) imagePtr[i]=(ImageTYPE)resultValue[i];
delete[] kernel;
int increment=1;
switch(n){
case 1: increment=1;break;
case 2: increment=image->nx;break;
case 3: increment=image->nx*image->ny;break;
case 4: increment=image->nx*image->ny*image->nz;break;
case 5: increment=image->nx*image->ny*image->nz*image->nt;break;
case 6: increment=image->nx*image->ny*image->nz*image->nt*image->nu;break;
case 7: increment=image->nx*image->ny*image->nz*image->nt*image->nu*image->nv;break;
}
unsigned int index=0;
while(index<image->nvox){
for(int x=0; x<image->dim[n]; x++){
int current = index - increment*radius;
PrecisionTYPE value=0;
for(int j=-radius; j<=radius; j++){
if(-1<current && current<(int)image->nvox){
value += (PrecisionTYPE)(imagePtr[current]*kernel[j+radius]);
}
current += increment;
}
resultValue[index]=value;
index++;
}
}
for(unsigned int i=0; i<image->nvox; i++) imagePtr[i]=(ImageTYPE)resultValue[i];
delete[] kernel;
}
}
free(resultValue);
}
/* *************************************************************** */
template <class PrecisionTYPE>
void reg_gaussianSmoothing( nifti_image *image,
float sigma)
float sigma,
bool downXYZ[8])
{
if(sigma==0.0) return;
switch(image->datatype){
case NIFTI_TYPE_UINT8:
reg_gaussianSmoothing1<PrecisionTYPE,unsigned char>(image, sigma);
reg_gaussianSmoothing1<PrecisionTYPE,unsigned char>(image, sigma, downXYZ);
break;
case NIFTI_TYPE_INT8:
reg_gaussianSmoothing1<PrecisionTYPE,char>(image, sigma);
reg_gaussianSmoothing1<PrecisionTYPE,char>(image, sigma, downXYZ);
break;
case NIFTI_TYPE_UINT16:
reg_gaussianSmoothing1<PrecisionTYPE,unsigned short>(image, sigma);
reg_gaussianSmoothing1<PrecisionTYPE,unsigned short>(image, sigma, downXYZ);
break;
case NIFTI_TYPE_INT16:
reg_gaussianSmoothing1<PrecisionTYPE,short>(image, sigma);
reg_gaussianSmoothing1<PrecisionTYPE,short>(image, sigma, downXYZ);
break;
case NIFTI_TYPE_UINT32:
reg_gaussianSmoothing1<PrecisionTYPE,unsigned int>(image, sigma);
reg_gaussianSmoothing1<PrecisionTYPE,unsigned int>(image, sigma, downXYZ);
break;
case NIFTI_TYPE_INT32:
reg_gaussianSmoothing1<PrecisionTYPE,int>(image, sigma);
reg_gaussianSmoothing1<PrecisionTYPE,int>(image, sigma, downXYZ);
break;
case NIFTI_TYPE_FLOAT32:
reg_gaussianSmoothing1<PrecisionTYPE,float>(image, sigma);
reg_gaussianSmoothing1<PrecisionTYPE,float>(image, sigma, downXYZ);
break;
case NIFTI_TYPE_FLOAT64:
reg_gaussianSmoothing1<PrecisionTYPE,double>(image, sigma);
reg_gaussianSmoothing1<PrecisionTYPE,double>(image, sigma, downXYZ);
break;
default:
printf("err\treg_smoothImage\tThe image data type is not supported\n");
return;
}
}
template void reg_gaussianSmoothing<float>(nifti_image *, float);
template void reg_gaussianSmoothing<double>(nifti_image *, float);
template void reg_gaussianSmoothing<float>(nifti_image *, float, bool[8]);
template void reg_gaussianSmoothing<double>(nifti_image *, float, bool[8]);
/* *************************************************************** */
/* *************************************************************** */
template <class PrecisionTYPE, class ImageTYPE>
void reg_downsampleImage1(nifti_image *image, int type)
{
bool downXYZ[8]={false,false,false,false,false,false,false,false};
for(int i=1;i<=image->dim[0];i++){
if((image->dim[i]/2)>=32) downXYZ[i]=true;
}
if(type==1){
/* the input image is first smooth */
reg_gaussianSmoothing<float>(image, -0.7f);
reg_gaussianSmoothing<float>(image, -0.7f, downXYZ);
}
/* the values are copied */
ImageTYPE *oldValues = (ImageTYPE *)malloc(image->nvox * image->nbyper);
ImageTYPE *imagePtr = static_cast<ImageTYPE *>(image->data);
memcpy(oldValues, imagePtr, image->nvox*image->nbyper);
free(image->data);
int oldDim[4];
for(int i=1; i<4; i++){
oldDim[i]=image->dim[i];
if(image->dim[i]>1) image->dim[i]=(int)(image->dim[i]/2.0);
if(image->pixdim[i]>0) image->pixdim[i]=image->pixdim[i]*2.0f;
if(image->dim[i]>1 && downXYZ[i]==true) image->dim[i]=(int)(image->dim[i]/2.0);
if(image->pixdim[i]>0 && downXYZ[i]==true) image->pixdim[i]=image->pixdim[i]*2.0f;
}
image->nx=image->dim[1];
image->ny=image->dim[2];
......@@ -982,8 +991,10 @@ void reg_downsampleImage1(nifti_image *image, int type)
for(int i=0; i<3; i++){
for(int j=0; j<3; j++){
oldMat.m[i][j]=image->qto_ijk.m[i][j];
image->qto_xyz.m[i][j]=image->qto_xyz.m[i][j]*2.0f;
image->sto_xyz.m[i][j]=image->sto_xyz.m[i][j]*2.0f;
if(downXYZ[j+1]==true){
image->qto_xyz.m[i][j]=image->qto_xyz.m[i][j]*2.0f;
image->sto_xyz.m[i][j]=image->sto_xyz.m[i][j]*2.0f;
}
}
}
oldMat.m[0][3]=image->qto_ijk.m[0][3];
......
......@@ -34,7 +34,8 @@ void reg_smoothImageForTrilinear( nifti_image *image,
extern "C++" template <class PrecisionTYPE>
void reg_gaussianSmoothing( nifti_image *image,
float sigma);
float sigma,
bool[8]);
extern "C++" template <class PrecisionTYPE>
void reg_downsampleImage(nifti_image *image, int);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment