diff --git a/.DS_Store b/.DS_Store
new file mode 100755
index 0000000..373b199
Binary files /dev/null and b/.DS_Store differ
diff --git a/.Rhistory b/.Rhistory
new file mode 100755
index 0000000..e69de29
diff --git a/._CMakeLists.txt b/._CMakeLists.txt
new file mode 100755
index 0000000..3266377
Binary files /dev/null and b/._CMakeLists.txt differ
diff --git a/._README.md b/._README.md
new file mode 100755
index 0000000..4e6eb47
Binary files /dev/null and b/._README.md differ
diff --git a/.gitignore b/.gitignore
old mode 100644
new mode 100755
diff --git a/CMakeLists.txt b/CMakeLists.txt
old mode 100644
new mode 100755
index d3e4f74..abed004
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -2,8 +2,9 @@
 project (rh-scripts)
 cmake_minimum_required (VERSION 2.8)
 
+# /opt/caai was changed to /homes/kovacs/toolbox for local installation
 if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
-    set (CMAKE_INSTALL_PREFIX "/opt/caai" CACHE PATH "default install path" FORCE )
+    set (CMAKE_INSTALL_PREFIX "/homes/kovacs/toolbox" CACHE PATH "default install path" FORCE )
 endif()
 
 # Add python-lib
diff --git a/README.md b/README.md
old mode 100644
new mode 100755
index 3717ce7..63e9fca
--- a/README.md
+++ b/README.md
@@ -35,6 +35,8 @@ If your training is interrupted, you can resume from last saved checkpoint by ju
 again, it will automatically resume training if you did not modify any of the parameters in the config.
 
 ## HOW TO INSTALL
+Ensure CMakeLists.txt is set to copy to a folder that you can write to. For example, change '/opt/caai' to your own toolbox location, '/homes/*username*/toolbox'.
+
 ```
 mkdir build
 cd build
@@ -43,6 +45,7 @@ make install
 ```
 ## POST INSTALLATION
 Add "source /opt/caai/toolkit-config.sh" to .bashrc / .bash_profile
+In the above, replace /opt/caai/ with the location where the CNN toolbox is installed, for instance "/toolbox/".
 
 ## KNOWN ISSUES
@@ -80,7 +83,8 @@ from CAAI.losses import rmse
 
 cnn = CNN(model_name='v1',
           data_pickle='/users/claes/projects/LowdosePET/PETrecon/HjerteFDG_mnc/data_6fold.pickle',
-          data_folder='/users/claes/projects/LowdosePET/PETrecon/HjerteFDG_mnc'
+          data_folder='/users/claes/projects/LowdosePET/PETrecon/HjerteFDG_mnc',
+          network_architecture='custom' # DGK added
           )
 
 cnn.data_loader = DataGenerator(cnn.config)
@@ -114,3 +118,4 @@ cnn.compile_network()
 
 cnn.train()
 ```
+
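The `network_architecture='custom'` option added to the README example is handled in the `train.py` hunk further down, which calls `self.custom_network_architecture(inputs, config=self.config)`. A minimal sketch of plugging in your own network this way, assuming `CNN` is importable as in `scripts/main.py` and that the custom function is attached as an attribute before `build_network()` runs:

```python
# Sketch only: the CAAI import path and the attribute hookup are assumptions
# inferred from the train.py hunk below, not a documented API.
from keras.layers import Conv3D
from CAAI.train import CNN  # import path assumed

def my_network(inputs, config):
    # any callable taking (inputs, config) and returning an output tensor works
    return Conv3D(config['output_channels'], kernel_size=3,
                  padding='same', activation='relu')(inputs)

cnn = CNN(model_name='v1',
          data_pickle='/path/to/data.pickle',  # placeholder paths
          data_folder='/path/to/data',
          network_architecture='custom')
cnn.custom_network_architecture = my_network
```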
diff --git a/bin/merge_tensorboard_logs.py b/bin/merge_tensorboard_logs.py
old mode 100644
new mode 100755
diff --git a/pythontoolkit/._CMakeLists.txt b/pythontoolkit/._CMakeLists.txt
new file mode 100755
index 0000000..8574e0c
Binary files /dev/null and b/pythontoolkit/._CMakeLists.txt differ
diff --git a/pythontoolkit/._jaj_plot.py b/pythontoolkit/._jaj_plot.py
new file mode 100755
index 0000000..354e3e7
Binary files /dev/null and b/pythontoolkit/._jaj_plot.py differ
diff --git a/pythontoolkit/CMakeLists.txt b/pythontoolkit/CMakeLists.txt
old mode 100644
new mode 100755
index 4449b8e..c72a4f3
--- a/pythontoolkit/CMakeLists.txt
+++ b/pythontoolkit/CMakeLists.txt
@@ -11,6 +11,7 @@ INSTALL(PROGRAMS
 networks.py
 train.py
 predict.py
+jaj_plot.py
 DESTINATION CAAI
 )
diff --git a/pythontoolkit/jaj_plot.py b/pythontoolkit/jaj_plot.py
new file mode 100755
index 0000000..c8f84db
--- /dev/null
+++ b/pythontoolkit/jaj_plot.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+import numpy as np
+import matplotlib.pyplot as plt
+import os
+
+plt.close('all')
+
+def jaj_plot(im, save_plot = 1, mapc = 0, lim = 0, alph = 1, axis = 1, grid = 1, colorbar = 1, colorbar_lab = ' '):
+    if lim == 0:
+        lim = im.min(), im.max()
+        print(lim)
+    elif lim == "soft":
+        lim = -135, 215
+        print(lim)
+    if mapc == 0:
+        mapc = "gray"
+        print(mapc)
+    fov = 500.
+    v = (0, round(((fov/512)*im.shape[1])), 0, round(((fov/512)*im.shape[0]))) # rescaling the axis
+    ax = plt.imshow(im, cmap = mapc, clim = lim, alpha = alph, extent = v)
+    if colorbar == 1:
+        cbar = plt.colorbar(ax)
+        cbar.set_label(colorbar_lab)#, rotation = 90)
+    # set axes
+    plt.grid(color='gray', alpha = 0.5 , linestyle='-', linewidth=1.5)
+    if axis == 0:
+        plt.axis('off')
+    if grid == 0:
+        plt.grid(False)
+    if save_plot == 1:
+        print('saving plot at ' + os.getcwd())
+        plt.savefig(fname = 'test.png')
+    return ax
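`jaj_plot.py` is installed into the `CAAI` destination by the CMakeLists.txt change above; a hypothetical usage sketch (the import path is assumed from that install destination):

```python
# Hypothetical usage of jaj_plot; the CAAI import path is assumed from the
# CMake DESTINATION above.
import numpy as np
from CAAI.jaj_plot import jaj_plot

im = np.random.uniform(-135, 215, size=(512, 512))  # dummy CT-like slice
jaj_plot(im, lim='soft', colorbar_lab='HU', save_plot=0)  # soft-tissue window
```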
diff --git a/pythontoolkit/losses.py b/pythontoolkit/losses.py
old mode 100644
new mode 100755
index 6ad7174..84a73a5
--- a/pythontoolkit/losses.py
+++ b/pythontoolkit/losses.py
@@ -1,6 +1,21 @@
 from keras import backend as K
+import numpy as np
 
 def rmse(y_true, y_pred):
     return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
 
+def dice(y_true, y_pred):
+    #print(np.shape(y_pred))
+    smooth = 1
+    print('Calculating dice coefficient')
+    y_true_f = K.flatten(y_true)
+    y_pred_f = K.flatten(y_pred)
+    intersection = K.sum(y_true_f * y_pred_f)
+    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
+def dice_coef_loss(y_true, y_pred):
+    #print(np.shape(y_pred))
+    #print(np.shape(y_true))
+    print(K.int_shape(y_pred))
+    print(K.int_shape(y_true))
+    return -dice(y_true, y_pred)
diff --git a/pythontoolkit/networks.py b/pythontoolkit/networks.py
old mode 100644
new mode 100755
index 49124d5..c4e109f
--- a/pythontoolkit/networks.py
+++ b/pythontoolkit/networks.py
@@ -38,26 +38,84 @@ def convt_block(layer, concat, fsize):
         layer = concatenate([layer, concat], axis=-1)
         return layer
+
+    dropout = [.1,.1,.2,.2,.3,.3,.2,.2,.1]
+
     # ENCODING
-    block1, dblock1 = conv_block(X,f,.1)
-    block2, dblock2 = conv_block(dblock1,f*2**1,.1)
-    block3, dblock3 = conv_block(dblock2,f*2**2,.2)
-    block4, dblock4 = conv_block(dblock3,f*2**3,.2)
-    block5, _ = conv_block(dblock4,f*2**4,.3,downsample=False)
+    block1, dblock1 = conv_block(X,f,dropout[0])
+    block2, dblock2 = conv_block(dblock1,f*2**1,dropout[1])
+    block3, dblock3 = conv_block(dblock2,f*2**2,dropout[2])
+    block4, dblock4 = conv_block(dblock3,f*2**3,dropout[3])
+    block5, _ = conv_block(dblock4,f*2**4,dropout[4],downsample=False)
 
     # DECODING
     block7 = convt_block(block5,block4,f*2**3)
-    block8, _ = conv_block(block7,f*2**3,.3,downsample=False)
+    block8, _ = conv_block(block7,f*2**3,dropout[5],downsample=False)
 
     block9 = convt_block(block8,block3,f*2**2)
-    block10, _ = conv_block(block9,f*2**2,.2,downsample=False)
+    block10, _ = conv_block(block9,f*2**2,dropout[6],downsample=False)
 
     block11 = convt_block(block10,block2,f*2**1)
-    block12, _ = conv_block(block11,f*2**1,.2,downsample=False)
+    block12, _ = conv_block(block11,f*2**1,dropout[7],downsample=False)
 
     block13 = convt_block(block12,block1,f)
-    block14, _ = conv_block(block13,f,.1,downsample=False)
+    block14, _ = conv_block(block13,f,dropout[8],downsample=False)
 
     output = Conv3D(dims_out,kernel_size=3, kernel_regularizer=regularizers.l2(1e-1),
                     kernel_initializer='he_normal', padding='same',strides=1, activation='relu')(block14)
 
     return output
+
+def unet_8_slice(X, config):
+    print('---------------------')
+    print('---------------------')
+    print('Running unet_8_slice')
+    print('---------------------')
+    print('---------------------')
+    f, dims_out = config['n_base_filters'],config['output_channels']
+    def conv_block(layer,fsize,dropout,downsample=True):
+        for i in range(1,3):
+            layer = Conv3D(fsize, kernel_size=3, kernel_regularizer=regularizers.l2(1e-1),
+                           kernel_initializer='he_normal', padding='same',strides=1)(layer)
+            layer = BatchNormalization()(layer)
+            layer = Activation('relu')(layer)
+            layer = Dropout(dropout)(layer)
+        if downsample:
+            downsample = Conv3D(fsize*2, kernel_size=3, kernel_regularizer=regularizers.l2(1e-1),
+                                kernel_initializer='he_normal', padding='same', strides=2)(layer)
+            downsample = BatchNormalization()(downsample)
+            downsample = Activation('relu')(downsample)
+        return layer, downsample
+
+    def convt_block(layer, concat, fsize):
+        layer = Conv3DTranspose(fsize, kernel_size=3, kernel_regularizer=regularizers.l2(1e-1),
+                                kernel_initializer='he_normal', padding='same', strides=2)(layer)
+        layer = BatchNormalization()(layer)
+        layer = Activation('relu')(layer)
+        layer = concatenate([layer, concat], axis=-1)
+        return layer
+
+    # Dropout values
+    dropout = [.1,.1,.2,.3,.2,.2,.1]
+
+    # ENCODING
+    block1, dblock1 = conv_block(X,f,dropout[0])
+    block2, dblock2 = conv_block(dblock1,f*2**1,dropout[1])
+    block3, dblock3 = conv_block(dblock2,f*2**2,dropout[2])
+    block4, _ = conv_block(dblock3,f*2**3,dropout[3],downsample=False)
+
+    # DECODING
+    block5 = convt_block(block4,block3,f*2**2)
+    block6, _ = conv_block(block5,f*2**2,dropout[4],downsample=False)
+
+    block7 = convt_block(block6,block2,f*2**1)
+    block8, _ = conv_block(block7,f*2**1,dropout[5],downsample=False)
+
+    block9 = convt_block(block8,block1,f)
+    block10, _ = conv_block(block9,f,dropout[6],downsample=False)
+
+    block11 = Conv3D(dims_out,kernel_size=3, kernel_regularizer=regularizers.l2(1e-1),
+                     kernel_initializer='he_normal', padding='same',strides=1, activation='relu')(block10)
+
+    output = Activation('sigmoid')(block11)
+
+    return output
+
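The sigmoid output of `unet_8_slice` pairs naturally with the Dice loss added in `losses.py`. A sketch of compiling the two together directly in Keras; the toolkit itself does this wiring via `cnn.compile_network()`, and the import paths here are assumed:

```python
# Sketch under assumed CAAI import paths; not the toolkit's own code path.
from keras.models import Model
from keras.layers import Input
from CAAI.losses import dice, dice_coef_loss
from CAAI import networks

config = {'n_base_filters': 32, 'output_channels': 1}
inputs = Input(shape=(256, 256, 8, 2))           # (x, y, z slices, channels)
outputs = networks.unet_8_slice(inputs, config)  # sigmoid output in [0, 1]
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss=dice_coef_loss, metrics=[dice])
```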
diff --git a/pythontoolkit/predict.py b/pythontoolkit/predict.py
old mode 100644
new mode 100755
diff --git a/pythontoolkit/train.py b/pythontoolkit/train.py
old mode 100644
new mode 100755
index a32d24d..ac0e304
--- a/pythontoolkit/train.py
+++ b/pythontoolkit/train.py
@@ -148,8 +148,9 @@ def build_network(self,inputs=None):
             outputs = networks.unet(inputs,f=self.config['n_base_filters'],dims_out=self.config['output_channels'])
         elif self.config['network_architecture'] == 'custom' and not self.custom_network_architecture == None:
-            outputs = self.custom_network_architecture(inputs,config=self.config)
-
+            # TO_DO: generalize for all custom networks. Fix "config=self.config"
+            outputs = self.custom_network_architecture(inputs,config=self.config) # config=self.config was exchanged for f=self.config['n_base_filters'],dims_out=self.config['output_channels']
+            #outputs = networks.unet_8_slice(inputs,f=self.config['n_base_filters'],dims_out=self.config['output_channels'])
         else:
             print("You are using a network that I dont know..")
             exit(-1)
diff --git a/pythontoolkit/version.py b/pythontoolkit/version.py
old mode 100644
new mode 100755
diff --git a/reinstall.sh b/reinstall.sh
new file mode 100755
index 0000000..531cde6
--- /dev/null
+++ b/reinstall.sh
@@ -0,0 +1,6 @@
+# Reinstall the CNN package
+# by running ./reinstall.sh from the command line
+mkdir -p build
+cd build
+cmake ..
+make install
\ No newline at end of file
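The TO_DO comment in the `train.py` hunk asks for the custom-network call to be generalized away from the hard-coded `config=self.config`. One possible approach, sketched here with a hypothetical helper that is not part of the toolkit, is to dispatch on the custom function's declared parameters:

```python
# Hypothetical helper, not part of the toolkit: pass whichever arguments the
# custom architecture function actually declares.
import inspect

def call_custom_architecture(fn, inputs, config):
    params = inspect.signature(fn).parameters
    if 'config' in params:
        return fn(inputs, config=config)
    # fall back to the unet-style signature used elsewhere in networks.py
    return fn(inputs, f=config['n_base_filters'],
              dims_out=config['output_channels'])
```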
diff --git a/scripts/._main.py b/scripts/._main.py
new file mode 100755
index 0000000..9ac86ce
Binary files /dev/null and b/scripts/._main.py differ
diff --git a/scripts/data_generator.py b/scripts/data_generator.py
old mode 100644
new mode 100755
diff --git a/scripts/generate_data_pickle.py b/scripts/generate_data_pickle.py
old mode 100644
new mode 100755
diff --git a/scripts/main.py b/scripts/main.py
old mode 100644
new mode 100755
index 4b2c129..cda066a
--- a/scripts/main.py
+++ b/scripts/main.py
@@ -5,20 +5,36 @@
 from data_generator import DataGenerator
 import pyminc.volumes.factory as pyminc
 
+# Project-specific parameters - must be set for each project
+data_base_path = '/Volumes/my_passport/project_data/lymphoma-auto-contouring/patient_data_preprocessed/'
+pickle_file = 'data_2fold.pickle'
+x = 256        # image size in x-direction
+y = 256        # image size in y-direction
+z = 8          # number of input slices to network
+d = 2          # number of input channels
+n_slices = 111 # total slices in series
+binary_output = 1 # set to 1 to get binary output by thresholding (for mask prediction), 0 to keep raw predictions
+
+# Must be set for each model trained
+n_k_fold = 0    # change for each k-fold training in same version
+version_num = 1 # change for each new version where new hyperparameters are used
+
 def train_v1():
-    cnn = CNN(model_name='v1',
-              input_patch_shape=(128,128,16),
-              input_channels=2,
+    cnn = CNN(model_name='v'+str(version_num),
+              input_patch_shape=(x,y,z),
+              input_channels=d,
               output_channels=1,
               batch_size=2,
               epochs=2000,
               learning_rate=1e-4,
               checkpoint_save_rate=50,
               loss_functions=[['mean_absolute_error',1]],
-              data_pickle='/users/claes/projects/LowdosePET/PETrecon/HjerteFDG_mnc/data_6fold.pickle',
-              data_folder='/users/claes/projects/LowdosePET/PETrecon/HjerteFDG_mnc',
-              data_pickle_kfold=1
+              data_pickle=data_base_path+pickle_file,
+              data_folder=data_base_path,
+              data_pickle_kfold=n_k_fold,
+              network_architecture='unet_8_slice', # more archs can be added by configuring networks.py and "def build_network" in train.py
+              n_base_filters=64 # can for instance use 64 or 32
               )
 
     # Attach generator
@@ -39,36 +55,40 @@ def predict(modelh5name, model_name=None):
     if model_name:
         modelbasename = model_name
 
-    summary = pickle.load( open('/users/claes/projects/LowdosePET/PETrecon/HjerteFDG_mnc/data_6fold.pickle', 'rb') )
-    for pt in summary['valid_1']:
+    summary = pickle.load( open(data_base_path+pickle_file, 'rb') )
+    for pt in summary['valid_'+str(n_k_fold)]:
         predict_patient(pt,model,modelbasename)
 
 def predict_patient(pt,model,modelbasename):
-    _lowdose_name = "FDG_01_SUV.mnc"
-    data_folder = '/users/claes/projects/LowdosePET/PETrecon/HjerteFDG_mnc'
-    fname_dat = os.path.join(data_folder,pt,'dat_01_suv_ctnorm_double.npy')
+    _predictor_fname = pt+"AVG_TrueX1-6_reshaped.mnc" # container that the predicted data is inserted into (project specific)
+    fname_dat = os.path.join(data_base_path,pt,'minc/pet-ct.npy')
     dat = np.memmap(fname_dat, dtype='double', mode='r')
-    dat = dat.reshape(128,128,-1,2)
+    dat = dat.reshape(x,y,-1,2)
 
     print("Predicting volume for %s" % pt)
 
-    predicted = np.empty((111,128,128))
-    x = 128
-    y = 128
-    z = 16
-    d = 2
-    for z_index in range(int(z/2),111-int(z/2)):
+    predicted = np.empty((n_slices,x,y))
+    for z_index in range(int(z/2),n_slices-int(z/2)):
         predicted_stack = model.predict(dat[:,:,z_index-int(z/2):z_index+int(z/2),:].reshape(1,x,y,z,d))
         if z_index == int(z/2):
             for ind in range(int(z/2)):
-                predicted[ind,:,:] = predicted_stack[0,:,:,ind].reshape(128,128)
-        if z_index == 111-int(z/2)-1:
+                predicted[ind,:,:] = predicted_stack[0,:,:,ind].reshape(x,y)
+        if z_index == n_slices-int(z/2)-1:
             for ind in range(int(z/2)):
-                predicted[z_index+ind,:,:] = predicted_stack[0,:,:,int(z/2)+ind].reshape(128,128)
-        predicted[z_index,:,:] = predicted_stack[0,:,:,int(z/2)].reshape(128,128)
+                predicted[z_index+ind,:,:] = predicted_stack[0,:,:,int(z/2)+ind].reshape(x,y)
+        predicted[z_index,:,:] = predicted_stack[0,:,:,int(z/2)].reshape(x,y)
 
     predicted_full = predicted
     predicted_full += np.swapaxes(np.swapaxes(dat[:,:,:,0],2,1),1,0)
 
-    out_vol = pyminc.volumeLikeFile(os.path.join(data_folder,pt,_lowdose_name),os.path.join(data_folder,pt,'predicted_'+modelbasename+'_'+_lowdose_name))
+    # Create minc file of predicted data.
+    out_vol = pyminc.volumeLikeFile(os.path.join(data_base_path,pt,'minc',_predictor_fname),os.path.join(data_base_path,pt,'predicted_'+modelbasename+'_'+_predictor_fname))
+
+    # Thresholding (use if you need binary output)
+    if binary_output == 1:
+        thres = 0.5
+        predicted_full[predicted_full > thres] = 1
+        predicted_full[predicted_full <= thres] = 0
+
+    # Save the data
     out_vol.data = predicted_full
     out_vol.writeFile()
     out_vol.closeVolume()
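The prediction loop in `scripts/main.py` assembles a full volume from overlapping slabs of `z` consecutive slices: each interior slice takes the central slice of the slab centred on it, while the first and last slabs also fill the edge slices. A shape-only illustration with a stubbed model (all names here are hypothetical):

```python
# Shape-only illustration of the slab assembly in predict_patient; the model
# is stubbed out and returns zeros.
import numpy as np

x, y, z, n_slices = 256, 256, 8, 111
half = int(z / 2)

def predict_slab(z_index):
    return np.zeros((1, x, y, z))  # stand-in for model.predict on one slab

predicted = np.empty((n_slices, x, y))
for z_index in range(half, n_slices - half):
    stack = predict_slab(z_index)            # slab centred on z_index
    if z_index == half:                      # first slab fills the leading edge
        for ind in range(half):
            predicted[ind] = stack[0, :, :, ind]
    if z_index == n_slices - half - 1:       # last slab fills the trailing edge
        for ind in range(half):
            predicted[z_index + ind] = stack[0, :, :, half + ind]
    predicted[z_index] = stack[0, :, :, half]  # interior: central slice
```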
diff --git a/toolkit-config.sh.cmake b/toolkit-config.sh.cmake
old mode 100644
new mode 100755