From 2d5934206bf498e2fa3d19ac60b890daeb832717 Mon Sep 17 00:00:00 2001
From: Omid
Date: Fri, 23 Mar 2018 12:53:05 -0400
Subject: [PATCH 01/20] adds pycharm to .gitignore file

---
 .gitignore | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/.gitignore b/.gitignore
index 7bbc71c..b018858 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,7 @@
+# pycharm
+
+.idea/
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]

From f07c04e153f99bed1a026f45d63e324a2e2a27ee Mon Sep 17 00:00:00 2001
From: Omid
Date: Fri, 23 Mar 2018 13:12:41 -0400
Subject: [PATCH 02/20] adds download for train and test files

---
 .gitignore         |  6 ++++++
 data/README.MD     |  0
 data/downloader.sh | 13 +++++++++++++
 3 files changed, 19 insertions(+)
 create mode 100644 data/README.MD
 create mode 100644 data/downloader.sh

diff --git a/.gitignore b/.gitignore
index b018858..fc138e8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,12 @@
 
 .idea/
 
+# ignore data files
+
+data/*.txt
+data/train/
+data/test/
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
diff --git a/data/README.MD b/data/README.MD
new file mode 100644
index 0000000..e69de29
diff --git a/data/downloader.sh b/data/downloader.sh
new file mode 100644
index 0000000..15dee8c
--- /dev/null
+++ b/data/downloader.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+# download the guide files if test.txt and train.txt are not present
+if [ ! -f "test.txt" ]
+then
+    wget "https://storage.googleapis.com/uga-dsp/project4/test.txt"
+fi
+if [ ! -f "train.txt" ]
+then
+    wget "https://storage.googleapis.com/uga-dsp/project4/train.txt"
+fi
+
+

From a774bcbc01f72a61c1edd0c7d23f3c6350e83ded Mon Sep 17 00:00:00 2001
From: Omid
Date: Fri, 23 Mar 2018 13:45:01 -0400
Subject: [PATCH 03/20] adds downloader for test and training sets, fixes #1

---
 data/downloader.sh | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/data/downloader.sh b/data/downloader.sh
index 15dee8c..b145d9b 100644
--- a/data/downloader.sh
+++ b/data/downloader.sh
@@ -10,4 +10,35 @@ then
     wget "https://storage.googleapis.com/uga-dsp/project4/train.txt"
 fi
 
+# create folders
+if [ ! -d "train" ]
+then
+    mkdir "train"
+fi
+
+if [ ! -d "test" ]
+then
+    mkdir "test"
+fi
+
+echo "starting to download test"
+cd test
+
+while read -r line || [[ -n "$line" ]]; do
+    echo "$line"
+    wget "https://storage.googleapis.com/uga-dsp/project4/data/$line.tar"
+    tar -xf "$line.tar"
+    rm "$line.tar"
+done < "../test.txt"
+
+
+echo "downloading train"
+
+cd ../train
+while read -r line || [[ -n "$line" ]]; do
+    echo "$line"
+    wget "https://storage.googleapis.com/uga-dsp/project4/data/$line.tar"
+    tar -xf "$line.tar"
+    rm "$line.tar"
+done < "../train.txt"

From 286951018d8bff88ae9aae36fd5bee47cc05c63c Mon Sep 17 00:00:00 2001
From: Omid
Date: Fri, 23 Mar 2018 13:59:17 -0400
Subject: [PATCH 04/20] adds masks to download list

---
 data/downloader.sh | 30 +++++++++++++++++++++++-------
 1 file changed, 23 insertions(+), 7 deletions(-)

diff --git a/data/downloader.sh b/data/downloader.sh
index b145d9b..4fa204d 100644
--- a/data/downloader.sh
+++ b/data/downloader.sh
@@ -1,5 +1,15 @@
 #!/usr/bin/env bash
 
+
+# this script downloads files in the current directory and creates the structure as :
+# --train
+# --|--data ( this contains folders with names as hashes )
+# --test
+# --|--data ( this contains folders with names as hashes )
+#
+# masks are downloaded as "mask.png" in each training folder
+
+
 # download the guide files if test.txt and train.txt are not present
 if [ ! -f "test.txt" ]
 then
@@ -21,24 +31,30 @@ then
     mkdir "test"
 fi
 
-echo "starting to download test"
-cd test
+echo "downloading train"
+
+cd train
 
 while read -r line || [[ -n "$line" ]]; do
     echo "$line"
     wget "https://storage.googleapis.com/uga-dsp/project4/data/$line.tar"
     tar -xf "$line.tar"
     rm "$line.tar"
-done < "../test.txt"
-
+    cd "data/$line"
+    wget -o "mask.png" "https://storage.googleapis.com/uga-dsp/project4/masks/$line.png"
+    cd ../..
+done < "../train.txt"
 
-echo "downloading train"
 
-cd ../train
+echo "downloading test"
+cd ../test
 while read -r line || [[ -n "$line" ]]; do
     echo "$line"
     wget "https://storage.googleapis.com/uga-dsp/project4/data/$line.tar"
     tar -xf "$line.tar"
     rm "$line.tar"
-done < "../train.txt"
+done < "../test.txt"
+
+
+echo "over and out"
\ No newline at end of file

From b3b84bcbcf8f035eef97e44c9893445ef2e459fe Mon Sep 17 00:00:00 2001
From: Omid
Date: Fri, 23 Mar 2018 14:04:41 -0400
Subject: [PATCH 05/20] adds info to downloader readme

---
 data/README.MD | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/data/README.MD b/data/README.MD
index e69de29..ccbf838 100644
--- a/data/README.MD
+++ b/data/README.MD
@@ -0,0 +1,17 @@
+# Downloader
+
+the file 'downloader.sh' is a bash script that downloads the data files from Google storage. the URLs follow the project instructions.
+
+if other sources are being considered, the URLs should be changed
+
+to run the downloader use :
+
+    $bash downloader.sh
+
+## directory structure
+
+after the script is done with the downloads, the directory will contain the following folders :
+
+* train -> data : this will have the training samples. Ground truth for each sample will be in the directory and will be named `mask.png`
+* test -> data : this folder contains the test set for which there is no ground truth.
+
-d "data/$line" ] + then + wget -nv "https://storage.googleapis.com/uga-dsp/project4/data/$line.tar" + tar -xf "$line.tar" + rm "$line.tar" + echo "$line" + fi done < "../test.txt" From b0c6119d87e3d7a13bb362ee8543a88793acf950 Mon Sep 17 00:00:00 2001 From: Omid Date: Fri, 23 Mar 2018 18:23:52 -0400 Subject: [PATCH 07/20] adds preprocessor file --- src/__init__.py | 0 src/preprocessing/__init__.py | 0 src/preprocessing/preprocessor.py | 81 +++++++++++++++++++++++++++++++ 3 files changed, 81 insertions(+) create mode 100644 src/__init__.py create mode 100644 src/preprocessing/__init__.py create mode 100644 src/preprocessing/preprocessor.py diff --git a/src/__init__.py b/src/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/preprocessing/__init__.py b/src/preprocessing/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/preprocessing/preprocessor.py b/src/preprocessing/preprocessor.py new file mode 100644 index 0000000..53cc58d --- /dev/null +++ b/src/preprocessing/preprocessor.py @@ -0,0 +1,81 @@ +"""Abstract class for preprocessors""" +""" +this class is based on the implementation at : +https://github.com/dsp-uga/team-ball/blob/master/src/preprocessing/preprocessor.py +""" + +import json +import numpy as np +import os + +from numpy import array, zeros + +class preprocessor: + + def __init__(self): + self.data= None + self.name = None + self.x_train = None + self.x_test = None + self.y_train = None + + def loadSample(self, path): + + return None,None, None; + + def preprocess (self): + return None + + def load_from_files(self): + """ + loads images from previously preprocessed files. + :return: a treplet of ( train_x, train_y, test_dic ) + """ + + if (not self.name is None and not self.importPath is None): + train_x = np.load(os.path.join(self.importPath,"x_train_"+ self.name + ".npy")) + train_y = np.load(os.path.join(self.importPath, "y_train_"+ self.name + ".npy")) + test_x = np.load(os.path.join(self.importPath, "x_test_"+ self.name + ".npy")) + + return train_x, train_y, test_x + + return None, None, None + + def save_to_file(self): + """ + saves the preprocessed arays to files, files will be named base on the + preprocessor's name which should be set in init. + + """ + + if (not self.name is None and not self.importPath is None): + np.save(os.path.join(self.importPath, "x_train_" + self.name + ".npy"), self.x_train) + np.save(os.path.join(self.importPath, "y_train_" + self.name + ".npy") , self.y_train) + np.save(os.path.join(self.importPath, "x_test_" + self.name + ".npy") , self.x_test) + + + + + def change_size ( self , source , target_dim=[640,640] ): + """ + this function up samples the image into the given dimensions by padding the image with zeros (the background in the cilia images ) until it + fits the given size. + :param source: the array to resize + :param target_dim: the target size + :return: returns the resized image + """ + + dims = source.shape + if dims[0] != target_dim[0] or dims[1] != target_dim[1]: + """ + if the source is not in the desired format change it to the desired size + """ + temp_mask = np.zeros((target_dim[0], target_dim[1])) + + temp_mask[:dims[0], :dims[1]] = source + + return temp_mask + + else: + # if the image is already in the dessired size, return it. 
From 8fc067ee68c597b81d6c0551c4837d35706e50a5 Mon Sep 17 00:00:00 2001
From: Omid
Date: Fri, 23 Mar 2018 19:06:22 -0400
Subject: [PATCH 08/20] adds every other preprocessor

---
 src/preprocessing/EveryOther.py | 95 +++++++++++++++++++++++++++++++++
 1 file changed, 95 insertions(+)
 create mode 100644 src/preprocessing/EveryOther.py

diff --git a/src/preprocessing/EveryOther.py b/src/preprocessing/EveryOther.py
new file mode 100644
index 0000000..027d447
--- /dev/null
+++ b/src/preprocessing/EveryOther.py
@@ -0,0 +1,95 @@
+"""
+this preprocessor selects one of every "skip_count" images in the sample;
+for example, if the sample has 100 images, the output will have 20 for skip_count = 5
+
+test samples will have only one image each (frame 50)
+"""
+from src.preprocessing.preprocessor import preprocessor
+import numpy as np
+import cv2
+from numpy import array, zeros
+import os
+from glob import glob
+import cv2
+import json
+
+
+class EveryOther ( preprocessor ):
+
+    def __init__(self, exportPath, trainingPath , testPath , images_size=[640,640], importPath = None , skip_count =5):
+        self.exportPath = exportPath
+        self.trainingPath = trainingPath
+        self.testPath = testPath
+        self.image_size = images_size
+        self.importPath = importPath
+        self.skip_count = skip_count
+        self.name = "EveryOther_" + str(skip_count)
+        self.x_test = None
+        self.y_train = None
+        self.x_train = None
+
+
+    def loadSample ( self, path ):
+        """
+        this function loads the images in the sample as one output image
+        :param path: path to the sample; this has to be in glob format describing the path to TIFF images
+        :return: returns one image which is the aggregated version of all images in the sample
+        """
+
+        # read image files
+        files = sorted(glob(path))
+        imgs = array([imread(f) for f in files])
+
+        # merge files in to one image
+        image = imgs.sum(axis=0)
+
+        image = self.change_size( image, self.image_size )
+
+        print(image.shape)
+
+        return image
+
+
+    def preprocess(self):
+        """
+        this function preprocesses the images into three arrays: test_x, train_x, train_y
+        :return:
+        """
+        train_x = []  # None #np.array([])
+        train_y = []  # None # np.array([])
+
+        # create the training set
+        if( not self.trainingPath is None):
+            for sample in sorted( os.listdir(self.trainingPath)) :
+                images_glob_path = os.path.join( self.trainingPath,sample + "/*.png")
+                mask_path = os.path.join( self.trainingPath, sample + '/mask.png')
+
+                # load train_y
+                y = self.change_size(cv2.imread( mask_path, 0))
+
+                # take into account the skip count and load the images
+                for i in range(0, 99, self.skip_count):
+                    temp_x= cv2.imread(os.path.join(self.trainingPath, "/%s/frame%04d.png" % (sample, i)))
+                    train_x.append( self.change_size(temp_x))
+                    train_y.append(y)
+
+
+        # create the test set
+        test_x = []
+        if not self.testPath is None:
+            for sample in sorted(os.listdir(self.testPath)):
+                image = cv2.imread(os.path.join(self.testPath, "/%s/frame0050.png" % sample))
+                test_x.append(image)
+
+        train_x = array(train_x)
+        train_y = array(train_y)
+        test_x = array(test_x)
+
+        self.x_train = train_x
+        self.x_test = test_x
+        self.y_train = train_y
+
+        if( not self.exportPath is None):
+            self.save_to_file()
+
+        return train_x , train_y , test_x
\ No newline at end of file
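
[Editor's note: the frame-selection rule used by EveryOther.preprocess above, shown in isolation. With frames frame0000..frame0099 and the default skip_count of 5, range(0, 99, skip_count) keeps 20 frames per sample, which is the "100 images -> 20" example from the module docstring.]

    skip_count = 5
    indices = list(range(0, 99, skip_count))
    print(len(indices))   # 20
    print(indices[:5])    # [0, 5, 10, 15, 20]
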
From da83feb49181920145f6c35d4d5fca2c800812c8 Mon Sep 17 00:00:00 2001
From: Omid
Date: Fri, 23 Mar 2018 19:28:21 -0400
Subject: [PATCH 09/20] adds initial main, fixes minor problems with EveryOther

---
 src/main.py                     | 113 ++++++++++++++++++++++++++++++++
 src/preprocessing/EveryOther.py |   6 +++---
 2 files changed, 116 insertions(+), 3 deletions(-)
 create mode 100644 src/main.py

diff --git a/src/main.py b/src/main.py
new file mode 100644
index 0000000..40ef226
--- /dev/null
+++ b/src/main.py
@@ -0,0 +1,113 @@
+"""
+this file contains the main runner for the project
+"""
+
+import argparse
+import sys
+import os
+import logging
+from src.preprocessing import preprocessor
+from src.preprocessing import EveryOther
+# from src.postprocessing.postprocessing import postProcess
+# from src.Classifiers.Classifier import Classifier
+# from src.Classifiers.FCN import FCN_Classifier
+# from src.Classifiers.UNet_Classifier import UNET_Classifier
+
+description = ' '
+
+parser = argparse.ArgumentParser(description=description, add_help='How to use', prog='python main.py ')
+
+parser.add_argument("-d", "--dataset", default="../data/train/",
+                    help='Path to the training data [DEFAULT: "data/train/"]')
+
+parser.add_argument("-ts", "--testset", default=None,
+                    help='Path to the testing data [DEFAULT: None]')
+
+parser.add_argument("-m", "--model", default="FCN",
+                    help='model to be used in the segmentation can be UNET/FCN/NMF [DEFAULT: "FCN"]')
+
+parser.add_argument("-t", "--train", action="store_true",
+                    help='To ensure a model is being trained')
+
+parser.add_argument("-p", "--predict", action="store_true",
+                    help='To ensure a segmentation is performed on the test set (This requires --testset to have value)')
+
+parser.add_argument("-e", "--epoch", default="1024",
+                    help='Sets number of epochs for which the network will be trained')
+
+parser.add_argument("-b", "--batch", default="4",
+                    help='sets the batch size for training the models')
+
+parser.add_argument("-pp", "--preprocessor", default="sum",
+                    help='Chooses the Preprocessor to be applied ')
+
+parser.add_argument("-ep", "--exportpath", default="output",
+                    help='Chooses the path to export model and numpy files')
+
+parser.add_argument("-lf", "--logfile", default="log.log",
+                    help="Path to the log file, this file will contain the log records")
+
+
+# compile arguments
+args = parser.parse_args()
+
+# setup logging
+logging.basicConfig(filename=args.logfile, level=logging.INFO, filemode="w",
+                    format=" %(asctime)s - %(module)s.%(funcName)s - %(levelname)s : %(message)s ")
+
+the_preprocessor = None
+the_Classifier = None
+
+# set the preprocessor
+if (args.preprocessor == "everyother"):
+    the_preprocessor = EveryOther.EveryOther(images_size=[512, 512],trainingPath=args.dataset, testPath=args.testset,
+                                             exportPath=args.exportpath, importPath=args.exportpath )
+else:
+    the_preprocessor = preprocessor.preprocessor()
+
+# set the classifier :
+# if (args.model == "FCN"):
+#     the_Classifier = FCN_Classifier ( )
+# elif args.model == "UNET":
+#     the_Classifier = UNET_Classifier()
+# else:
+#     the_Classifier = Classifier()
+
+# -------------- Loading the data
+
+# try to load pre calculated data :
+try:
+    x_train, y_train, x_test = the_preprocessor.load_from_files()
+
+except FileNotFoundError:
+    # if there is no file to load set them as null, they will be loaded automatically
+    x_train, y_train, x_test = None, None, None
+
+# check if there is no data, read them from input ( this will take time! )
+if ( x_train is None):
+    logging.info("Loading data from original data")
+    x_train, y_train, x_test = the_preprocessor.preprocess()
+    logging.info("Done loading data from original data")
+else:
+    logging.info("data loaded from pre-calculated files")
+# --------------- Loading the data
+
+
+# --------------- train model!
+if( args.train ):
+    logging.info("Starting training")
+    model = the_Classifier.train(x_train=x_train, y_train=y_train , epochs=args.epoch ,batch_size=args.batch)
+    the_Classifier.saveModel( args.exportpath )
+    logging.info("Done with training")
+else :
+    model = the_Classifier.load_model( args.exportpath )
+
+# --------------- train model!
+
+#------------ predict
+if( args.predict and x_test ):
+    # run the prediction
+    predicted = the_Classifier.predict( x_test )
+
+    # save the results
+    postProcess(theDic=predicted,output_file_name="test.json")
\ No newline at end of file
diff --git a/src/preprocessing/EveryOther.py b/src/preprocessing/EveryOther.py
index 027d447..e03f287 100644
--- a/src/preprocessing/EveryOther.py
+++ b/src/preprocessing/EveryOther.py
@@ -69,8 +69,8 @@ def preprocess(self):
 
                 # take into account the skip count and load the images
                 for i in range(0, 99, self.skip_count):
-                    temp_x= cv2.imread(os.path.join(self.trainingPath, "/%s/frame%04d.png" % (sample, i)))
-                    train_x.append( self.change_size(temp_x))
+                    temp_x= cv2.imread(os.path.join(self.trainingPath, "%s/frame%04d.png" % (sample, i)),0)
+                    train_x.append(self.change_size(temp_x))
                     train_y.append(y)
 
 
@@ -78,7 +78,7 @@ def preprocess(self):
         test_x = []
         if not self.testPath is None:
             for sample in sorted(os.listdir(self.testPath)):
-                image = cv2.imread(os.path.join(self.testPath, "/%s/frame0050.png" % sample))
+                image = cv2.imread(os.path.join(self.testPath, "%s/frame0050.png" % sample),0)
                 test_x.append(image)
 
         train_x = array(train_x)
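
[Editor's note: a minimal sketch of driving the pipeline from patch 09 without going through argparse; the paths are illustrative and the constructor arguments mirror the "everyother" branch of main.py above. At this point in the series preprocess() still returns three values.]

    from src.preprocessing import EveryOther

    pre = EveryOther.EveryOther(images_size=[512, 512],
                                trainingPath="../data/train/",
                                testPath=None,
                                exportPath="output",
                                importPath="output")
    # slow: reads every selected frame of every sample from disk
    x_train, y_train, x_test = pre.preprocess()
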
From f547d7c55b3259f9fa09e6f598b4aa0d13f85adb Mon Sep 17 00:00:00 2001
From: Vamsi Nadella
Date: Sat, 24 Mar 2018 20:04:03 -0400
Subject: [PATCH 10/20] Replacing high-variance pixels.

---
 Preprocessor.py | 62 +++++++++++++++++++++++++++++++++
 1 file changed, 62 insertions(+)
 create mode 100644 Preprocessor.py

diff --git a/Preprocessor.py b/Preprocessor.py
new file mode 100644
index 0000000..2774e0f
--- /dev/null
+++ b/Preprocessor.py
@@ -0,0 +1,62 @@
+import numpy as np
+from glob import glob
+from numpy import array
+from scipy.misc import imread
+import scipy.misc
+import sys
+import os
+
+'''
+This method takes a 3d image sequence as a numpy array argument.
+The image sequence is then converted to a 2-d matrix with
+pixels as rows and pixel values for images as columns.
+Then the variance of each pixel is calculated and stored in a
+variable. The top variant pixels are replaced with a given
+pixel value.
+'''
+def highVariance(imgs,hv=1,pix=125):
+    timgs = transform(imgs)
+    varimg = timgs.var(1)
+    sortvar = sorted(varimg,reverse=True)
+    sortvar = sortvar[:hv]
+    a = np.full((1,int(imgs.shape[0])),pix)
+    for mv in sortvar:
+        timgs[int(np.where(varimg == mv)[0][0])] = a
+    return timgs
+
+'''
+Transforms the given image sequence into a 2d matrix
+'''
+def transform(imgs):
+    t_imgs = np.transpose(imgs)
+    tod_data = t_imgs.reshape(imgs.shape[1]*imgs.shape[2], imgs.shape[0])
+    return tod_data
+
+'''
+Saves the modified image matrices to an image.
+This function takes the arguments:
+'path' where the image will be saved
+'varimgs' the modified image matrices
+'imgs' the original image matrix
+'''
+def saveImage(path, varimgs, imgs):
+    timgs = np.transpose(varimgs)
+    rimgs = timgs.reshape(imgs.shape[0],imgs.shape[1],imgs.shape[2])
+    if not os.path.exists(path):
+        os.makedirs(path)
+    for i in range (0,len(rimgs)):
+        scipy.misc.imsave(path+'/img'+str(i)+'.png', rimgs[i])
+'''
+Function to load the images from the given path
+'''
+def loadImgs(path):
+    files = sorted(glob(path+'/frame*'))
+    imgs = array([imread(f) for f in files])
+    return imgs
+
+
+if __name__ == "__main__":
+    path = sys.argv[1]
+    imgs = loadImgs(path)
+    varImgs = highVariance(imgs)
+    saveImage(path+"/preprocess",varImgs,imgs)
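
[Editor's note: a toy run of the idea behind highVariance above; not part of the patch. Pixels become rows and frames become columns, so a row's variance measures how much that pixel flickers across the sequence; the most variant row is then overwritten with the constant pix value.]

    import numpy as np

    imgs = np.random.randint(0, 255, size=(10, 4, 4))  # 10 frames of 4x4
    flat = np.transpose(imgs).reshape(16, 10)          # 16 pixels x 10 frames
    noisiest = np.argmax(flat.var(axis=1))             # row with highest variance
    flat[noisiest] = 125                               # the default pix value
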
From c7ea62e51cc0bd8fe4abd980226c82461ac269df Mon Sep 17 00:00:00 2001
From: Omid
Date: Sun, 25 Mar 2018 09:55:46 -0400
Subject: [PATCH 11/20] adds speedup to preprocessor everyother

---
 src/main.py                       |  3 ++-
 src/preprocessing/EveryOther.py   | 49 +++++++++++--------------------
 src/preprocessing/preprocessor.py |  8 ++---
 3 files changed, 22 insertions(+), 38 deletions(-)

diff --git a/src/main.py b/src/main.py
index 40ef226..ff64efa 100644
--- a/src/main.py
+++ b/src/main.py
@@ -41,7 +41,8 @@
 parser.add_argument("-pp", "--preprocessor", default="sum",
                     help='Chooses the Preprocessor to be applied ')
 
-parser.add_argument("-ep", "--exportpath", default="output",
+parser.add_argument("-ep", "--exportpath", default=None,
+
                     help='Chooses the path to export model and numpy files')
 
 parser.add_argument("-lf", "--logfile", default="log.log",
diff --git a/src/preprocessing/EveryOther.py b/src/preprocessing/EveryOther.py
index e03f287..bd67cd8 100644
--- a/src/preprocessing/EveryOther.py
+++ b/src/preprocessing/EveryOther.py
@@ -29,61 +29,46 @@ def __init__(self, exportPath, trainingPath , testPath , images_size=[640,640],
         self.x_train = None
 
 
-    def loadSample ( self, path ):
-        """
-        this function loads the images in the sample as one output image
-        :param path: path to the sample; this has to be in glob format describing the path to TIFF images
-        :return: returns one image which is the aggregated version of all images in the sample
-        """
-
-        # read image files
-        files = sorted(glob(path))
-        imgs = array([imread(f) for f in files])
-
-        # merge files in to one image
-        image = imgs.sum(axis=0)
-
-        image = self.change_size( image, self.image_size )
-
-        print(image.shape)
-
-        return image
-
-
     def preprocess(self):
         """
         this function preprocesses the images into three arrays: test_x, train_x, train_y
         :return:
         """
-        train_x = []  # None #np.array([])
-        train_y = []  # None # np.array([])
+        train_x = [] # None #np.array([])
+        train_y = [] # None # np.array([])
 
         # create the training set
         if( not self.trainingPath is None):
-            for sample in sorted( os.listdir(self.trainingPath)) :
-                images_glob_path = os.path.join( self.trainingPath,sample + "/*.png")
+            for sample in sorted(os.listdir(self.trainingPath)) :
+
                 mask_path = os.path.join( self.trainingPath, sample + '/mask.png')
 
                 # load train_y
                 y = self.change_size(cv2.imread( mask_path, 0))
+                y= np.expand_dims( y, axis=0 )
 
                 # take into account the skip count and load the images
-                for i in range(0, 99, self.skip_count):
-                    temp_x= cv2.imread(os.path.join(self.trainingPath, "%s/frame%04d.png" % (sample, i)),0)
-                    train_x.append(self.change_size(temp_x))
+                t = [ self.change_size(cv2.imread(os.path.join(self.trainingPath, "%s/frame%04d.png" % (sample, i)),0)) for i in range(0, 99, self.skip_count) ]
+                t = [ np.expand_dims(x, axis=0) for x in t ]
+                train_x.extend(t)
+                for i in range( len(t)):
                     train_y.append(y)
 
-
         # create the test set
         test_x = []
         if not self.testPath is None:
             for sample in sorted(os.listdir(self.testPath)):
-                image = cv2.imread(os.path.join(self.testPath, "%s/frame0050.png" % sample),0)
-                test_x.append(image)
+                image = self.change_size(cv2.imread(os.path.join(self.testPath, "%s/frame0050.png" % sample),0))
+                test_x.append(np.expand_dims(image, axis=0))
 
-        train_x = array(train_x)
-        train_y = array(train_y)
-        test_x = array(test_x)
+        train_x = np.vstack(train_x)
+        train_y = np.vstack(train_y)
+        test_x = np.vstack(test_x)
+
+        print(train_x.shape)
+        print(train_y.shape)
+        print(test_x.shape)
 
         self.x_train = train_x
         self.x_test = test_x
diff --git a/src/preprocessing/preprocessor.py b/src/preprocessing/preprocessor.py
index 53cc58d..ad8bd25 100644
--- a/src/preprocessing/preprocessor.py
+++ b/src/preprocessing/preprocessor.py
@@ -49,11 +49,9 @@ def save_to_file(self):
         """
 
         if (not self.name is None and not self.importPath is None):
-            np.save(os.path.join(self.importPath, "x_train_" + self.name + ".npy"), self.x_train)
-            np.save(os.path.join(self.importPath, "y_train_" + self.name + ".npy") , self.y_train)
-            np.save(os.path.join(self.importPath, "x_test_" + self.name + ".npy") , self.x_test)
-
-
+            np.savez_compressed(os.path.join(self.importPath, "x_train_" + self.name + ".npy"), self.x_train)
+            np.savez_compressed(os.path.join(self.importPath, "y_train_" + self.name + ".npy") , self.y_train)
+            np.savez_compressed(os.path.join(self.importPath, "x_test_" + self.name + ".npy") , self.x_test)
 
 
     def change_size ( self , source , target_dim=[640,640] ):

From 5e2855a56cc1f33595850642230ecb8456ca759c Mon Sep 17 00:00:00 2001
From: Omid
Date: Sun, 25 Mar 2018 12:08:21 -0400
Subject: [PATCH 12/20] adds Unet implementation and the structure for
 segmentations

---
 src/main.py                       |  29 +++--
 src/preprocessing/EveryOther.py   |   7 +-
 src/segmentation/UnetSegmenter.py | 210 ++++++++++++++++++++++++++++++
 src/segmentation/__init__.py      |   0
 src/segmentation/segmenter.py     |  68 ++++++++++
 5 files changed, 299 insertions(+), 15 deletions(-)
 create mode 100644 src/segmentation/UnetSegmenter.py
 create mode 100644 src/segmentation/__init__.py
 create mode 100644 src/segmentation/segmenter.py

diff --git a/src/main.py b/src/main.py
index ff64efa..caae069 100644
--- a/src/main.py
+++ b/src/main.py
@@ -8,6 +8,8 @@
 import logging
 from src.preprocessing import preprocessor
 from src.preprocessing import EveryOther
+from src.segmentation.segmenter import Segmenter
+from src.segmentation.UnetSegmenter import UNET_Segmenter
 # from src.postprocessing.postprocessing import postProcess
 # from src.Classifiers.Classifier import Classifier
 # from src.Classifiers.FCN import FCN_Classifier
@@ -23,7 +25,7 @@
 parser.add_argument("-ts", "--testset", default=None,
                     help='Path to the testing data [DEFAULT: None]')
 
-parser.add_argument("-m", "--model", default="FCN",
+parser.add_argument("-m", "--model", default="unet",
                     help='model to be used in the segmentation can be UNET/FCN/NMF [DEFAULT: "FCN"]')
 
 parser.add_argument("-t", "--train", action="store_true",
@@ -57,22 +59,21 @@
                     format=" %(asctime)s - %(module)s.%(funcName)s - %(levelname)s : %(message)s ")
 
 the_preprocessor = None
-the_Classifier = None
+the_Segmenter = None
 
 # set the preprocessor
 if (args.preprocessor == "everyother"):
-    the_preprocessor = EveryOther.EveryOther(images_size=[512, 512],trainingPath=args.dataset, testPath=args.testset,
+    the_preprocessor = EveryOther.EveryOther(images_size=[640, 640],trainingPath=args.dataset, testPath=args.testset,
                                              exportPath=args.exportpath, importPath=args.exportpath )
 else:
     the_preprocessor = preprocessor.preprocessor()
 
 # set the classifier :
-# if (args.model == "FCN"):
-#     the_Classifier = FCN_Classifier ( )
-# elif args.model == "UNET":
-#     the_Classifier = UNET_Classifier()
-# else:
-#     the_Classifier = Classifier()
+
+if args.model == "unet":
+    the_Segmenter = UNET_Segmenter()
+else:
+    the_Segmenter = Segmenter()
 
 # -------------- Loading the data
@@ -97,18 +98,18 @@
 # --------------- train model!
 if( args.train ):
     logging.info("Starting training")
-    model = the_Classifier.train(x_train=x_train, y_train=y_train , epochs=args.epoch ,batch_size=args.batch)
-    the_Classifier.saveModel( args.exportpath )
+    model = the_Segmenter.train(x_train=x_train, y_train=y_train , epochs=int(args.epoch) ,batch_size=int(args.batch) )
+    the_Segmenter.saveModel( args.exportpath )
     logging.info("Done with training")
 else :
-    model = the_Classifier.load_model( args.exportpath )
+    model = the_Segmenter.load_model( args.exportpath )
 
 # --------------- train model!
 
 #------------ predict
 if( args.predict and x_test ):
     # run the prediction
-    predicted = the_Classifier.predict( x_test )
+    predicted = the_Segmenter.predict( x_test )
 
     # save the results
-    postProcess(theDic=predicted,output_file_name="test.json")
\ No newline at end of file
+    # postProcess(theDic=predicted,output_file_name="test.json")
\ No newline at end of file
diff --git a/src/preprocessing/EveryOther.py b/src/preprocessing/EveryOther.py
index bd67cd8..782a1c6 100644
--- a/src/preprocessing/EveryOther.py
+++ b/src/preprocessing/EveryOther.py
@@ -50,7 +50,7 @@ def preprocess(self):
 
                 # take into account the skip count and load the images
                 t = [ self.change_size(cv2.imread(os.path.join(self.trainingPath, "%s/frame%04d.png" % (sample, i)),0)) for i in range(0, 99, self.skip_count) ]
-                t = [ np.expand_dims(x, axis=0) for x in t ] 
+                t = [ np.expand_dims(x, axis=0) for x in t ]
                 train_x.extend(t)
                 for i in range( len(t)):
                     train_y.append(y)
@@ -60,12 +60,17 @@ def preprocess(self):
         if not self.testPath is None:
             for sample in sorted(os.listdir(self.testPath)):
                 image = self.change_size(cv2.imread(os.path.join(self.testPath, "%s/frame0050.png" % sample),0))
+                image = (image==2).astype(int)
                 test_x.append(np.expand_dims(image, axis=0))
 
         train_x = np.vstack(train_x)
         train_y = np.vstack(train_y)
         test_x = np.vstack(test_x)
 
+        train_x = train_x.reshape(train_x.shape + (1,))
+        train_y = train_y.reshape(train_y.shape + (1,))
+        test_x = test_x.reshape(test_x.shape + (1,))
+
         print(train_x.shape)
         print(train_y.shape)
         print(test_x.shape)
diff --git a/src/segmentation/UnetSegmenter.py b/src/segmentation/UnetSegmenter.py
new file mode 100644
index 0000000..5b097a0
--- /dev/null
+++ b/src/segmentation/UnetSegmenter.py
@@ -0,0 +1,210 @@
+"""
+this is the implementation of UNet segmentation
+the code is based on :
+https://github.com/dsp-uga/team-ball/blob/master/src/Classifiers/UNet_Classifier.py
+
+"""
+
+
+import logging
+import numpy as np
+import os
+import tensorflow as tf
+from keras.models import Model
+
+from keras.layers import Input, merge, concatenate, Conv2D, MaxPooling2D, Activation, UpSampling2D, Dropout, Conv2DTranspose, UpSampling2D, Lambda
+from keras.layers.normalization import BatchNormalization as bn
+from keras.layers.merge import add
+from keras.callbacks import ModelCheckpoint, TensorBoard
+from keras.optimizers import RMSprop
+from keras import regularizers
+from keras import backend as K
+from keras.optimizers import Adam
+from keras.callbacks import ModelCheckpoint
+from keras.layers.merge import add
+import numpy as np
+from keras.regularizers import l2
+import cv2
+import glob
+import h5py
+from keras.models import load_model
+import os
+
+from src.segmentation.segmenter import Segmenter
+
+class UNET_Segmenter(Segmenter):
+    """
+    This class provides the implementation for the UNet classifier
+    """
+
+    def __init__(self, loss_function="dice_coef"):
+        self.trained_model = None
+        self.classifier_name = "UNET"
+        self.data = None
+
+        if (loss_function == "dice_coef"):
+            self.metrics_function = UNET_Segmenter.dice_coef
+            self.loss_function = UNET_Segmenter.dice_coef_loss
+        elif loss_function == "dice_coef2":
+            self.metrics_function = UNET_Segmenter.dice_coef2
+            self.loss_function = UNET_Segmenter.dice_coef_loss2
+
+    def dice_coef2(y_true, y_pred):
+        """
+        this is a modified version of the dice score,
+        :param y_true: ground truth
+        :param y_pred: predicted
+        :return: dice score calculated between the actual and predicted versions
+        """
+        y_true_f = K.flatten(y_true)
+        y_pred_f = K.flatten(y_pred)
+
+        intersection = K.sum(y_true_f * y_pred_f)
+        U = K.sum(y_true_f) + K.sum(y_pred_f) - intersection
+
+        return 1 - intersection / U
+
+    def dice_coef(y_true, y_pred):
+        """
+        This is the dice score implementation
+        :param y_true: ground truth
+        :param y_pred: predicted
+        :return: dice score calculated between the actual and predicted versions
+        """
+        smooth = 1.0
+        y_true_f = K.flatten(y_true)
+        y_pred_f = K.flatten(y_pred)
+
+        print(K.max(y_true))
+
+        intersection = K.sum(y_true_f * y_pred_f)
+        return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
+
+    def dice_coef_loss(y_true, y_pred):
+        return - UNET_Segmenter.dice_coef(y_true, y_pred)
+
+    def dice_coef_loss2(y_true, y_pred):
+        return - UNET_Segmenter.dice_coef2(y_true, y_pred)
+
+    def load_model(self, import_path):
+        """
+        overrides the load method to add the custom objects
+        :param import_path: directory from which the model has to be loaded
+        """
+        self.trained_model = load_model(os.path.join(import_path, self.classifier_name + ".h5"),
+                                        custom_objects={
+                                            'dice_coef_loss': UNET_Segmenter.dice_coef_loss,
+                                            'dice_coef': UNET_Segmenter.dice_coef})
+        logging.info("Loaded Model at : " + os.path.join(import_path, self.classifier_name + ".h5"))
+
+    def train(self, x_train, y_train, epochs=1200, batch_size=4):
+        """
+        this is the training function for the UNet model
+        :param x_train:
+        :param y_train:
+        :return:
+        """
+        l2_lambda = 0.0002
+        DropP = 0.3
+        kernel_size = 3
+        input_shape = (640, 640, 1)
+        inputs = Input(input_shape)
+        input_prob = Input(input_shape)
+        input_prob_inverse = Input(input_shape)
+
+        conv1 = Conv2D(32, (kernel_size, kernel_size), activation='relu', padding='same',
+                       kernel_regularizer=regularizers.l2(l2_lambda))(inputs)
+        conv1 = bn()(conv1)
+        conv1 = Conv2D(32, (kernel_size, kernel_size), activation='relu', padding='same',
+                       kernel_regularizer=regularizers.l2(l2_lambda))(conv1)
+        conv1 = bn()(conv1)
+        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
+        pool1 = Dropout(DropP)(pool1)
+
+        conv2 = Conv2D(64, (kernel_size, kernel_size), activation='relu', padding='same',
+                       kernel_regularizer=regularizers.l2(l2_lambda))(pool1)
+        conv2 = bn()(conv2)
+        conv2 = Conv2D(64, (kernel_size, kernel_size), activation='relu', padding='same',
+                       kernel_regularizer=regularizers.l2(l2_lambda))(conv2)
+        conv2 = bn()(conv2)
+        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
+        pool2 = Dropout(DropP)(pool2)
+
+        conv3 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(l2_lambda))(
+            pool2)
+        conv3 = bn()(conv3)
+        conv3 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(l2_lambda))(
+            conv3)
+        conv3 = bn()(conv3)
+        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
+        pool3 = Dropout(DropP)(pool3)
+
+        conv4 = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(l2_lambda))(
+            pool3)
+        conv4 = bn()(conv4)
+        conv4 = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(l2_lambda))(
+            conv4)
+        conv4 = bn()(conv4)
+        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
+        pool4 = Dropout(DropP)(pool4)
+
+        conv5 = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(l2_lambda))(
+            pool4)
+        conv5 = bn()(conv5)
+        conv5 = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(l2_lambda))(
+            conv5)
+        conv5 = bn()(conv5)
+
+        up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], name='up6',
+                          axis=3)
+        up6 = Dropout(DropP)(up6)
+        conv6 = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(l2_lambda))(
+            up6)
+        conv6 = bn()(conv6)
+        conv6 = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(l2_lambda))(
+            conv6)
+
+        conv6 = bn()(conv6)
+        up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], name='up7',
+                          axis=3)
+        up7 = Dropout(DropP)(up7)
+        conv7 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(l2_lambda))(
+            up7)
+        conv7 = bn()(conv7)
+        conv7 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(l2_lambda))(
+            conv7)
+        conv7 = bn()(conv7)
+
+        up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], name='up8',
+                          axis=3)
+        up8 = Dropout(DropP)(up8)
+        conv8 = Conv2D(64, (kernel_size, kernel_size), activation='relu', padding='same',
+                       kernel_regularizer=regularizers.l2(l2_lambda))(up8)
+        conv8 = bn()(conv8)
+        conv8 = Conv2D(64, (kernel_size, kernel_size), activation='relu', padding='same',
+                       kernel_regularizer=regularizers.l2(l2_lambda))(conv8)
+        conv8 = bn()(conv8)
+
+        up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], name='up9',
+                          axis=3)
+        up9 = Dropout(DropP)(up9)
+        conv9 = Conv2D(32, (kernel_size, kernel_size), activation='relu', padding='same',
+                       kernel_regularizer=regularizers.l2(l2_lambda))(up9)
+        conv9 = bn()(conv9)
+        conv9 = Conv2D(32, (kernel_size, kernel_size), activation='relu', padding='same',
+                       kernel_regularizer=regularizers.l2(l2_lambda))(conv9)
+        conv9 = bn()(conv9)
+
+        conv10 = Conv2D(1, (1, 1), activation='sigmoid', name='conv10')(conv9)
+
+        model = Model(inputs=[inputs], outputs=[conv10])
+        model.compile(optimizer=Adam(lr=1e-5), loss=UNET_Segmenter.dice_coef_loss, metrics=[UNET_Segmenter.dice_coef])
+        print(model.summary())
+
+        # training network
+        model.fit([x_train], [y_train], batch_size=batch_size, epochs=epochs, shuffle=True)
+
+        # set as class's model to be used for prediction
+        self.trained_model = model
+
+        return model
\ No newline at end of file
diff --git a/src/segmentation/__init__.py b/src/segmentation/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/segmentation/segmenter.py b/src/segmentation/segmenter.py
new file mode 100644
index 0000000..99d30de
--- /dev/null
+++ b/src/segmentation/segmenter.py
@@ -0,0 +1,68 @@
+"""
+this is the abstract class for the segmenter;
+it is based on the script here :
+https://github.com/dsp-uga/team-ball/blob/master/src/Classifiers/Classifier.py
+
+"""
+import os
+import keras
+from keras.models import load_model
+import logging
+import numpy as np
+
+
+class Segmenter :
+
+    def __init__(self):
+        self.trained_model = None
+        self.data = None
+        self.classifier_name = None
+
+        return
+
+    def saveModel(self , export_path ):
+        """
+        this function is in charge of saving the model and its weights
+        :return:
+        """
+        if( self.trained_model ):
+            self.trained_model.save( os.path.join( export_path , self.classifier_name+ ".h5" ) )
+
+            logging.info( "Saved Model to : "+os.path.join( export_path , self.classifier_name+ ".h5" ))
+        return
+
+    def load_model(self , import_path):
+        """
+        loads the model from file
+        :param import_path:
+        :return: returns the model and sets it as the model in the class as well
+        """
+        self.trained_model = load_model( os.path.join( import_path , self.classifier_name+ ".h5" ))
+        logging.info("Loaded Model at : " + os.path.join(import_path, self.classifier_name + ".h5"))
+        return self.trained_model
+
+    def train(self, x_train , y_train):
+        """
+        this function trains the model which is defined in its body and
+        saves the model in the class for further prediction
+        :return:
+        """
+
+        return self.trained_model
+
+    def predict(self, data_dic):
+        """
+        this function runs the prediction on the data
+        :param data_dic:
+        :return: the predicted values
+        """
+
+        # X_test = X_test.reshape(X_test.shape + (1,))
+        ret={}
+        for item in data_dic:
+
+            temp =np.array([data_dic[item].reshape(data_dic[item].shape + (1,))])
+            ret[item] =np.uint8( self.trained_model.predict(temp)[0].squeeze(axis=2) *255)
+        #  ret = [{x: self.trained_model.predict()[0]} for x in data_dic]
+
+        return ret
\ No newline at end of file
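
[Editor's note: the dice score that UnetSegmenter above optimizes, computed on two toy binary masks with plain numpy; the smoothing term matches dice_coef. Dice is 2*|A∩B| / (|A|+|B|), so 1.0 means perfect overlap, and the Keras loss above is simply its negation.]

    import numpy as np

    a = np.array([1, 1, 0, 0], dtype=float)  # ground-truth mask, flattened
    b = np.array([1, 0, 0, 0], dtype=float)  # predicted mask, flattened
    smooth = 1.0
    intersection = np.sum(a * b)
    dice = (2.0 * intersection + smooth) / (np.sum(a) + np.sum(b) + smooth)
    print(dice)  # 0.75
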
From 269badbc42cbd3d7f71bf7eeba0bc693942f787c Mon Sep 17 00:00:00 2001
From: Omid
Date: Sun, 25 Mar 2018 13:47:28 -0400
Subject: [PATCH 13/20] adds postprocessing logic

---
 src/postprocessing/Postprocessor.py | 59 +++++++++++++++++++++++++++++
 src/postprocessing/__init__.py      |  0
 2 files changed, 59 insertions(+)
 create mode 100644 src/postprocessing/Postprocessor.py
 create mode 100644 src/postprocessing/__init__.py

diff --git a/src/postprocessing/Postprocessor.py b/src/postprocessing/Postprocessor.py
new file mode 100644
index 0000000..6c21e53
--- /dev/null
+++ b/src/postprocessing/Postprocessor.py
@@ -0,0 +1,59 @@
+"""This file manages the postprocessing of the objects in the images
+this is based on the script :
+https://github.com/dsp-uga/team-ball/blob/master/src/postprocessing/postprocessing.py
+"""
+
+import numpy as np
+import cv2
+import json
+import os
+
+
+def postProcess (theDic, output_path, size_dic):
+    """
+    this function handles the postprocessing of the predicted values,
+    :param theDic: dictionary of predictions in format { "sampleName" : ValueArray }
+    :param output_path: path to the directory to which the per-sample PNG masks are written
+    :param size_dic: the original (pre-padding) image size for each sample
+    """
+
+    def downsize ( inp , size):
+        return inp[ :size[0], :size[1] ]
+
+    # validate input:
+    if not(theDic):
+        raise ValueError('One of the values filename or theDic has to be set!')
+
+
+    file_name_values_dic =theDic
+
+    final_dic = []
+
+    for key in file_name_values_dic :
+        theImage = downsize( file_name_values_dic[key] , size_dic[key] )
+        theImage = 2 * theImage
+        cv2.imwrite( os.path.join( output_path, key+".png" ) , theImage )
+        # find connected components
+        # x, markers = cv2.connectedComponents( file_name_values_dic[key] )
+        #
+        # #convert them to the writeable format
+        # temp = []
+        # for i in range (1,x) :
+        #     temp.append( list( [int(pick[0]),int(pick[1])] for pick in np.argwhere(markers==i) ))
+        #
+        # # convert lists to dictionaries
+        # temp = [ {"coordinates": pick } for pick in temp ]
+        #
+        # # add sample name info to the dictionary
+        # temp_dic = { "dataset": key,
+        #              "regions": temp
+        #              }
+        #
+        # # add to the completed object
+        # final_dic.append( temp_dic )
+        #
+        # # create json file string
+        # json_dump = json.dumps( final_dic, indent=4 )
+        # # save the json file string
+        # with open( output_file_name,'w' ) as file:
+        #     file.write(json_dump)
diff --git a/src/postprocessing/__init__.py b/src/postprocessing/__init__.py
new file mode 100644
index 0000000..e69de29
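
[Editor's note: what the active branch of postProcess above does to one prediction, sketched with dummy data. The crop undoes the zero-padding from change_size, and the factor 2 maps the network's 0/1 cilia mask back to the value used for cilia in the ground-truth masks.]

    import numpy as np

    pred = np.zeros((640, 640), dtype=np.uint8)
    pred[10:20, 10:20] = 1                     # a predicted cilia blob
    original_size = (480, 640)                 # per-sample entry from size_dic
    out = 2 * pred[:original_size[0], :original_size[1]]
    print(out.shape, out.max())                # (480, 640) 2
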
From a3795f7de50f14aa460db83141c077d50c55c228 Mon Sep 17 00:00:00 2001
From: Omid
Date: Sun, 25 Mar 2018 13:47:53 -0400
Subject: [PATCH 14/20] fixes dimension issues with preprocessor

---
 src/main.py                       | 26 +++++++++++++++++---------
 src/preprocessing/EveryOther.py   | 27 ++++++++++++++++-----------
 src/preprocessing/preprocessor.py |  2 +-
 src/segmentation/segmenter.py     | 20 +++++++++++---------
 4 files changed, 45 insertions(+), 30 deletions(-)

diff --git a/src/main.py b/src/main.py
index caae069..5b64ff5 100644
--- a/src/main.py
+++ b/src/main.py
@@ -14,7 +14,7 @@
 # from src.postprocessing.postprocessing import postProcess
 # from src.Classifiers.Classifier import Classifier
 # from src.Classifiers.FCN import FCN_Classifier
 # from src.Classifiers.UNet_Classifier import UNET_Classifier
-
+from src.postprocessing.Postprocessor import postProcess
 description = ' '
@@ -47,6 +47,11 @@
 
                     help='Chooses the path to export model and numpy files')
 
+parser.add_argument("-o", "--output", default=None,
+
+                    help='sets the path for the output files to be stored')
+
+
 parser.add_argument("-lf", "--logfile", default="log.log",
                     help="Path to the log file, this file will contain the log records")
@@ -78,17 +83,17 @@
 # -------------- Loading the data
 
 # try to load pre calculated data :
-try:
-    x_train, y_train, x_test = the_preprocessor.load_from_files()
-
-except FileNotFoundError:
+# try:
+#     x_train, y_train, x_test, test_size_ref = the_preprocessor.load_from_files()
+#
+# except FileNotFoundError:
     # if there is no file to load set them as null, they will be loaded automatically
-    x_train, y_train, x_test = None, None, None
+x_train, y_train, x_test, test_size_ref = None, None, None, None
 
 # check if there is no data, read them from input ( this will take time! )
 if ( x_train is None):
     logging.info("Loading data from original data")
-    x_train, y_train, x_test = the_preprocessor.preprocess()
+    x_train, y_train, x_test, test_size_ref = the_preprocessor.preprocess()
     logging.info("Done loading data from original data")
 else:
     logging.info("data loaded from pre-calculated files")
@@ -109,9 +114,12 @@
 #------------ predict
 if( args.predict and x_test ):
     # run the prediction
-    predicted = the_Segmenter.predict( x_test )
+    predicted={}
+    for key in x_test :
+        predicted[key] = the_Segmenter.predict( x_test[key] )
+
 
     # save the results
-    # postProcess(theDic=predicted,output_file_name="test.json")
\ No newline at end of file
+    postProcess(theDic=predicted,output_path=args.output , size_dic=test_size_ref)
\ No newline at end of file
diff --git a/src/preprocessing/EveryOther.py b/src/preprocessing/EveryOther.py
index 782a1c6..bb1da0d 100644
--- a/src/preprocessing/EveryOther.py
+++ b/src/preprocessing/EveryOther.py
@@ -56,30 +56,35 @@ def preprocess(self):
                     train_y.append(y)
 
         # create the test set
-        test_x = []
+        # test_x = []
+        test_dic = {}
+        test_size_ref = {}
         if not self.testPath is None:
             for sample in sorted(os.listdir(self.testPath)):
-                image = self.change_size(cv2.imread(os.path.join(self.testPath, "%s/frame0050.png" % sample),0))
-                image = (image==2).astype(int)
-                test_x.append(np.expand_dims(image, axis=0))
+                image = cv2.imread(os.path.join(self.testPath, "%s/frame0050.png" % sample),0)
+                test_size_ref[sample]= image.shape
+                image = self.change_size(image)
+                image = (image==2).astype(int).reshape(image.shape + (1,))
+                test_dic[sample] = np.expand_dims(image, axis=0)
+                # test_x.append(np.expand_dims(image, axis=0))
 
         train_x = np.vstack(train_x)
         train_y = np.vstack(train_y)
-        test_x = np.vstack(test_x)
+        # test_x = np.vstack(test_x)
 
         train_x = train_x.reshape(train_x.shape + (1,))
         train_y = train_y.reshape(train_y.shape + (1,))
-        test_x = test_x.reshape(test_x.shape + (1,))
+        #test_x = test_x.reshape(test_x.shape + (1,))
 
         print(train_x.shape)
         print(train_y.shape)
-        print(test_x.shape)
+        # print(test_x.shape)
 
         self.x_train = train_x
-        self.x_test = test_x
+        # self.x_test = test_x
         self.y_train = train_y
 
-        if( not self.exportPath is None):
-            self.save_to_file()
+        # if( not self.exportPath is None):
+        #     self.save_to_file()
 
-        return train_x , train_y , test_x
\ No newline at end of file
+        return train_x , train_y , test_dic, test_size_ref
\ No newline at end of file
diff --git a/src/preprocessing/preprocessor.py b/src/preprocessing/preprocessor.py
index ad8bd25..59bc7fc 100644
--- a/src/preprocessing/preprocessor.py
+++ b/src/preprocessing/preprocessor.py
@@ -51,7 +51,7 @@ def save_to_file(self):
         if (not self.name is None and not self.importPath is None):
             np.savez_compressed(os.path.join(self.importPath, "x_train_" + self.name + ".npy"), self.x_train)
             np.savez_compressed(os.path.join(self.importPath, "y_train_" + self.name + ".npy") , self.y_train)
-            np.savez_compressed(os.path.join(self.importPath, "x_test_" + self.name + ".npy") , self.x_test)
+            # np.savez_compressed(os.path.join(self.importPath, "x_test_" + self.name + ".npy") , self.x_test)
diff --git a/src/segmentation/segmenter.py b/src/segmentation/segmenter.py
index 99d30de..3ad9741 100644
--- a/src/segmentation/segmenter.py
+++ b/src/segmentation/segmenter.py
@@ -57,12 +57,14 @@ def predict(self, data_dic):
         :return: the predicted values
         """
 
-        # X_test = X_test.reshape(X_test.shape + (1,))
-        ret={}
-        for item in data_dic:
-
-            temp =np.array([data_dic[item].reshape(data_dic[item].shape + (1,))])
-            ret[item] =np.uint8( self.trained_model.predict(temp)[0].squeeze(axis=2) *255)
-        #  ret = [{x: self.trained_model.predict()[0]} for x in data_dic]
-
-        return ret
\ No newline at end of file
+        return np.uint(self.trained_model.predict(data_dic)[0])
+        #
+        # # X_test = X_test.reshape(X_test.shape + (1,))
+        # ret={}
+        # for item in data_dic:
+        #
+        #     temp =np.array([data_dic[item].reshape(data_dic[item].shape + (1,))])
+        #     ret[item] =np.uint8( self.trained_model.predict(temp)[0].squeeze(axis=2) *255)
+        #     # ret = [{x: self.trained_model.predict()[0]} for x in data_dic]
+        #
+        # return ret
\ No newline at end of file

From f2ec51962166608c4914f39f183282c695e38d2d Mon Sep 17 00:00:00 2001
From: Omid
Date: Sun, 25 Mar 2018 14:11:17 -0400
Subject: [PATCH 15/20] restructures the package model after #12

---
 Preprocessor.py => src/preprocessing/PixelVariance.py | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename Preprocessor.py => src/preprocessing/PixelVariance.py (100%)

diff --git a/Preprocessor.py b/src/preprocessing/PixelVariance.py
similarity index 100%
rename from Preprocessor.py
rename to src/preprocessing/PixelVariance.py

From f10708f751157415be587becaa0b7b3a56c1fe94 Mon Sep 17 00:00:00 2001
From: Omid
Date: Sun, 25 Mar 2018 14:42:20 -0400
Subject: [PATCH 16/20] changes to cilia only in preprocessing - potentially
 does the job for #15

---
 src/preprocessing/EveryOther.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/preprocessing/EveryOther.py b/src/preprocessing/EveryOther.py
index bb1da0d..06ddcaa 100644
--- a/src/preprocessing/EveryOther.py
+++ b/src/preprocessing/EveryOther.py
@@ -47,6 +47,7 @@ def preprocess(self):
                 # load train_y
                 y = self.change_size(cv2.imread( mask_path, 0))
                 y= np.expand_dims( y, axis=0 )
+                y=( y==2 ).astype(int)
 
                 # take into account the skip count and load the images
                 t = [ self.change_size(cv2.imread(os.path.join(self.trainingPath, "%s/frame%04d.png" % (sample, i)),0)) for i in range(0, 99, self.skip_count) ]
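
[Editor's note: the encoding behind the (y == 2) test added above, assuming the 0/1/2 mask convention this commit implies — 0 for background, 1 for cell, 2 for cilia. Comparing with 2 keeps only the cilia pixels as a binary training target.]

    import numpy as np

    mask = np.array([[0, 1, 2],
                     [2, 1, 0]])
    cilia_only = (mask == 2).astype(int)
    print(cilia_only)
    # [[0 0 1]
    #  [1 0 0]]
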
From f71eee5a4718a3abca8dfdc53220cb8325326327 Mon Sep 17 00:00:00 2001
From: Omid
Date: Mon, 26 Mar 2018 10:54:01 -0400
Subject: [PATCH 17/20] fixes issue with preprocessing

---
 src/main.py                         | 2 ++
 src/postprocessing/Postprocessor.py | 4 ++--
 src/preprocessing/EveryOther.py     | 2 +-
 3 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/main.py b/src/main.py
index 5b64ff5..1e2a7f6 100644
--- a/src/main.py
+++ b/src/main.py
@@ -115,7 +115,9 @@
 if( args.predict and x_test ):
     # run the prediction
     predicted={}
+    import numpy as np
     for key in x_test :
+        print( x_test[key].shape , np.max( x_test[key]) , np.min( x_test[key] ) )
         predicted[key] = the_Segmenter.predict( x_test[key] )
 
 
diff --git a/src/postprocessing/Postprocessor.py b/src/postprocessing/Postprocessor.py
index 6c21e53..fc7779c 100644
--- a/src/postprocessing/Postprocessor.py
+++ b/src/postprocessing/Postprocessor.py
@@ -30,9 +30,9 @@ def downsize ( inp , size):
     final_dic = []
 
     for key in file_name_values_dic :
-        theImage = downsize( file_name_values_dic[key] , size_dic[key] )
+        theImage = downsize(file_name_values_dic[key], size_dic[key])
         theImage = 2 * theImage
-        cv2.imwrite( os.path.join( output_path, key+".png" ) , theImage )
+        cv2.imwrite( os.path.join( output_path, key+".png"), theImage)
         # find connected components
         # x, markers = cv2.connectedComponents( file_name_values_dic[key] )
         #
diff --git a/src/preprocessing/EveryOther.py b/src/preprocessing/EveryOther.py
index 06ddcaa..3e9b2ab 100644
--- a/src/preprocessing/EveryOther.py
+++ b/src/preprocessing/EveryOther.py
@@ -65,7 +65,7 @@ def preprocess(self):
                 image = cv2.imread(os.path.join(self.testPath, "%s/frame0050.png" % sample),0)
                 test_size_ref[sample]= image.shape
                 image = self.change_size(image)
-                image = (image==2).astype(int).reshape(image.shape + (1,))
+                image = image.reshape(image.shape + (1,))
                 test_dic[sample] = np.expand_dims(image, axis=0)
                 # test_x.append(np.expand_dims(image, axis=0))

From 9392cdc11553d53a65989dd5e7b3ae1ca0b75e35 Mon Sep 17 00:00:00 2001
From: Omid
Date: Wed, 28 Mar 2018 13:23:11 -0400
Subject: [PATCH 18/20] adds loading of multiple images from one sample and
 adding them

---
 src/preprocessing/EveryOther.py | 22 +++++++++++++++++-----
 src/segmentation/segmenter.py   | 12 +++++++++++-
 2 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/src/preprocessing/EveryOther.py b/src/preprocessing/EveryOther.py
index 3e9b2ab..d8c9ee7 100644
--- a/src/preprocessing/EveryOther.py
+++ b/src/preprocessing/EveryOther.py
@@ -62,11 +62,23 @@ def preprocess(self):
         test_size_ref = {}
         if not self.testPath is None:
             for sample in sorted(os.listdir(self.testPath)):
-                image = cv2.imread(os.path.join(self.testPath, "%s/frame0050.png" % sample),0)
-                test_size_ref[sample]= image.shape
-                image = self.change_size(image)
-                image = image.reshape(image.shape + (1,))
-                test_dic[sample] = np.expand_dims(image, axis=0)
+                # image = cv2.imread(os.path.join(self.testPath, "%s/frame0050.png" % sample),0) #/ 255
+                # test_size_ref[sample]= image.shape
+                # image = self.change_size(image)
+                # image = image.reshape(image.shape + (1,))
+                # test_dic[sample] = np.expand_dims(image, axis=0)
+                print (os.path.join(self.testPath, sample))
+                if '.DS_Store' in sample : continue
+
+                t = [self.change_size(cv2.imread(os.path.join(self.testPath, "%s/frame%04d.png" % (sample, i)), 0))
+                     for i in range(0, 99, 3)]
+
+                test_size_ref[sample] = t[0].shape
+                t = [np.expand_dims(x.reshape(x.shape + (1,)), axis=0) for x in t]
+
+                test_dic[sample] = np.vstack(t)
+
+
                 # test_x.append(np.expand_dims(image, axis=0))
 
         train_x = np.vstack(train_x)
diff --git a/src/segmentation/segmenter.py b/src/segmentation/segmenter.py
index 3ad9741..dc36570 100644
--- a/src/segmentation/segmenter.py
+++ b/src/segmentation/segmenter.py
@@ -56,8 +56,18 @@ def predict(self, data_dic):
         :param data_dic:
         :return: the predicted values
         """
+        temp = self.trained_model.predict(data_dic)
+        # print ( type( temp ) )
+        # print( temp.shape )
+        # print(np.max(temp) , np.mean( temp ), np.min( temp ) )
+        temp = (temp>=0.5).astype(int)
+        temp = np.sum(temp, axis=0)
+        temp = (temp > 0.5).astype(int)
+        # print ( temp.shape , np.max( temp ), np.min(temp) )
+
+        return np.uint(temp)
+        # return np.uint(self.trained_model.predict(data_dic)[0])
 
-        return np.uint(self.trained_model.predict(data_dic)[0])
         #
         # # X_test = X_test.reshape(X_test.shape + (1,))
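
[Editor's note: the aggregation added to Segmenter.predict above, in isolation. Each of the 33 frames a test sample now contributes (range(0, 99, 3)) yields a sigmoid map; each map is thresholded at 0.5 and the per-pixel union over the frames becomes the sample's mask.]

    import numpy as np

    preds = np.random.rand(33, 640, 640, 1)          # per-frame sigmoid outputs
    binary = (preds >= 0.5).astype(int)
    union = (binary.sum(axis=0) > 0.5).astype(int)   # 1 where any frame fired
    print(union.shape)                               # (640, 640, 1)
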
From ab2bc788af2399820417453a5a98e583057356e3 Mon Sep 17 00:00:00 2001
From: Omid
Date: Wed, 28 Mar 2018 15:39:59 -0400
Subject: [PATCH 19/20] fixes #18

---
 src/preprocessing/EveryOther.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/preprocessing/EveryOther.py b/src/preprocessing/EveryOther.py
index d8c9ee7..89eb3c2 100644
--- a/src/preprocessing/EveryOther.py
+++ b/src/preprocessing/EveryOther.py
@@ -70,10 +70,13 @@ def preprocess(self):
                 print (os.path.join(self.testPath, sample))
                 if '.DS_Store' in sample : continue
 
-                t = [self.change_size(cv2.imread(os.path.join(self.testPath, "%s/frame%04d.png" % (sample, i)), 0))
+                t = [cv2.imread(os.path.join(self.testPath, "%s/frame%04d.png" % (sample, i)), 0)
                      for i in range(0, 99, 3)]
 
                 test_size_ref[sample] = t[0].shape
+
+                t = [ self.change_size(x) for x in t ]
+
                 t = [np.expand_dims(x.reshape(x.shape + (1,)), axis=0) for x in t]
 
                 test_dic[sample] = np.vstack(t)

From 688e2a5171802f55c7c0c5d4d251660f1b464dfc Mon Sep 17 00:00:00 2001
From: Omid
Date: Wed, 28 Mar 2018 15:49:04 -0400
Subject: [PATCH 20/20] adds output folders to gitignore

---
 .gitignore | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/.gitignore b/.gitignore
index fc138e8..bd3e7e4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,6 +8,11 @@
 data/*.txt
 data/train/
 data/test/
 
+# ignore output files
+
+output/
+export/
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]