From 1edb01d26e4bbf04cc3c5e9c2057b1ce14bb341b Mon Sep 17 00:00:00 2001 From: Mariia Date: Sun, 7 Jan 2024 20:52:59 +0100 Subject: [PATCH 01/40] Add Random Forest model --- src/server/dcp_server/config.cfg | 1 + src/server/dcp_server/models.py | 51 ++++++++++++++-- src/server/dcp_server/utils.py | 99 ++++++++++++++++++++++++++++---- 3 files changed, 134 insertions(+), 17 deletions(-) diff --git a/src/server/dcp_server/config.cfg b/src/server/dcp_server/config.cfg index 1771417..984dac6 100644 --- a/src/server/dcp_server/config.cfg +++ b/src/server/dcp_server/config.cfg @@ -18,6 +18,7 @@ "model_type": "cyto" }, "classifier":{ + "model_class": "RandomForest", "in_channels": 1, "num_classes": 3, "black_bg": "False", diff --git a/src/server/dcp_server/models.py b/src/server/dcp_server/models.py index d4bba11..d7472e6 100644 --- a/src/server/dcp_server/models.py +++ b/src/server/dcp_server/models.py @@ -7,12 +7,15 @@ from tqdm import tqdm import numpy as np +from sklearn.ensemble import RandomForestClassifier +from sklearn.metrics import f1_score + from cellpose.metrics import aggregated_jaccard_index #from segment_anything import SamPredictor, sam_model_registry #from segment_anything.automatic_mask_generator import SamAutomaticMaskGenerator -from dcp_server.utils import get_centered_patches, find_max_patch_size, create_patch_dataset +from dcp_server.utils import get_centered_patches, find_max_patch_size, create_patch_dataset, create_dataset_for_rf class CustomCellposeModel(models.CellposeModel, nn.Module): """Custom cellpose model inheriting the attributes and functions from the original CellposeModel and implementing @@ -103,8 +106,8 @@ class CellClassifierFCNN(nn.Module): def __init__(self, model_config, train_config, eval_config): super().__init__() - self.in_channels = model_config["classifier"]["in_channels"] - self.num_classes = model_config["classifier"]["num_classes"] + self.in_channels = model_config["classifier"].get("in_channels",1) + self.num_classes = model_config["classifier"].get("num_classes",3) self.train_config = train_config["classifier"] self.eval_config = eval_config["classifier"] @@ -221,11 +224,20 @@ def __init__(self, model_config, train_config, eval_config): self.train_config = train_config self.eval_config = eval_config + self.classifier_class = self.model_config.get("classifier").get("model_class", "CellClassifierFCNN") + # Initialize the cellpose model and the classifier self.segmentor = CustomCellposeModel(self.model_config, self.train_config, self.eval_config) - self.classifier = CellClassifierFCNN(self.model_config, + + if self.classifier_class == "FCNN": + self.classifier = CellClassifierFCNN(self.model_config, + self.train_config, + self.eval_config) + + elif self.classifier_class == "RandomForest": + self.classifier = CellClassifierShallowModel(self.model_config, self.train_config, self.eval_config) @@ -248,11 +260,15 @@ def train(self, imgs, masks): self.segmentor.train(imgs, masks_instances) # create patch dataset to train classifier masks_classes = list(masks[:,1,...]) #[((mask > 0) * np.arange(1, 4)).sum(-1) for mask in masks] - patches, labels = create_patch_dataset(imgs, + patches, patch_masks, labels = create_patch_dataset(imgs, masks_classes, masks_instances, noise_intensity = self.train_config["classifier"]["train_data"]["noise_intensity"], max_patch_size = self.train_config["classifier"]["train_data"]["patch_size"]) + + if self.classifier_class == "RandomForest": + patches = create_dataset_for_rf(patches, patch_masks) + # train classifier 
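        # for the "RandomForest" branch above, create_dataset_for_rf replaces each image patch
        # with a 1-D feature vector (pyradiomics 2D shape features concatenated with intensity
        # statistics), so the shallow classifier is fit on a feature matrix rather than on raw patches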
self.classifier.train(patches, labels) @@ -271,10 +287,12 @@ def eval(self, img): noise_intensity = self.eval_config["classifier"]["data"]["noise_intensity"] # get patches centered around detected objects - patches, instance_labels, _ = get_centered_patches(img, + patches, patch_masks, instance_labels, _ = get_centered_patches(img, instance_mask, max_patch_size, noise_intensity=noise_intensity) + if self.classifier_class == "RandomForest": + patches = create_dataset_for_rf(patches, patch_masks) # loop over patches and create classification mask for idx, patch in enumerate(patches): patch_class = self.classifier.eval(patch) # patch size should be HxWxC, e.g. 64,64,3 @@ -286,7 +304,28 @@ def eval(self, img): return final_mask +class CellClassifierShallowModel: + + def __init__(self, model_config, train_config, eval_config): + self.model_config = model_config + self.train_config = train_config + self.eval_config = eval_config + + self.model = RandomForestClassifier() + + + def train(self, X_train, y_train): + + self.model.fit(X_train,y_train) + + y_hat = self.model.predict(X_train) + self.metric = f1_score(y_train, y_hat, average='micro') + + + def eval(self, X_test): + print(f"X_test: {X_test.shape}") + return self.model.predict(X_test.reshape(1,-1)) # class CustomSAMModel(): # # https://github.com/facebookresearch/segment-anything/blob/main/notebooks/automatic_mask_generator_example.ipynb diff --git a/src/server/dcp_server/utils.py b/src/server/dcp_server/utils.py index 86f5466..f1d8a8b 100644 --- a/src/server/dcp_server/utils.py +++ b/src/server/dcp_server/utils.py @@ -3,6 +3,10 @@ import numpy as np from scipy.ndimage import find_objects, center_of_mass from skimage import measure +from copy import deepcopy + +import SimpleITK as sitk +from radiomics import shape2D def read_config(name, config_path = 'config.cfg') -> dict: """Reads the configuration file @@ -72,9 +76,12 @@ def crop_centered_padded_patch(x: np.ndarray, x[m] = 0 if noise_intensity is not None: x[m] = np.random.normal(scale=noise_intensity, size=x[m].shape) + if mask is not None: + mask[m] = 0 patch = x[max(top, 0):min(bottom, x.shape[0]), max(left, 0):min(right, x.shape[1]), :] - + if mask is not None: + mask = mask[max(top, 0):min(bottom, x.shape[0]), max(left, 0):min(right, x.shape[1]), :] # Calculate the required padding amounts size_x, size_y = x.shape[1], x.shape[0] @@ -83,23 +90,39 @@ def crop_centered_padded_patch(x: np.ndarray, patch = np.hstack(( np.random.normal(scale=noise_intensity, size=(patch.shape[0], abs(left), patch.shape[2])).astype(np.uint8), patch)) + if mask is not None: + mask = np.hstack(( + np.zeros((mask.shape[0], abs(left), mask.shape[2])).astype(np.uint8), + mask)) # Apply padding on the right side if necessary if right > size_x: patch = np.hstack(( patch, np.random.normal(scale=noise_intensity, size=(patch.shape[0], (right - size_x), patch.shape[2])).astype(np.uint8))) + if mask is not None: + mask = np.hstack(( + mask, + np.zeros((mask.shape[0], (right - size_x), mask.shape[2])).astype(np.uint8))) # Apply padding on the top side if necessary if top < 0: patch = np.vstack(( np.random.normal(scale=noise_intensity, size=(abs(top), patch.shape[1], patch.shape[2])).astype(np.uint8), patch)) + if mask is not None: + mask = np.vstack(( + np.zeros((abs(top), mask.shape[1], mask.shape[2])).astype(np.uint8), + mask)) # Apply padding on the bottom side if necessary if bottom > size_y: patch = np.vstack(( patch, np.random.normal(scale=noise_intensity, size=(bottom - size_y, patch.shape[1], 
patch.shape[2])).astype(np.uint8))) - - return patch + if mask is not None: + mask = np.vstack(( + mask, + np.zeros((bottom - size_y, mask.shape[1], mask.shape[2])).astype(np.uint8))) + + return patch, mask def get_center_of_mass_and_label(mask: np.ndarray) -> np.ndarray: @@ -146,7 +169,7 @@ def get_centered_patches(img, ''' - patches, instance_labels, class_labels = [], [], [] + patches, patch_masks, instance_labels, class_labels = [], [], [], [] # if image is 2D add an additional dim for channels if img.ndim<3: img = img[:, :, np.newaxis] if mask.ndim<3: mask = mask[:, :, np.newaxis] @@ -155,13 +178,17 @@ def get_centered_patches(img, # Crop patches around each center of mass for c, l in zip(centers_of_mass, instance_labels): c_x, c_y = c - patch = crop_centered_padded_patch(img.copy(), + + patch, patch_mask = crop_centered_padded_patch(img.copy(), (c_x, c_y), (p_size, p_size), l, - mask=mask, + mask=deepcopy(mask), noise_intensity=noise_intensity) + + patches.append(patch) + patch_masks.append(patch_mask) if mask_class is not None: # get the class instance for the specific object instance_labels.append(l) @@ -169,7 +196,7 @@ def get_centered_patches(img, #-1 because labels from mask start from 1, we want classes to start from 0 class_labels.append(class_l-1) - return patches, instance_labels, class_labels + return patches, patch_masks, instance_labels, class_labels def get_objects(mask): return find_objects(mask) @@ -212,21 +239,71 @@ def create_patch_dataset(imgs, masks_classes, masks_instances, noise_intensity, the max cell size to define the patch size. All patches and masks should then be returned in the same format as imgs and masks (same type, i.e. check if tensor or np.array and same convention of dims, e.g. CxHxW) + include_mask(bool) : Flag indicating whether to include the mask along with patches. 
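        Returns:
            patches, patch_masks, labels : the cropped image patches, their matching cropped masks,
            and the zero-based class label assigned to each patch.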
''' if max_patch_size is None: max_patch_size = np.max([find_max_patch_size(mask) for mask in masks_instances]) - patches, labels = [], [] + patches, patch_masks, labels = [], [], [] for img, mask_class, mask_instance in zip(imgs, masks_classes, masks_instances): # mask_instance has dimension WxH # mask_class has dimension WxH - patch, _, label = get_centered_patches(img, + patch, patch_mask, _, label = get_centered_patches(img, mask_instance, max_patch_size, noise_intensity=noise_intensity, - mask_class=mask_class) + mask_class=mask_class, + ) patches.extend(patch) + patch_masks.extend(patch_mask) labels.extend(label) - return patches, labels \ No newline at end of file + return patches, patch_masks, labels + + +def get_shape_features(img, msk): + + msk = 255 * ((msk) > 0).astype(np.uint8) + + image = sitk.GetImageFromArray(img.squeeze()) + roi_mask = sitk.GetImageFromArray(msk.squeeze()) + + shape_calculator = shape2D.RadiomicsShape2D(inputImage=image, inputMask=roi_mask, label=255) + # # Calculate the shape-based radiomic features + shape_features = shape_calculator.execute() + + return np.array(list(shape_features.values())) + +def extract_intensity_features(image, mask): + + features = {} + + # Ensure the image and mask have the same dimensions + + if image.shape != mask.shape: + raise ValueError("Image and mask must have the same dimensions") + + masked_image = image[(mask>0)] + # features["min_intensity"] = np.min(masked_image) + # features["max_intensity"] = np.max(masked_image) + features["median_intensity"] = np.median(masked_image) + features["mean_intensity"] = np.mean(masked_image) + features["25th_percentile_intensity"] = np.percentile(masked_image, 25) + features["75th_percentile_intensity"] = np.percentile(masked_image, 75) + + return np.array(list(features.values())) + +def create_dataset_for_rf(imgs, masks): + + # X-features, y-labels + X = [] + + for img, msk in zip(imgs, masks): + + shape_features = get_shape_features(img, msk) + intensity_features = extract_intensity_features(img, msk) + features_list = np.concatenate((shape_features,intensity_features), axis=0) + X.append(features_list) + + return X \ No newline at end of file From 929e2db0c963fef02155fe7e088f07c6d6770a67 Mon Sep 17 00:00:00 2001 From: Mariia Date: Sun, 7 Jan 2024 20:53:22 +0100 Subject: [PATCH 02/40] Add tests for Random Forest --- src/server/test/test_config.cfg | 1 + src/server/test/test_integration.py | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/server/test/test_config.cfg b/src/server/test/test_config.cfg index 04073ed..1573967 100644 --- a/src/server/test/test_config.cfg +++ b/src/server/test/test_config.cfg @@ -18,6 +18,7 @@ "model_type": "cyto" }, "classifier":{ + "model_class": "RandomForest", "in_channels": 1, "num_classes": 3, "black_bg": "False", diff --git a/src/server/test/test_integration.py b/src/server/test/test_integration.py index 05cf39e..989e0d5 100644 --- a/src/server/test/test_integration.py +++ b/src/server/test/test_integration.py @@ -172,7 +172,11 @@ def test_train_eval_run(data_train, data_eval, model): attrs = model.__dict__.keys() if "classifier" in attrs: - assert(model.classifier.loss<0.4) + attrs_classifier = model.classifier.__dict__.keys() + if "loss" in attrs_classifier: + assert(model.classifier.loss<0.4) + if "metric" in attrs_classifier: + assert(model.classifier.metric>0.4) if "metric" in attrs: assert(model.metric>0.1) if "loss" in attrs: From e0f72d44a14abe92dc4e4e4008ef59be00d660af Mon Sep 17 00:00:00 2001 From: Mariia Date: Sun, 
14 Jan 2024 16:46:38 +0100 Subject: [PATCH 03/40] fix comments in pr, documentation to be added --- src/server/dcp_server/models.py | 16 +++++++++++----- src/server/requirements.txt | 2 ++ src/server/test/test_integration.py | 12 ++++++------ 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/src/server/dcp_server/models.py b/src/server/dcp_server/models.py index d7472e6..e636f66 100644 --- a/src/server/dcp_server/models.py +++ b/src/server/dcp_server/models.py @@ -8,7 +8,7 @@ import numpy as np from sklearn.ensemble import RandomForestClassifier -from sklearn.metrics import f1_score +from sklearn.metrics import f1_score, log_loss from cellpose.metrics import aggregated_jaccard_index @@ -267,10 +267,14 @@ def train(self, imgs, masks): max_patch_size = self.train_config["classifier"]["train_data"]["patch_size"]) if self.classifier_class == "RandomForest": - patches = create_dataset_for_rf(patches, patch_masks) - + features = create_dataset_for_rf(patches, patch_masks) + self.classifier.train(features, labels) + else: + self.classifier.train(patches, labels) # train classifier - self.classifier.train(patches, labels) + + self.metric = (self.segmentor.metric + self.classifier.metric) / 2 + self.loss = (self.segmentor.loss + self.classifier.loss) / 2 def eval(self, img): # TBD we assume image is either 2D [H, W] (see fsimage storage) @@ -318,9 +322,11 @@ def __init__(self, model_config, train_config, eval_config): def train(self, X_train, y_train): self.model.fit(X_train,y_train) - y_hat = self.model.predict(X_train) + y_hat_proba = self.model.predict_proba(X_train) + self.metric = f1_score(y_train, y_hat, average='micro') + self.loss = log_loss(y_train, y_hat_proba) def eval(self, X_test): diff --git a/src/server/requirements.txt b/src/server/requirements.txt index d954704..ea982b4 100644 --- a/src/server/requirements.txt +++ b/src/server/requirements.txt @@ -4,3 +4,5 @@ scikit-image>=0.19.3 torchmetrics>=0.11.4 torch>=2.1.0 pytest>=7.4.3 +scikit-learn +simpleITK diff --git a/src/server/test/test_integration.py b/src/server/test/test_integration.py index 989e0d5..f7fefed 100644 --- a/src/server/test/test_integration.py +++ b/src/server/test/test_integration.py @@ -171,12 +171,12 @@ def test_train_eval_run(data_train, data_eval, model): # retrieve the attribute names of the class of the current model attrs = model.__dict__.keys() - if "classifier" in attrs: - attrs_classifier = model.classifier.__dict__.keys() - if "loss" in attrs_classifier: - assert(model.classifier.loss<0.4) - if "metric" in attrs_classifier: - assert(model.classifier.metric>0.4) + # if "classifier" in attrs: + # # attrs_classifier = model.classifier.__dict__.keys() + # # if "loss" in attrs_classifier: + # assert(model.classifier.loss<0.4) + # # if "metric" in attrs_classifier: + # assert(model.classifier.metric>0.4) if "metric" in attrs: assert(model.metric>0.1) if "loss" in attrs: From 10cd825112c512c9bd51dceaa4b025b06392475f Mon Sep 17 00:00:00 2001 From: Mariia Date: Sun, 14 Jan 2024 23:43:33 +0100 Subject: [PATCH 04/40] Solve NotFitted Error --- src/server/dcp_server/models.py | 17 +++++++++++++---- src/server/dcp_server/serviceclasses.py | 18 +++++++++++------- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/src/server/dcp_server/models.py b/src/server/dcp_server/models.py index e636f66..47d81bc 100644 --- a/src/server/dcp_server/models.py +++ b/src/server/dcp_server/models.py @@ -9,6 +9,7 @@ from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import f1_score, log_loss 
+from sklearn.exceptions import NotFittedError from cellpose.metrics import aggregated_jaccard_index @@ -272,9 +273,8 @@ def train(self, imgs, masks): else: self.classifier.train(patches, labels) # train classifier - self.metric = (self.segmentor.metric + self.classifier.metric) / 2 - self.loss = (self.segmentor.loss + self.classifier.loss) / 2 + self.loss = self.classifier.loss def eval(self, img): # TBD we assume image is either 2D [H, W] (see fsimage storage) @@ -301,6 +301,7 @@ def eval(self, img): for idx, patch in enumerate(patches): patch_class = self.classifier.eval(patch) # patch size should be HxWxC, e.g. 64,64,3 # Assign predicted class to corresponding location in final_mask + patch_class = patch_class.item() if isinstance(patch_class, torch.Tensor) else patch_class class_mask[instance_mask==instance_labels[idx]] = patch_class.item() + 1 # Apply mask to final_mask, retaining only regions where cellpose_mask is greater than 0 #class_mask = class_mask * (instance_mask > 0)#.long()) @@ -322,6 +323,7 @@ def __init__(self, model_config, train_config, eval_config): def train(self, X_train, y_train): self.model.fit(X_train,y_train) + y_hat = self.model.predict(X_train) y_hat_proba = self.model.predict_proba(X_train) @@ -330,8 +332,15 @@ def train(self, X_train, y_train): def eval(self, X_test): - print(f"X_test: {X_test.shape}") - return self.model.predict(X_test.reshape(1,-1)) + + X_test = X_test.reshape(1,-1) + + try: + y_hat = self.model.predict(X_test) + except NotFittedError as e: + y_hat = np.zeros(X_test.shape[0]) + + return y_hat # class CustomSAMModel(): # # https://github.com/facebookresearch/segment-anything/blob/main/notebooks/automatic_mask_generator_example.ipynb diff --git a/src/server/dcp_server/serviceclasses.py b/src/server/dcp_server/serviceclasses.py index 410ee42..246eabf 100644 --- a/src/server/dcp_server/serviceclasses.py +++ b/src/server/dcp_server/serviceclasses.py @@ -26,7 +26,7 @@ def __init__(self, model, save_model_path): self.model = model self.save_model_path = save_model_path # update with the latest model if it already exists to continue training from there? - self.check_and_load_model() + # self.check_and_load_model() @bentoml.Runnable.method(batchable=False) def evaluate(self, img: np.ndarray) -> np.ndarray: @@ -48,7 +48,7 @@ def evaluate(self, img: np.ndarray) -> np.ndarray: def check_and_load_model(self): bento_model_list = [model.tag.name for model in bentoml.models.list()] if self.save_model_path in bento_model_list: - loaded_model = bentoml.pytorch.load_model(self.save_model_path+":latest") + loaded_model = bentoml.picklable_model.load_model(self.save_model_path+":latest") assert loaded_model.__class__.__name__ == self.model.__class__.__name__, 'Check your config, loaded model and model to use not the same!' 
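            # the load call above now goes through bentoml.picklable_model rather than bentoml.pytorch,
            # presumably because the composite model can hold a scikit-learn RandomForest classifier,
            # which is not a torch.nn.Module and cannot be serialized by the pytorch saver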
self.model = loaded_model @@ -65,11 +65,15 @@ def train(self, imgs: List[np.ndarray], masks: List[np.ndarray]) -> str: """ self.model.train(imgs, masks) # Save the bentoml model - #bentoml.picklable_model.save_model(self.save_model_path, self.model) - bentoml.pytorch.save_model(self.save_model_path, # Model name in the local Model Store - self.model, # Model instance being saved - external_modules=[DCPModels] - ) + bentoml.picklable_model.save_model( + self.save_model_path, + self.model, + external_modules=[DCPModels], + ) + # bentoml.pytorch.save_model(self.save_model_path, # Model name in the local Model Store + # self.model, # Model instance being saved + # external_modules=[DCPModels] + # ) return self.save_model_path From 1e9719d05461996c24457e53f2a6a03b79ebdf99 Mon Sep 17 00:00:00 2001 From: Mariia Date: Sun, 14 Jan 2024 23:50:38 +0100 Subject: [PATCH 05/40] requirements extend for random forest --- src/server/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/src/server/requirements.txt b/src/server/requirements.txt index ea982b4..c77a485 100644 --- a/src/server/requirements.txt +++ b/src/server/requirements.txt @@ -6,3 +6,4 @@ torch>=2.1.0 pytest>=7.4.3 scikit-learn simpleITK +pyradiomics From 621ce50d7caa75debf44aa8eb86ced5b1f78b629 Mon Sep 17 00:00:00 2001 From: Mariia Date: Sun, 14 Jan 2024 23:59:25 +0100 Subject: [PATCH 06/40] versions updated --- src/server/requirements.txt | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/server/requirements.txt b/src/server/requirements.txt index c77a485..6ae27ab 100644 --- a/src/server/requirements.txt +++ b/src/server/requirements.txt @@ -4,6 +4,7 @@ scikit-image>=0.19.3 torchmetrics>=0.11.4 torch>=2.1.0 pytest>=7.4.3 -scikit-learn -simpleITK -pyradiomics +numpy>=1.24.4 +scikit-learn>=1.2.2 +SimpleITK>=2.2.1 +pyradiomics>=3.1.0 From ebe263cc9a8c713c176887bd04fdc6c5985751dc Mon Sep 17 00:00:00 2001 From: Mariia Date: Mon, 15 Jan 2024 00:01:37 +0100 Subject: [PATCH 07/40] requirements update --- src/server/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/requirements.txt b/src/server/requirements.txt index 6ae27ab..18e9d7d 100644 --- a/src/server/requirements.txt +++ b/src/server/requirements.txt @@ -7,4 +7,4 @@ pytest>=7.4.3 numpy>=1.24.4 scikit-learn>=1.2.2 SimpleITK>=2.2.1 -pyradiomics>=3.1.0 +pyradiomics==3.1.0 From 62337e8a9fbe75e22758ea26808d3493605ce68c Mon Sep 17 00:00:00 2001 From: Mariia Date: Mon, 15 Jan 2024 00:03:27 +0100 Subject: [PATCH 08/40] version update for pyradiomics --- src/server/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/requirements.txt b/src/server/requirements.txt index 18e9d7d..67fb321 100644 --- a/src/server/requirements.txt +++ b/src/server/requirements.txt @@ -7,4 +7,4 @@ pytest>=7.4.3 numpy>=1.24.4 scikit-learn>=1.2.2 SimpleITK>=2.2.1 -pyradiomics==3.1.0 +pyradiomics==3.0.1a1 From 7d870c677ff883d58b1037b1a49f15c8aeec69f6 Mon Sep 17 00:00:00 2001 From: Mariia Date: Mon, 15 Jan 2024 00:05:08 +0100 Subject: [PATCH 09/40] update requirements --- src/server/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/requirements.txt b/src/server/requirements.txt index 67fb321..3fe6261 100644 --- a/src/server/requirements.txt +++ b/src/server/requirements.txt @@ -7,4 +7,4 @@ pytest>=7.4.3 numpy>=1.24.4 scikit-learn>=1.2.2 SimpleITK>=2.2.1 -pyradiomics==3.0.1a1 +pyradiomics From c569f119a408caf2b068db7a3485deae8ac2da7f Mon Sep 17 00:00:00 2001 
From: Mariia Date: Mon, 15 Jan 2024 00:11:25 +0100 Subject: [PATCH 10/40] versions numpy --- src/server/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/requirements.txt b/src/server/requirements.txt index 3fe6261..64f17eb 100644 --- a/src/server/requirements.txt +++ b/src/server/requirements.txt @@ -4,7 +4,7 @@ scikit-image>=0.19.3 torchmetrics>=0.11.4 torch>=2.1.0 pytest>=7.4.3 -numpy>=1.24.4 +numpy scikit-learn>=1.2.2 SimpleITK>=2.2.1 pyradiomics From 14953c653b70433fa6e7be998c04178d6ef4c993 Mon Sep 17 00:00:00 2001 From: Mariia Date: Mon, 15 Jan 2024 00:16:47 +0100 Subject: [PATCH 11/40] fix dependencies --- src/server/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/src/server/requirements.txt b/src/server/requirements.txt index 64f17eb..2fa7010 100644 --- a/src/server/requirements.txt +++ b/src/server/requirements.txt @@ -1,3 +1,4 @@ +wheel cellpose>=2.2 bentoml==1.0.16 scikit-image>=0.19.3 From 4841f9809aeb4dc50f2d635f9fa0ed10ecc76093 Mon Sep 17 00:00:00 2001 From: Koren_Mariia <71977543+KorenMary@users.noreply.github.com> Date: Mon, 15 Jan 2024 00:19:52 +0100 Subject: [PATCH 12/40] Update test.yml --- .github/workflows/test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 423d425..b42f958 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -88,6 +88,7 @@ jobs: python -m pip install --upgrade pip python -m pip install setuptools pip install pytest + pip install wheel pip install coverage pip install -e ".[testing]" working-directory: src/server From 1f989c44be8c15a0669d11a0faad39516a04934d Mon Sep 17 00:00:00 2001 From: Mariia Date: Mon, 15 Jan 2024 00:27:45 +0100 Subject: [PATCH 13/40] fix versions --- src/server/requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/server/requirements.txt b/src/server/requirements.txt index 2fa7010..e3a9efb 100644 --- a/src/server/requirements.txt +++ b/src/server/requirements.txt @@ -1,4 +1,4 @@ -wheel +wheel==0.42.0 cellpose>=2.2 bentoml==1.0.16 scikit-image>=0.19.3 @@ -8,4 +8,4 @@ pytest>=7.4.3 numpy scikit-learn>=1.2.2 SimpleITK>=2.2.1 -pyradiomics +pyradiomics==3.0.1 \ No newline at end of file From 442d71b2f3cc10bd9f71aa682cf3736116bc6c22 Mon Sep 17 00:00:00 2001 From: Koren_Mariia <71977543+KorenMary@users.noreply.github.com> Date: Mon, 15 Jan 2024 00:30:05 +0100 Subject: [PATCH 14/40] Update test.yml --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index b42f958..1dbb4dc 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -86,7 +86,7 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - python -m pip install setuptools + python -m pip install --upgrade setuptools pip install pytest pip install wheel pip install coverage From 53796b38c69e94eefd60c1599e25b82921e080ef Mon Sep 17 00:00:00 2001 From: Koren_Mariia <71977543+KorenMary@users.noreply.github.com> Date: Mon, 15 Jan 2024 00:34:43 +0100 Subject: [PATCH 15/40] Update test.yml --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 1dbb4dc..3c68eed 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -72,7 +72,7 @@ jobs: strategy: matrix: platform: [ubuntu-latest, windows-latest, macos-latest] - python-version: [3.8, 3.9, 
"3.10"] + python-version: [3.7, 3.8, 3.9, "3.10"] steps: - name: Checkout Repository From 97be59382d88609b183794c6d3eb4bda51850a98 Mon Sep 17 00:00:00 2001 From: Koren_Mariia <71977543+KorenMary@users.noreply.github.com> Date: Mon, 15 Jan 2024 00:40:12 +0100 Subject: [PATCH 16/40] Update test.yml --- .github/workflows/test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 3c68eed..0c42bca 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -87,6 +87,7 @@ jobs: run: | python -m pip install --upgrade pip python -m pip install --upgrade setuptools + pip install numpy pip install pytest pip install wheel pip install coverage From eb0ac7c03053ff70c05e4c5678ad806c61a14fe3 Mon Sep 17 00:00:00 2001 From: Koren_Mariia <71977543+KorenMary@users.noreply.github.com> Date: Mon, 15 Jan 2024 00:41:53 +0100 Subject: [PATCH 17/40] Update test.yml --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 0c42bca..52a3f08 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -72,7 +72,7 @@ jobs: strategy: matrix: platform: [ubuntu-latest, windows-latest, macos-latest] - python-version: [3.7, 3.8, 3.9, "3.10"] + python-version: [3.8, 3.9, "3.10"] steps: - name: Checkout Repository From 98a3d42c4e6f07833c384cc848a960caef5ad7a2 Mon Sep 17 00:00:00 2001 From: Mariia Date: Tue, 16 Jan 2024 12:58:01 +0100 Subject: [PATCH 18/40] Add loss and metric calculation for all classes and use it during the integration test. --- src/server/dcp_server/models.py | 32 ++++++++++++++++++++++++++--- src/server/dcp_server/utils.py | 12 +++++------ src/server/test/test_integration.py | 6 ------ 3 files changed, 35 insertions(+), 15 deletions(-) diff --git a/src/server/dcp_server/models.py b/src/server/dcp_server/models.py index 47d81bc..a3cb8ca 100644 --- a/src/server/dcp_server/models.py +++ b/src/server/dcp_server/models.py @@ -12,6 +12,7 @@ from sklearn.exceptions import NotFittedError from cellpose.metrics import aggregated_jaccard_index +from cellpose.transforms import reshape_and_normalize_data #from segment_anything import SamPredictor, sam_model_registry #from segment_anything.automatic_mask_generator import SamAutomaticMaskGenerator @@ -42,6 +43,21 @@ def __init__(self, model_config, train_config, eval_config): self.mkldnn = False # otherwise we get error with saving model self.train_config = train_config self.eval_config = eval_config + self.loss_fun = nn.BCEWithLogitsLoss() + + def eval_logits(self, imgs): + """ + Evaluate the logits for a given set of images. 
+ """ + + x_test = reshape_and_normalize_data(imgs, channels=[0, 0]) + + img_tensor = torch.tensor(x_test[0]) + model = self.net.eval() + img_tensor = img_tensor.unsqueeze(0) if img_tensor.ndim == 3 else img_tensor + img_logits = model(img_tensor)[0].squeeze()[:,2,...].detach().numpy() + + return img_logits def update_configs(self, train_config, eval_config): self.train_config = train_config @@ -76,9 +92,17 @@ def train(self, imgs, masks): super().train(train_data=deepcopy(imgs), train_labels=masks, **self.train_config["segmentor"]) - pred_masks = [self.eval(img) for img in masks] + logits = self.eval_logits(imgs) + + pred_masks = [self.eval(img) for img in imgs] self.metric = np.mean(aggregated_jaccard_index(masks, pred_masks)) - # self.loss = self.loss_fn(masks, pred_masks) + + masks = torch.tensor(np.array(masks).astype(np.float32)>0).long().float() + pred_masks = torch.tensor(np.array(pred_masks).astype(np.float32)>0).float() + + print(logits.shape, masks.shape) + + self.loss = self.loss_fun(torch.tensor(logits).float(), masks) def masks_to_outlines(self, mask): """ get outlines of masks as a 0-1 array @@ -258,9 +282,11 @@ def train(self, imgs, masks): # train cellpose masks = np.array(masks) masks_instances = list(masks[:,0,...]) #[mask.sum(-1) for mask in masks] if masks[0].ndim == 3 else masks - self.segmentor.train(imgs, masks_instances) + + self.segmentor.train(deepcopy(imgs), masks_instances) # create patch dataset to train classifier masks_classes = list(masks[:,1,...]) #[((mask > 0) * np.arange(1, 4)).sum(-1) for mask in masks] + patches, patch_masks, labels = create_patch_dataset(imgs, masks_classes, masks_instances, diff --git a/src/server/dcp_server/utils.py b/src/server/dcp_server/utils.py index f1d8a8b..fbf01ad 100644 --- a/src/server/dcp_server/utils.py +++ b/src/server/dcp_server/utils.py @@ -73,15 +73,16 @@ def crop_centered_padded_patch(x: np.ndarray, # Zero out values in the patch where the mask is not equal to the central label # m = (mask_ != central_label) & (mask_ > 0) m = (mask_ != l) & (mask_ > 0) + # print(m.shape, x.shape) x[m] = 0 + # m_broadcasted = np.broadcast_to(m[:, :, None], x.shape) + # x[:,m_broadcasted] = 0 if noise_intensity is not None: x[m] = np.random.normal(scale=noise_intensity, size=x[m].shape) - if mask is not None: - mask[m] = 0 - - patch = x[max(top, 0):min(bottom, x.shape[0]), max(left, 0):min(right, x.shape[1]), :] - if mask is not None: + mask[m] = 0 mask = mask[max(top, 0):min(bottom, x.shape[0]), max(left, 0):min(right, x.shape[1]), :] + + patch = x[max(top, 0):min(bottom, x.shape[0]), max(left, 0):min(right, x.shape[1]), :] # Calculate the required padding amounts size_x, size_y = x.shape[1], x.shape[0] @@ -239,7 +240,6 @@ def create_patch_dataset(imgs, masks_classes, masks_instances, noise_intensity, the max cell size to define the patch size. All patches and masks should then be returned in the same format as imgs and masks (same type, i.e. check if tensor or np.array and same convention of dims, e.g. CxHxW) - include_mask(bool) : Flag indicating whether to include the mask along with patches. 
''' if max_patch_size is None: diff --git a/src/server/test/test_integration.py b/src/server/test/test_integration.py index f7fefed..3a31395 100644 --- a/src/server/test/test_integration.py +++ b/src/server/test/test_integration.py @@ -171,12 +171,6 @@ def test_train_eval_run(data_train, data_eval, model): # retrieve the attribute names of the class of the current model attrs = model.__dict__.keys() - # if "classifier" in attrs: - # # attrs_classifier = model.classifier.__dict__.keys() - # # if "loss" in attrs_classifier: - # assert(model.classifier.loss<0.4) - # # if "metric" in attrs_classifier: - # assert(model.classifier.metric>0.4) if "metric" in attrs: assert(model.metric>0.1) if "loss" in attrs: From 8bee547162e438fba65184beb0356592ab9e8c87 Mon Sep 17 00:00:00 2001 From: Mariia Date: Tue, 16 Jan 2024 13:42:08 +0100 Subject: [PATCH 19/40] Rename patches to features for RandomForest model --- src/server/dcp_server/models.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/server/dcp_server/models.py b/src/server/dcp_server/models.py index a3cb8ca..ff77b3c 100644 --- a/src/server/dcp_server/models.py +++ b/src/server/dcp_server/models.py @@ -100,8 +100,6 @@ def train(self, imgs, masks): masks = torch.tensor(np.array(masks).astype(np.float32)>0).long().float() pred_masks = torch.tensor(np.array(pred_masks).astype(np.float32)>0).float() - print(logits.shape, masks.shape) - self.loss = self.loss_fun(torch.tensor(logits).float(), masks) def masks_to_outlines(self, mask): @@ -322,10 +320,15 @@ def eval(self, img): max_patch_size, noise_intensity=noise_intensity) if self.classifier_class == "RandomForest": - patches = create_dataset_for_rf(patches, patch_masks) + features = create_dataset_for_rf(patches, patch_masks) # loop over patches and create classification mask - for idx, patch in enumerate(patches): - patch_class = self.classifier.eval(patch) # patch size should be HxWxC, e.g. 64,64,3 + for idx in range(len(patches)): + if self.classifier_class == "RandomForest": + patch_class = self.classifier.eval(features[idx]) + else: + # patch size should be HxWxC, e.g. 64,64,3 + patch_class = self.classifier.eval(patches[idx]) + # Assign predicted class to corresponding location in final_mask patch_class = patch_class.item() if isinstance(patch_class, torch.Tensor) else patch_class class_mask[instance_mask==instance_labels[idx]] = patch_class.item() + 1 From 20e93997c9ff0581a753767b260a3df580c0d60e Mon Sep 17 00:00:00 2001 From: Mariia Date: Tue, 16 Jan 2024 14:08:42 +0100 Subject: [PATCH 20/40] Update the docstring for crop_centered_padded_patch function. --- src/server/dcp_server/models.py | 1 + src/server/dcp_server/utils.py | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/server/dcp_server/models.py b/src/server/dcp_server/models.py index ff77b3c..4e1843f 100644 --- a/src/server/dcp_server/models.py +++ b/src/server/dcp_server/models.py @@ -357,6 +357,7 @@ def train(self, X_train, y_train): y_hat_proba = self.model.predict_proba(X_train) self.metric = f1_score(y_train, y_hat, average='micro') + # Binary Cross Entropy Loss self.loss = log_loss(y_train, y_hat_proba) diff --git a/src/server/dcp_server/utils.py b/src/server/dcp_server/utils.py index fbf01ad..43e994a 100644 --- a/src/server/dcp_server/utils.py +++ b/src/server/dcp_server/utils.py @@ -53,6 +53,10 @@ def crop_centered_padded_patch(x: np.ndarray, c (tuple): The coordinates (row, column) at the center of the patch. 
p (tuple): The size of the patch to be cropped (height, width). l (int): The instance label of the mask at the patch + mask (np.ndarray, optional): The mask array that asociated with the array x; + mask is used during training to mask out non-central elements; + for RandomForest, it is used to calculate pyradiomics features. + noise_intensity (float, optional): Intensity of noise to be added to the background. Returns: np.ndarray: The cropped patch with applied padding. @@ -71,12 +75,8 @@ def crop_centered_padded_patch(x: np.ndarray, if mask is not None: mask_ = mask.max(-1) if len(mask.shape) >= 3 else mask # Zero out values in the patch where the mask is not equal to the central label - # m = (mask_ != central_label) & (mask_ > 0) m = (mask_ != l) & (mask_ > 0) - # print(m.shape, x.shape) x[m] = 0 - # m_broadcasted = np.broadcast_to(m[:, :, None], x.shape) - # x[:,m_broadcasted] = 0 if noise_intensity is not None: x[m] = np.random.normal(scale=noise_intensity, size=x[m].shape) mask[m] = 0 From 29ea575e9082f0aee272a2d62692dc900781d98b Mon Sep 17 00:00:00 2001 From: Mariia Date: Tue, 16 Jan 2024 14:25:10 +0100 Subject: [PATCH 21/40] Add descriptions for the functions that extract features. --- src/server/dcp_server/utils.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/server/dcp_server/utils.py b/src/server/dcp_server/utils.py index 43e994a..3e28295 100644 --- a/src/server/dcp_server/utils.py +++ b/src/server/dcp_server/utils.py @@ -263,6 +263,17 @@ def create_patch_dataset(imgs, masks_classes, masks_instances, noise_intensity, def get_shape_features(img, msk): + """ + Calculate shape-based radiomic features from an image within the region defined by the mask. + + Args: + - img (np.ndarray): The input image. + - msk (np.ndarray): The mask corresponding to the image. + + Returns: + - np.ndarray: An array containing the calculated shape-based radiomic features, such as: + Elongation, Sphericity, Perimeter surface. + """ msk = 255 * ((msk) > 0).astype(np.uint8) @@ -276,6 +287,18 @@ def get_shape_features(img, msk): return np.array(list(shape_features.values())) def extract_intensity_features(image, mask): + """ + Extract intensity-based features from an image within the region defined by the mask. + + Args: + - image (np.ndarray): The input image. + - mask (np.ndarray): The mask defining the region of interest. + + Returns: + - np.ndarray: An array containing the extracted intensity-based features: + median intensity, mean intensity, 25th/75th percentile intensity within the masked region. 
+ + """ features = {} From b394ae54145d5226cb389ddb83c2a68f32dca98f Mon Sep 17 00:00:00 2001 From: Mariia Date: Tue, 16 Jan 2024 16:54:45 +0100 Subject: [PATCH 22/40] add test for RF model --- src/server/test/test_models.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 src/server/test/test_models.py diff --git a/src/server/test/test_models.py b/src/server/test/test_models.py new file mode 100644 index 0000000..b664b48 --- /dev/null +++ b/src/server/test/test_models.py @@ -0,0 +1,28 @@ +import numpy as np +import pytest +# from models import CellClassifierShallowModel +from sklearn.exceptions import NotFittedError + +import dcp_server.models as models +from dcp_server.utils import read_config +from synthetic_dataset import get_synthetic_dataset + + +""" +self.classifier = CellClassifierShallowModel(self.model_config, + self.train_config, + self.eval_config) +""" + +def test_eval_rf_not_fitted(): + + model_config = read_config('model', config_path='test/test_config.cfg') + train_config = read_config('train', config_path='test/test_config.cfg') + eval_config = read_config('eval', config_path='test/test_config.cfg') + + model_rf = models.CellClassifierShallowModel(model_config,train_config,eval_config) + + X_test = np.array([[1, 2, 3]]) + # if we don't fit the model than the model returns zeros + assert np.all(model_rf.eval(X_test)== np.zeros(X_test.shape)) + From ec105187a9aa6d406b47b5c3fdbda9e91a363a5e Mon Sep 17 00:00:00 2001 From: Mariia Date: Tue, 16 Jan 2024 17:25:04 +0100 Subject: [PATCH 23/40] Add test for update config function --- src/server/test/test_models.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/server/test/test_models.py b/src/server/test/test_models.py index b664b48..874fcb1 100644 --- a/src/server/test/test_models.py +++ b/src/server/test/test_models.py @@ -26,3 +26,20 @@ def test_eval_rf_not_fitted(): # if we don't fit the model than the model returns zeros assert np.all(model_rf.eval(X_test)== np.zeros(X_test.shape)) +def test_update_configs(): + + model_config = read_config('model', config_path='test/test_config.cfg') + train_config = read_config('train', config_path='test/test_config.cfg') + eval_config = read_config('eval', config_path='test/test_config.cfg') + + model = models.CustomCellposeModel(model_config,train_config,eval_config) + + new_train_config = {"param1": "value1"} + new_eval_config = {"param2": "value2"} + + model.update_configs(new_train_config, new_eval_config) + + assert model.train_config == new_train_config + assert model.eval_config == new_eval_config + + \ No newline at end of file From 844bbaa0a494223ce79452514001b38ccd64bdcc Mon Sep 17 00:00:00 2001 From: Mariia Date: Tue, 16 Jan 2024 22:22:29 +0100 Subject: [PATCH 24/40] Include FCNN check in integration tests. 
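This change also splits the single test_config.cfg into test/configs/test_config_RF.cfg and
test/configs/test_config_fcnn.cfg and parametrizes the integration tests over every config found
there, so both classifier variants are exercised by the same test body. A minimal sketch of the
pattern, mirroring the fixtures in the diff below:

    from glob import glob
    import pytest
    from dcp_server.utils import read_config

    config_paths = glob("test/configs/*.cfg")  # picks up both the RF and the FCNN config

    @pytest.fixture(params=config_paths)
    def config_path(request):
        return request.param

    @pytest.fixture()
    def model(model_class, config_path):
        # each model class is rebuilt once per config, so the train/eval test
        # runs for both the RandomForest and the FCNN classifier head
        model_config = read_config('model', config_path=config_path)
        train_config = read_config('train', config_path=config_path)
        eval_config = read_config('eval', config_path=config_path)
        return model_class(model_config, train_config, eval_config)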
--- src/server/dcp_server/models.py | 12 +++- .../test_config_RF.cfg} | 0 src/server/test/configs/test_config_fcnn.cfg | 68 +++++++++++++++++++ src/server/test/test_integration.py | 18 +++-- src/server/test/test_models.py | 14 +++- 5 files changed, 101 insertions(+), 11 deletions(-) rename src/server/test/{test_config.cfg => configs/test_config_RF.cfg} (100%) create mode 100644 src/server/test/configs/test_config_fcnn.cfg diff --git a/src/server/dcp_server/models.py b/src/server/dcp_server/models.py index 4e1843f..e064427 100644 --- a/src/server/dcp_server/models.py +++ b/src/server/dcp_server/models.py @@ -3,6 +3,7 @@ from torch import nn from torch.optim import Adam from torch.utils.data import TensorDataset, DataLoader +from torchmetrics import F1Score from copy import deepcopy from tqdm import tqdm import numpy as np @@ -158,6 +159,8 @@ def __init__(self, model_config, train_config, eval_config): self.final_conv = nn.Conv2d(128, self.num_classes, 1) self.pooling = nn.AdaptiveMaxPool2d(1) + self.metric_fn = F1Score(num_classes=self.num_classes, task="multiclass") + def update_configs(self, train_config, eval_config): self.train_config = train_config self.eval_config = eval_config @@ -204,7 +207,7 @@ def train (self, imgs, labels): # TODO check if we should replace self.parameters with super.parameters() for _ in tqdm(range(epochs), desc="Running CellClassifierFCNN training"): - self.loss = 0 + self.loss, self.metric = 0, 0 for data in train_dataloader: imgs, labels = data @@ -216,7 +219,10 @@ def train (self, imgs, labels): optimizer.step() self.loss += l.item() + self.metric += self.metric_fn(preds, labels) + self.loss /= len(train_dataloader) + self.metric /= len(train_dataloader) def eval(self, img): """ @@ -331,7 +337,7 @@ def eval(self, img): # Assign predicted class to corresponding location in final_mask patch_class = patch_class.item() if isinstance(patch_class, torch.Tensor) else patch_class - class_mask[instance_mask==instance_labels[idx]] = patch_class.item() + 1 + class_mask[instance_mask==instance_labels[idx]] = patch_class + 1 # Apply mask to final_mask, retaining only regions where cellpose_mask is greater than 0 #class_mask = class_mask * (instance_mask > 0)#.long()) final_mask = np.stack((instance_mask, class_mask), axis=self.eval_config['mask_channel_axis']).astype(np.uint16) # size 2xHxW @@ -357,7 +363,7 @@ def train(self, X_train, y_train): y_hat_proba = self.model.predict_proba(X_train) self.metric = f1_score(y_train, y_hat, average='micro') - # Binary Cross Entropy Loss + # Binary Cross Entrop Loss self.loss = log_loss(y_train, y_hat_proba) diff --git a/src/server/test/test_config.cfg b/src/server/test/configs/test_config_RF.cfg similarity index 100% rename from src/server/test/test_config.cfg rename to src/server/test/configs/test_config_RF.cfg diff --git a/src/server/test/configs/test_config_fcnn.cfg b/src/server/test/configs/test_config_fcnn.cfg new file mode 100644 index 0000000..34465f6 --- /dev/null +++ b/src/server/test/configs/test_config_fcnn.cfg @@ -0,0 +1,68 @@ +{ + "setup": { + "segmentation": "GeneralSegmentation", + "accepted_types": [".jpg", ".jpeg", ".png", ".tiff", ".tif"], + "seg_name_string": "_seg" + }, + + "service": { + "model_to_use": "CustomCellposeModel", + "save_model_path": "mito", + "runner_name": "cellpose_runner", + "service_name": "data-centric-platform", + "port": 7010 + }, + + "model": { + "segmentor": { + "model_type": "cyto" + }, + "classifier":{ + "model_class": "FCNN", + "in_channels": 1, + "num_classes": 3, + "black_bg": 
"False", + "include_mask": "False" + } + }, + + "data": { + "data_root": "data" + }, + + "train":{ + "segmentor":{ + "n_epochs": 20, + "channels": [0,0], + "min_train_masks": 1, + "learning_rate":0.01 + }, + "classifier":{ + "train_data":{ + "patch_size": 64, + "noise_intensity": 5, + "num_classes": 3 + }, + "n_epochs": 20, + "lr": 0.005, + "batch_size": 5, + "optimizer": "Adam" + } + }, + + "eval":{ + "segmentor": { + "z_axis": null, + "channel_axis": null, + "rescale": 1, + "batch_size": 1 + }, + "classifier": { + "data":{ + "patch_size": 64, + "noise_intensity": 5 + } + }, + "mask_channel_axis": 0 + } +} \ No newline at end of file diff --git a/src/server/test/test_integration.py b/src/server/test/test_integration.py index 3a31395..7809eeb 100644 --- a/src/server/test/test_integration.py +++ b/src/server/test/test_integration.py @@ -1,10 +1,11 @@ -import os import sys import torch from torchmetrics import JaccardIndex import numpy as np import random +from glob import glob + import inspect # from importlib.machinery import SourceFileLoader @@ -29,17 +30,22 @@ and not cls_name.startswith("CellClassifier") ] +config_paths = glob("test/configs/*.cfg") @pytest.fixture(params=model_classes) def model_class(request): return request.param +@pytest.fixture(params=config_paths) +def config_path(request): + return request.param + @pytest.fixture() -def model(model_class): +def model(model_class, config_path): - model_config = read_config('model', config_path='test/test_config.cfg') - train_config = read_config('train', config_path='test/test_config.cfg') - eval_config = read_config('eval', config_path='test/test_config.cfg') + model_config = read_config('model', config_path=config_path) + train_config = read_config('train', config_path=config_path) + eval_config = read_config('eval', config_path=config_path) model = model_class(model_config, train_config, eval_config) @@ -174,7 +180,7 @@ def test_train_eval_run(data_train, data_eval, model): if "metric" in attrs: assert(model.metric>0.1) if "loss" in attrs: - assert(model.loss<0.3) + assert(model.loss<0.75) # for PatchCNN model if pred_mask.ndim > 2: diff --git a/src/server/test/test_models.py b/src/server/test/test_models.py index 874fcb1..f878c30 100644 --- a/src/server/test/test_models.py +++ b/src/server/test/test_models.py @@ -23,7 +23,7 @@ def test_eval_rf_not_fitted(): model_rf = models.CellClassifierShallowModel(model_config,train_config,eval_config) X_test = np.array([[1, 2, 3]]) - # if we don't fit the model than the model returns zeros + # if we don't fit the model then the model returns zeros assert np.all(model_rf.eval(X_test)== np.zeros(X_test.shape)) def test_update_configs(): @@ -42,4 +42,14 @@ def test_update_configs(): assert model.train_config == new_train_config assert model.eval_config == new_eval_config - \ No newline at end of file +def test_fcnn(): + + model_config = read_config('model', config_path='test/test_config.cfg') + train_config = read_config('train', config_path='test/test_config.cfg') + eval_config = read_config('eval', config_path='test/test_config.cfg') + + model_config["classifier"]["model_class"] = "CellClassifierFCNN" + model = models.CellClassifierFCNN(model_config, train_config, eval_config) + + + From e21aa8c68062ab5379393e5786eec0c88cc1d1a1 Mon Sep 17 00:00:00 2001 From: Mariia Date: Tue, 16 Jan 2024 23:04:19 +0100 Subject: [PATCH 25/40] Delete redundant test --- src/server/test/test_models.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/server/test/test_models.py 
b/src/server/test/test_models.py index f878c30..2045075 100644 --- a/src/server/test/test_models.py +++ b/src/server/test/test_models.py @@ -42,14 +42,5 @@ def test_update_configs(): assert model.train_config == new_train_config assert model.eval_config == new_eval_config -def test_fcnn(): - model_config = read_config('model', config_path='test/test_config.cfg') - train_config = read_config('train', config_path='test/test_config.cfg') - eval_config = read_config('eval', config_path='test/test_config.cfg') - - model_config["classifier"]["model_class"] = "CellClassifierFCNN" - model = models.CellClassifierFCNN(model_config, train_config, eval_config) - - From 432d8746d6e4f5791f44ad3d9547b29844593bc7 Mon Sep 17 00:00:00 2001 From: Mariia Date: Tue, 16 Jan 2024 23:29:45 +0100 Subject: [PATCH 26/40] Change config name --- src/server/test/test_models.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/server/test/test_models.py b/src/server/test/test_models.py index 2045075..1d9ec8a 100644 --- a/src/server/test/test_models.py +++ b/src/server/test/test_models.py @@ -16,9 +16,9 @@ def test_eval_rf_not_fitted(): - model_config = read_config('model', config_path='test/test_config.cfg') - train_config = read_config('train', config_path='test/test_config.cfg') - eval_config = read_config('eval', config_path='test/test_config.cfg') + model_config = read_config('model', config_path='test/test_config_RF.cfg') + train_config = read_config('train', config_path='test/test_config_RF.cfg') + eval_config = read_config('eval', config_path='test/test_config_RF.cfg') model_rf = models.CellClassifierShallowModel(model_config,train_config,eval_config) @@ -28,9 +28,9 @@ def test_eval_rf_not_fitted(): def test_update_configs(): - model_config = read_config('model', config_path='test/test_config.cfg') - train_config = read_config('train', config_path='test/test_config.cfg') - eval_config = read_config('eval', config_path='test/test_config.cfg') + model_config = read_config('model', config_path='test/test_config_RF.cfg') + train_config = read_config('train', config_path='test/test_config_RF.cfg') + eval_config = read_config('eval', config_path='test/test_config_RF.cfg') model = models.CustomCellposeModel(model_config,train_config,eval_config) From d6d6e05464ebc203357ad1469438f7fd1d115b6f Mon Sep 17 00:00:00 2001 From: Mariia Date: Wed, 17 Jan 2024 00:12:15 +0100 Subject: [PATCH 27/40] Chnage the paths to configs --- src/server/test/test_models.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/server/test/test_models.py b/src/server/test/test_models.py index 1d9ec8a..9f8ca51 100644 --- a/src/server/test/test_models.py +++ b/src/server/test/test_models.py @@ -16,9 +16,9 @@ def test_eval_rf_not_fitted(): - model_config = read_config('model', config_path='test/test_config_RF.cfg') - train_config = read_config('train', config_path='test/test_config_RF.cfg') - eval_config = read_config('eval', config_path='test/test_config_RF.cfg') + model_config = read_config('model', config_path='test/configs/test_config_RF.cfg') + train_config = read_config('train', config_path='test/configs/test_config_RF.cfg') + eval_config = read_config('eval', config_path='test/configs/test_config_RF.cfg') model_rf = models.CellClassifierShallowModel(model_config,train_config,eval_config) @@ -28,9 +28,9 @@ def test_eval_rf_not_fitted(): def test_update_configs(): - model_config = read_config('model', config_path='test/test_config_RF.cfg') - train_config = read_config('train', 
config_path='test/test_config_RF.cfg') - eval_config = read_config('eval', config_path='test/test_config_RF.cfg') + model_config = read_config('model', config_path='test/configs/test_config_RF.cfg') + train_config = read_config('train', config_path='test/configs/test_config_RF.cfg') + eval_config = read_config('eval', config_path='test/configs/test_config_RF.cfg') model = models.CustomCellposeModel(model_config,train_config,eval_config) From d059c7f8ba4fa678cc2b20888c998199a7986d66 Mon Sep 17 00:00:00 2001 From: Christina Bukas Date: Wed, 24 Jan 2024 18:25:12 +0100 Subject: [PATCH 28/40] changed arguments naming --- src/server/dcp_server/utils.py | 112 +++++++++++++++++---------------- 1 file changed, 57 insertions(+), 55 deletions(-) diff --git a/src/server/dcp_server/utils.py b/src/server/dcp_server/utils.py index 3e28295..3e22670 100644 --- a/src/server/dcp_server/utils.py +++ b/src/server/dcp_server/utils.py @@ -1,10 +1,9 @@ from pathlib import Path import json +from copy import deepcopy import numpy as np -from scipy.ndimage import find_objects, center_of_mass +from scipy.ndimage import find_objects from skimage import measure -from copy import deepcopy - import SimpleITK as sitk from radiomics import shape2D @@ -39,54 +38,52 @@ def join_path(root_dir, filepath): return str(Path(root_dir, filepath)) def get_file_extension(file): return str(Path(file).suffix) -def crop_centered_padded_patch(x: np.ndarray, - c, - p, - l, +def crop_centered_padded_patch(img: np.ndarray, + patch_center_xy, + patch_size, + obj_label, mask: np.ndarray=None, noise_intensity=None) -> np.ndarray: """ Crop a patch from an array `x` centered at coordinates `c` with size `p`, and apply padding if necessary. Args: - x (np.ndarray): The input array from which the patch will be cropped. - c (tuple): The coordinates (row, column) at the center of the patch. - p (tuple): The size of the patch to be cropped (height, width). - l (int): The instance label of the mask at the patch + img (np.ndarray): The input array from which the patch will be cropped. + patch_center_xy (tuple): The coordinates (row, column) at the center of the patch. + patch_size (tuple): The size of the patch to be cropped (height, width). + obj_label (int): The instance label of the mask at the patch mask (np.ndarray, optional): The mask array that asociated with the array x; - mask is used during training to mask out non-central elements; - for RandomForest, it is used to calculate pyradiomics features. + mask is used during training to mask out non-central elements; + for RandomForest, it is used to calculate pyradiomics features. noise_intensity (float, optional): Intensity of noise to be added to the background. Returns: np.ndarray: The cropped patch with applied padding. 
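        np.ndarray: The correspondingly cropped and padded mask (or None if no mask was passed),
            returned together with the patch as the tuple (patch, mask).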
""" - height, width = p # Size of the patch - + height, width = patch_size # Size of the patch + img_height, img_width = img.shape[0], img.shape[1] # Size of the input image + # Calculate the boundaries of the patch - top = c[0] - height // 2 + top = patch_center_xy[0] - height // 2 bottom = top + height - - left = c[1] - width // 2 + left = patch_center_xy[1] - width // 2 right = left + width # Crop the patch from the input array if mask is not None: mask_ = mask.max(-1) if len(mask.shape) >= 3 else mask # Zero out values in the patch where the mask is not equal to the central label - m = (mask_ != l) & (mask_ > 0) - x[m] = 0 - if noise_intensity is not None: - x[m] = np.random.normal(scale=noise_intensity, size=x[m].shape) - mask[m] = 0 - mask = mask[max(top, 0):min(bottom, x.shape[0]), max(left, 0):min(right, x.shape[1]), :] - - patch = x[max(top, 0):min(bottom, x.shape[0]), max(left, 0):min(right, x.shape[1]), :] - # Calculate the required padding amounts - size_x, size_y = x.shape[1], x.shape[0] - - # Apply padding if necessary + mask_other_objs = (mask_ != obj_label) & (mask_ > 0) + img[mask_other_objs] = 0 + # Add random noise at locations where other objects are present if noise_intensity is given + if noise_intensity is not None: img[mask_other_objs] = np.random.normal(scale=noise_intensity, size=img[mask_other_objs].shape) + mask[mask_other_objs] = 0 + # crop the mask + mask = mask[max(top, 0):min(bottom, img_height), max(left, 0):min(right, img_width), :] + + patch = img[max(top, 0):min(bottom, img_height), max(left, 0):min(right, img_width), :] + # Calculate the required padding amounts and apply padding if necessary if left < 0: patch = np.hstack(( np.random.normal(scale=noise_intensity, size=(patch.shape[0], abs(left), patch.shape[2])).astype(np.uint8), @@ -96,14 +93,14 @@ def crop_centered_padded_patch(x: np.ndarray, np.zeros((mask.shape[0], abs(left), mask.shape[2])).astype(np.uint8), mask)) # Apply padding on the right side if necessary - if right > size_x: + if right > img_width: patch = np.hstack(( patch, - np.random.normal(scale=noise_intensity, size=(patch.shape[0], (right - size_x), patch.shape[2])).astype(np.uint8))) + np.random.normal(scale=noise_intensity, size=(patch.shape[0], (right - img_width), patch.shape[2])).astype(np.uint8))) if mask is not None: mask = np.hstack(( mask, - np.zeros((mask.shape[0], (right - size_x), mask.shape[2])).astype(np.uint8))) + np.zeros((mask.shape[0], (right - img_width), mask.shape[2])).astype(np.uint8))) # Apply padding on the top side if necessary if top < 0: patch = np.vstack(( @@ -114,14 +111,14 @@ def crop_centered_padded_patch(x: np.ndarray, np.zeros((abs(top), mask.shape[1], mask.shape[2])).astype(np.uint8), mask)) # Apply padding on the bottom side if necessary - if bottom > size_y: + if bottom > img_height: patch = np.vstack(( patch, - np.random.normal(scale=noise_intensity, size=(bottom - size_y, patch.shape[1], patch.shape[2])).astype(np.uint8))) + np.random.normal(scale=noise_intensity, size=(bottom - img_height, patch.shape[1], patch.shape[2])).astype(np.uint8))) if mask is not None: mask = np.vstack(( mask, - np.zeros((bottom - size_y, mask.shape[1], mask.shape[2])).astype(np.uint8))) + np.zeros((bottom - img_height, mask.shape[1], mask.shape[2])).astype(np.uint8))) return patch, mask @@ -177,23 +174,20 @@ def get_centered_patches(img, # compute center of mass of objects centers_of_mass, instance_labels = get_center_of_mass_and_label(mask) # Crop patches around each center of mass - for c, l in zip(centers_of_mass, 
instance_labels):
+        for c, obj_label in zip(centers_of_mass, instance_labels):
 
             c_x, c_y = c
-
             patch, patch_mask = crop_centered_padded_patch(img.copy(),
                                                            (c_x, c_y),
                                                            (p_size, p_size),
-                                                           l,
+                                                           obj_label,
                                                            mask=deepcopy(mask),
                                                            noise_intensity=noise_intensity)
-
-
             patches.append(patch)
             patch_masks.append(patch_mask)
             if mask_class is not None:
                 # get the class instance for the specific object
-                instance_labels.append(l)
-                class_l = int(np.unique(mask_class[mask[:,:,0]==l]))
+                instance_labels.append(obj_label)
+                class_l = int(np.unique(mask_class[mask[:,:,0]==obj_label]))
                 #-1 because labels from mask start from 1, we want classes to start from 0
                 class_labels.append(class_l-1)
@@ -262,26 +256,26 @@ def create_patch_dataset(imgs, masks_classes, masks_instances, noise_intensity,
 
     return patches, patch_masks, labels
 
-def get_shape_features(img, msk):
+def get_shape_features(img, mask):
     """
     Calculate shape-based radiomic features from an image within the region defined by the mask.
 
     Args:
     - img (np.ndarray): The input image.
-    - msk (np.ndarray): The mask corresponding to the image.
+    - mask (np.ndarray): The mask corresponding to the image.
 
     Returns:
     - np.ndarray: An array containing the calculated shape-based radiomic features, such as: Elongation, Sphericity, Perimeter surface.
     """
 
-    msk = 255 * ((msk) > 0).astype(np.uint8)
+    mask = 255 * ((mask) > 0).astype(np.uint8)
 
     image = sitk.GetImageFromArray(img.squeeze())
-    roi_mask = sitk.GetImageFromArray(msk.squeeze())
+    roi_mask = sitk.GetImageFromArray(mask.squeeze())
 
     shape_calculator = shape2D.RadiomicsShape2D(inputImage=image, inputMask=roi_mask, label=255)
-    # # Calculate the shape-based radiomic features
+    # Calculate the shape-based radiomic features
     shape_features = shape_calculator.execute()
 
     return np.array(list(shape_features.values()))
@@ -318,15 +312,23 @@ def extract_intensity_features(image, mask):
     return np.array(list(features.values()))
 
 def create_dataset_for_rf(imgs, masks):
-
-    # X-features, y-labels
-    X = []
+    """
+    Build the feature dataset for the random forest classifier: shape-based and intensity-based
+    features are extracted for each image/mask pair and concatenated into a single feature vector.
+
+    Args:
+    - imgs (List): A list of all input images.
+    - masks (List): A list of all corresponding masks defining the region of interest.
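The class-label lookup above is the one non-obvious step of this loop: the class mask is indexed with the pixels of a single object from the instance mask, every pixel of that object carries the same class value, so np.unique collapses to one number, and the -1 shifts 1-based mask labels to 0-based training classes. A tiny 2D worked example (array contents invented purely for illustration):

    import numpy as np

    instance_mask = np.array([[1, 1, 0],
                              [0, 2, 2]])     # object ids
    class_mask    = np.array([[3, 3, 0],
                              [0, 1, 1]])     # class ids painted over the same pixels

    obj_label = 2
    class_l = int(np.unique(class_mask[instance_mask == obj_label])[0])
    print(class_l - 1)   # -> 0: mask labels start at 1, training classes start at 0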
- for img, msk in zip(imgs, masks): + Returns: + - List: A list of arrays containing shape and intensity-based features + + """ + X = [] + for img, mask in zip(imgs, masks): - shape_features = get_shape_features(img, msk) - intensity_features = extract_intensity_features(img, msk) - features_list = np.concatenate((shape_features,intensity_features), axis=0) + shape_features = get_shape_features(img, mask) + intensity_features = extract_intensity_features(img, mask) + features_list = np.concatenate((shape_features, intensity_features), axis=0) X.append(features_list) return X \ No newline at end of file From e6a8f1352c2040cf02cbd2b7e3fadd8b9de5c9ef Mon Sep 17 00:00:00 2001 From: Christina Bukas Date: Wed, 24 Jan 2024 18:25:26 +0100 Subject: [PATCH 29/40] compute cellpose loss --- src/server/dcp_server/models.py | 86 ++++++++++++++------------------- 1 file changed, 35 insertions(+), 51 deletions(-) diff --git a/src/server/dcp_server/models.py b/src/server/dcp_server/models.py index e064427..3fcbe1a 100644 --- a/src/server/dcp_server/models.py +++ b/src/server/dcp_server/models.py @@ -13,8 +13,7 @@ from sklearn.exceptions import NotFittedError from cellpose.metrics import aggregated_jaccard_index -from cellpose.transforms import reshape_and_normalize_data - +from cellpose.dynamics import labels_to_flows #from segment_anything import SamPredictor, sam_model_registry #from segment_anything.automatic_mask_generator import SamAutomaticMaskGenerator @@ -44,21 +43,8 @@ def __init__(self, model_config, train_config, eval_config): self.mkldnn = False # otherwise we get error with saving model self.train_config = train_config self.eval_config = eval_config - self.loss_fun = nn.BCEWithLogitsLoss() - - def eval_logits(self, imgs): - """ - Evaluate the logits for a given set of images. 
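create_dataset_for_rf above reduces every patch to one flat feature vector (shape descriptors concatenated with intensity statistics), and those vectors are what the RandomForest branch of the classifier consumes. A generic scikit-learn sketch of that consumption step; the feature matrix, label vector and hyperparameters below are invented for illustration and are not the project's training code:

    import numpy as np
    from sklearn.ensemble import RandomForestClassifier

    rng = np.random.default_rng(0)
    X = rng.normal(size=(30, 16))          # 30 patches, 16 features each (e.g. shape + intensity)
    y = rng.integers(0, 3, size=30)        # 3 object classes

    clf = RandomForestClassifier(n_estimators=100, random_state=0)
    clf.fit(X, y)

    # at inference time a single feature vector has to be reshaped to (1, n_features)
    print(clf.predict(X[0].reshape(1, -1))[0])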
- """ + self.loss = 1e6 - x_test = reshape_and_normalize_data(imgs, channels=[0, 0]) - - img_tensor = torch.tensor(x_test[0]) - model = self.net.eval() - img_tensor = img_tensor.unsqueeze(0) if img_tensor.ndim == 3 else img_tensor - img_logits = model(img_tensor)[0].squeeze()[:,2,...].detach().numpy() - - return img_logits def update_configs(self, train_config, eval_config): self.train_config = train_config @@ -84,24 +70,30 @@ def train(self, imgs, masks): :param masks: masks of the given images (training labels) :type masks: List[np.ndarray] """ - - if not isinstance(masks, np.ndarray): - masks = np.array(masks) if masks[0].shape[0] == 2: masks = list(masks[:,0,...]) - super().train(train_data=deepcopy(imgs), train_labels=masks, **self.train_config["segmentor"]) - - logits = self.eval_logits(imgs) - pred_masks = [self.eval(img) for img in imgs] + # compute loss and metric + true_bin_masks = [mask>0 for mask in masks] # get binary masks + true_flows = labels_to_flows(masks) # get cellpose flows + # get predicted flows and cell probability + pred_masks = [] + pred_flows = [] + true_lbl = [] + for idx, img in enumerate(imgs): + mask, flows, _ = super().eval(x=img, **self.eval_config["segmentor"]) + pred_masks.append(mask) + pred_flows.append(np.stack([flows[1][0], flows[1][1], flows[2]])) # stack cell probability map, horizontal and vertical flow + true_lbl.append(np.stack([true_bin_masks[idx], true_flows[idx][2], true_flows[idx][3]])) + + true_lbl = np.stack(true_lbl) + pred_flows=np.stack(pred_flows) + pred_flows = torch.from_numpy(pred_flows).float().to('cpu') + # compute loss, combination of mse for flows and bce for cell probability + self.loss = self.loss_fn(true_lbl, pred_flows) self.metric = np.mean(aggregated_jaccard_index(masks, pred_masks)) - - masks = torch.tensor(np.array(masks).astype(np.float32)>0).long().float() - pred_masks = torch.tensor(np.array(pred_masks).astype(np.float32)>0).float() - - self.loss = self.loss_fun(torch.tensor(logits).float(), masks) def masks_to_outlines(self, mask): """ get outlines of masks as a 0-1 array @@ -286,28 +278,25 @@ def train(self, imgs, masks): # train cellpose masks = np.array(masks) masks_instances = list(masks[:,0,...]) #[mask.sum(-1) for mask in masks] if masks[0].ndim == 3 else masks - self.segmentor.train(deepcopy(imgs), masks_instances) # create patch dataset to train classifier masks_classes = list(masks[:,1,...]) #[((mask > 0) * np.arange(1, 4)).sum(-1) for mask in masks] - patches, patch_masks, labels = create_patch_dataset(imgs, - masks_classes, - masks_instances, - noise_intensity = self.train_config["classifier"]["train_data"]["noise_intensity"], - max_patch_size = self.train_config["classifier"]["train_data"]["patch_size"]) - + masks_classes, + masks_instances, + noise_intensity = self.train_config["classifier"]["train_data"]["noise_intensity"], + max_patch_size = self.train_config["classifier"]["train_data"]["patch_size"]) + x = patches if self.classifier_class == "RandomForest": - features = create_dataset_for_rf(patches, patch_masks) - self.classifier.train(features, labels) - else: - self.classifier.train(patches, labels) + x = create_dataset_for_rf(patches, patch_masks) # train classifier + self.classifier.train(x, labels) + # and compute metric and loss self.metric = (self.segmentor.metric + self.classifier.metric) / 2 - self.loss = self.classifier.loss + self.loss = (self.segmentor.loss + self.classifier.loss)/2 def eval(self, img): - # TBD we assume image is either 2D [H, W] (see fsimage storage) + # TBD we assume 
image is 2D [H, W] (see fsimage storage) # The final mask which is returned should have # first channel the output of cellpose and the rest are the class channels with torch.no_grad(): @@ -325,21 +314,16 @@ def eval(self, img): instance_mask, max_patch_size, noise_intensity=noise_intensity) + x = patches if self.classifier_class == "RandomForest": - features = create_dataset_for_rf(patches, patch_masks) + x = create_dataset_for_rf(patches, patch_masks) # loop over patches and create classification mask - for idx in range(len(patches)): - if self.classifier_class == "RandomForest": - patch_class = self.classifier.eval(features[idx]) - else: - # patch size should be HxWxC, e.g. 64,64,3 - patch_class = self.classifier.eval(patches[idx]) - + for idx in range(len(x)): + patch_class = self.classifier.eval(x[idx]) # Assign predicted class to corresponding location in final_mask patch_class = patch_class.item() if isinstance(patch_class, torch.Tensor) else patch_class class_mask[instance_mask==instance_labels[idx]] = patch_class + 1 # Apply mask to final_mask, retaining only regions where cellpose_mask is greater than 0 - #class_mask = class_mask * (instance_mask > 0)#.long()) final_mask = np.stack((instance_mask, class_mask), axis=self.eval_config['mask_channel_axis']).astype(np.uint16) # size 2xHxW return final_mask @@ -352,7 +336,7 @@ def __init__(self, model_config, train_config, eval_config): self.train_config = train_config self.eval_config = eval_config - self.model = RandomForestClassifier() + self.model = RandomForestClassifier() # TODO chnage config so RandomForestClassifier accepts input params def train(self, X_train, y_train): From 737e183302a8f4d1979e826bdbb9b8ae90e09155 Mon Sep 17 00:00:00 2001 From: Christina Bukas Date: Wed, 24 Jan 2024 18:25:56 +0100 Subject: [PATCH 30/40] cleanup of test --- src/server/test/test_models.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/src/server/test/test_models.py b/src/server/test/test_models.py index 9f8ca51..e3bbb23 100644 --- a/src/server/test/test_models.py +++ b/src/server/test/test_models.py @@ -1,18 +1,8 @@ -import numpy as np import pytest -# from models import CellClassifierShallowModel -from sklearn.exceptions import NotFittedError +import numpy as np import dcp_server.models as models from dcp_server.utils import read_config -from synthetic_dataset import get_synthetic_dataset - - -""" -self.classifier = CellClassifierShallowModel(self.model_config, - self.train_config, - self.eval_config) -""" def test_eval_rf_not_fitted(): From f5d30b4d58b6c7669b9666d381d22d517f78efee Mon Sep 17 00:00:00 2001 From: Christina Bukas Date: Wed, 24 Jan 2024 18:26:38 +0100 Subject: [PATCH 31/40] remove np and wheel, shouldn't be needed, made pyradiomics larger or equal --- src/server/requirements.txt | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/server/requirements.txt b/src/server/requirements.txt index e3a9efb..2d5c2db 100644 --- a/src/server/requirements.txt +++ b/src/server/requirements.txt @@ -1,11 +1,9 @@ -wheel==0.42.0 cellpose>=2.2 bentoml==1.0.16 scikit-image>=0.19.3 torchmetrics>=0.11.4 torch>=2.1.0 pytest>=7.4.3 -numpy scikit-learn>=1.2.2 SimpleITK>=2.2.1 -pyradiomics==3.0.1 \ No newline at end of file +pyradiomics>=3.0.1 \ No newline at end of file From cac499b9c6e23f81ceb8d9b4f23409e57a43c4ce Mon Sep 17 00:00:00 2001 From: Christina Bukas Date: Wed, 24 Jan 2024 18:26:53 +0100 Subject: [PATCH 32/40] remove numpy and wheel from install --- .github/workflows/test.yml | 2 -- 1 file 
changed, 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 52a3f08..98aeae8 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -87,9 +87,7 @@ jobs: run: | python -m pip install --upgrade pip python -m pip install --upgrade setuptools - pip install numpy pip install pytest - pip install wheel pip install coverage pip install -e ".[testing]" working-directory: src/server From c4d4e3378b68720614e4cae503798855b545d738 Mon Sep 17 00:00:00 2001 From: Christina Bukas Date: Thu, 25 Jan 2024 14:12:14 +0100 Subject: [PATCH 33/40] include load model on init again --- src/server/dcp_server/serviceclasses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/dcp_server/serviceclasses.py b/src/server/dcp_server/serviceclasses.py index 246eabf..72a1e35 100644 --- a/src/server/dcp_server/serviceclasses.py +++ b/src/server/dcp_server/serviceclasses.py @@ -26,7 +26,7 @@ def __init__(self, model, save_model_path): self.model = model self.save_model_path = save_model_path # update with the latest model if it already exists to continue training from there? - # self.check_and_load_model() + self.check_and_load_model() @bentoml.Runnable.method(batchable=False) def evaluate(self, img: np.ndarray) -> np.ndarray: From 1200803b520a7bfc92c3b3346d8eae63cde535a3 Mon Sep 17 00:00:00 2001 From: Christina Bukas Date: Thu, 25 Jan 2024 15:24:01 +0100 Subject: [PATCH 34/40] including numpy again to yml workflow --- .github/workflows/test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index f6ba999..6860ae0 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -94,6 +94,7 @@ jobs: python -m pip install --upgrade setuptools pip install pytest pip install coverage + pip install numpy pip install -e ".[testing]" working-directory: src/server From a3bcf8cc5a4a9d5c0a94c7defd81be2d38249642 Mon Sep 17 00:00:00 2001 From: Christina Bukas Date: Thu, 25 Jan 2024 15:58:44 +0100 Subject: [PATCH 35/40] added numpy to requirements instead --- .github/workflows/test.yml | 1 - src/server/requirements.txt | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 6860ae0..f6ba999 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -94,7 +94,6 @@ jobs: python -m pip install --upgrade setuptools pip install pytest pip install coverage - pip install numpy pip install -e ".[testing]" working-directory: src/server diff --git a/src/server/requirements.txt b/src/server/requirements.txt index 2d5c2db..dc09b18 100644 --- a/src/server/requirements.txt +++ b/src/server/requirements.txt @@ -1,3 +1,4 @@ +numpy>=1.26.2 cellpose>=2.2 bentoml==1.0.16 scikit-image>=0.19.3 From a49a2d922cc6f3785d87ca6db283c55f49ad2ecc Mon Sep 17 00:00:00 2001 From: Christina Bukas Date: Thu, 25 Jan 2024 16:00:45 +0100 Subject: [PATCH 36/40] remove python 3.8 from server runs --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index f6ba999..4d5bc81 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -77,7 +77,7 @@ jobs: strategy: matrix: platform: [ubuntu-latest, windows-latest, macos-latest] - python-version: [3.8, 3.9, "3.10"] + python-version: [3.9, "3.10"] steps: - name: Checkout Repository From 6a1875704c20f48a9f9cc1d1293f7342b04ddb6c Mon Sep 17 00:00:00 2001 From: Christina Bukas 
Date: Thu, 25 Jan 2024 16:12:25 +0100 Subject: [PATCH 37/40] removing version of numpy --- src/server/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/requirements.txt b/src/server/requirements.txt index dc09b18..e9ba87b 100644 --- a/src/server/requirements.txt +++ b/src/server/requirements.txt @@ -1,4 +1,3 @@ -numpy>=1.26.2 cellpose>=2.2 bentoml==1.0.16 scikit-image>=0.19.3 @@ -7,4 +6,5 @@ torch>=2.1.0 pytest>=7.4.3 scikit-learn>=1.2.2 SimpleITK>=2.2.1 +numpy pyradiomics>=3.0.1 \ No newline at end of file From 5436c81cae9d80104ab4f7e734721876517db9a2 Mon Sep 17 00:00:00 2001 From: Christina Bukas Date: Thu, 25 Jan 2024 16:16:27 +0100 Subject: [PATCH 38/40] reverting to previous requirements file --- src/server/requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/server/requirements.txt b/src/server/requirements.txt index e9ba87b..7fe8a2c 100644 --- a/src/server/requirements.txt +++ b/src/server/requirements.txt @@ -1,10 +1,11 @@ +wheel==0.42.0 cellpose>=2.2 bentoml==1.0.16 scikit-image>=0.19.3 torchmetrics>=0.11.4 torch>=2.1.0 pytest>=7.4.3 +numpy scikit-learn>=1.2.2 SimpleITK>=2.2.1 -numpy pyradiomics>=3.0.1 \ No newline at end of file From 06ffbd2899f259feb627ea617a54de1c15744efe Mon Sep 17 00:00:00 2001 From: Christina Bukas Date: Thu, 25 Jan 2024 16:21:38 +0100 Subject: [PATCH 39/40] reverting to previous workflow file --- .github/workflows/test.yml | 2 ++ src/server/requirements.txt | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 4d5bc81..255b0ac 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -92,7 +92,9 @@ jobs: run: | python -m pip install --upgrade pip python -m pip install --upgrade setuptools + pip install numpy pip install pytest + pip install wheel pip install coverage pip install -e ".[testing]" working-directory: src/server diff --git a/src/server/requirements.txt b/src/server/requirements.txt index 7fe8a2c..e3a9efb 100644 --- a/src/server/requirements.txt +++ b/src/server/requirements.txt @@ -8,4 +8,4 @@ pytest>=7.4.3 numpy scikit-learn>=1.2.2 SimpleITK>=2.2.1 -pyradiomics>=3.0.1 \ No newline at end of file +pyradiomics==3.0.1 \ No newline at end of file From ee70715cad9e09386f86b73d18fa02cdf6ebd9ad Mon Sep 17 00:00:00 2001 From: Christina Bukas Date: Thu, 25 Jan 2024 17:26:36 +0100 Subject: [PATCH 40/40] adjusted tests after merge --- src/server/test/configs/test_config_fcnn.cfg | 1 + src/server/test/test_models.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/server/test/configs/test_config_fcnn.cfg b/src/server/test/configs/test_config_fcnn.cfg index 34465f6..02039f6 100644 --- a/src/server/test/configs/test_config_fcnn.cfg +++ b/src/server/test/configs/test_config_fcnn.cfg @@ -21,6 +21,7 @@ "model_class": "FCNN", "in_channels": 1, "num_classes": 3, + "features":[64,128,256,512], "black_bg": "False", "include_mask": "False" } diff --git a/src/server/test/test_models.py b/src/server/test/test_models.py index e3bbb23..84b203c 100644 --- a/src/server/test/test_models.py +++ b/src/server/test/test_models.py @@ -22,7 +22,7 @@ def test_update_configs(): train_config = read_config('train', config_path='test/configs/test_config_RF.cfg') eval_config = read_config('eval', config_path='test/configs/test_config_RF.cfg') - model = models.CustomCellposeModel(model_config,train_config,eval_config) + model = models.CustomCellposeModel(model_config,train_config,eval_config, 
"Cellpose") new_train_config = {"param1": "value1"} new_eval_config = {"param2": "value2"}