diff --git a/src/client/dcp_client/utils/bentoml_model.py b/src/client/dcp_client/utils/bentoml_model.py
index 189c129..5f57b42 100644
--- a/src/client/dcp_client/utils/bentoml_model.py
+++ b/src/client/dcp_client/utils/bentoml_model.py
@@ -2,6 +2,7 @@
 from typing import Optional, List
 from bentoml.client import Client as BentoClient
 from bentoml.exceptions import BentoMLException
+import numpy as np
 
 from dcp_client.app import Model
 
diff --git a/src/server/dcp_server/config_instance.yaml b/src/server/dcp_server/config_instance.yaml
index 1af6b1e..db266da 100644
--- a/src/server/dcp_server/config_instance.yaml
+++ b/src/server/dcp_server/config_instance.yaml
@@ -1,9 +1,7 @@
 {
     "setup": {
         "segmentation": "GeneralSegmentation",
-        "model_to_use": "CustomCellpose",
-        "accepted_types": [".jpg", ".jpeg", ".png", ".tiff", ".tif"],
-        "seg_name_string": "_seg"
+        "model_to_use": "CustomCellpose"
     },
 
     "service": {
@@ -21,6 +19,8 @@
 
     "data": {
         "data_root": "data",
+        "accepted_types": [".jpg", ".jpeg", ".png", ".tiff", ".tif"],
+        "seg_name_string": "_seg",
         "gray": True,
         "rescale": True
     },
diff --git a/src/server/dcp_server/config_semantic.yaml b/src/server/dcp_server/config_semantic.yaml
index 928eb93..e72459a 100644
--- a/src/server/dcp_server/config_semantic.yaml
+++ b/src/server/dcp_server/config_semantic.yaml
@@ -1,9 +1,7 @@
 {
     "setup": {
         "segmentation": "GeneralSegmentation",
-        "model_to_use": "UNet",
-        "accepted_types": [".jpg", ".jpeg", ".png", ".tiff", ".tif"],
-        "seg_name_string": "_seg"
+        "model_to_use": "UNet"
     },
 
     "service": {
@@ -23,6 +21,8 @@
 
     "data": {
         "data_root": "data",
+        "accepted_types": [".jpg", ".jpeg", ".png", ".tiff", ".tif"],
+        "seg_name_string": "_seg",
        "gray": True,
        "rescale": True
     },
diff --git a/src/server/dcp_server/models/unet.py b/src/server/dcp_server/models/unet.py
index c5c7a34..9d85a5f 100644
--- a/src/server/dcp_server/models/unet.py
+++ b/src/server/dcp_server/models/unet.py
@@ -152,7 +152,7 @@ def train(self, imgs: List[np.ndarray], masks: List[np.ndarray]) -> None:
         # compute metric on test set after train is complete
         for imgs, masks in train_dataloader:
             pred_masks = self.forward(imgs.float())
-            self.metric += self.metric_f(masks, pred_masks)
+            self.metric += self.metric_f(pred_masks, masks)
         self.metric /= len(train_dataloader)
 
     def eval(self, img: np.ndarray) -> np.ndarray:
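
Note on the unet.py hunk: metric functions in the PyTorch ecosystem conventionally take predictions first and the ground truth second, and the two arguments are generally not interchangeable when the predictions are raw logits. The sketch below is a minimal, hypothetical illustration of that convention using torchmetrics' multiclass Jaccard index; it is not code from this repository, and `self.metric_f` in unet.py may be a different metric.

```python
# Minimal sketch (assumes torch and torchmetrics are installed).
# Illustrates why `metric_f(pred_masks, masks)` is the right order:
# preds may be raw logits of shape (B, C, H, W), target must be integer labels.
import torch
from torchmetrics.functional import jaccard_index

num_classes = 3
logits = torch.randn(4, num_classes, 64, 64)          # model output (B, C, H, W)
labels = torch.randint(0, num_classes, (4, 64, 64))   # ground truth (B, H, W)

# predictions first, ground truth second
score = jaccard_index(logits, labels, task="multiclass", num_classes=num_classes)
print(f"mean IoU: {score:.3f}")
```

With the arguments swapped, the metric would treat the integer labels as predictions and the logits as targets, which either raises a shape/dtype error or silently scores the wrong tensors; the one-line change in unet.py corrects that.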