From 0b53a08ba5e9adf516ae0398a5de9922e8ea5df9 Mon Sep 17 00:00:00 2001 From: Rob1in Date: Mon, 21 Apr 2025 17:22:54 -0400 Subject: [PATCH 1/6] make get_classifications use cropped image inputs --- src/motion_detector.py | 94 ++++++++++++++++++++++++++++-------------- 1 file changed, 64 insertions(+), 30 deletions(-) diff --git a/src/motion_detector.py b/src/motion_detector.py index 370d593..ff0e7ac 100644 --- a/src/motion_detector.py +++ b/src/motion_detector.py @@ -1,25 +1,22 @@ import math -from typing import ClassVar, List, Mapping, Sequence, Any, Dict, Optional -from typing_extensions import Self +from typing import Any, ClassVar, Dict, List, Mapping, Optional, Sequence + import cv2 import numpy as np - - +import PIL +from typing_extensions import Self from viam.components.camera import Camera -from viam.media.video import ViamImage, CameraMimeType +from viam.logging import getLogger from viam.media.utils import pil -from viam.proto.service.vision import Classification, Detection -from viam.services.vision import Vision, CaptureAllResult +from viam.media.video import CameraMimeType, ViamImage from viam.module.types import Reconfigurable from viam.proto.app.robot import ServiceConfig from viam.proto.common import PointCloudObject, ResourceName +from viam.proto.service.vision import Classification, Detection from viam.resource.base import ResourceBase from viam.resource.types import Model, ModelFamily +from viam.services.vision import CaptureAllResult, Vision from viam.utils import ValueTypes -from viam.logging import getLogger - - - LOGGER = getLogger("MotionDetectorLogger") @@ -54,44 +51,57 @@ def new_service( # Validates JSON Configuration @classmethod - def validate_config( - cls, - config: ServiceConfig - ) -> Sequence[str]: + def validate_config(cls, config: ServiceConfig) -> Sequence[str]: validate_cam_name = config.attributes.fields["cam_name"].string_value validate_camera_name = config.attributes.fields["camera_name"].string_value if validate_cam_name == "" and validate_camera_name == "": raise ValueError( "Source camera must be provided as 'cam_name' or 'camera_name', " - "but neither was provided") + "but neither was provided" + ) if validate_cam_name != "" and validate_camera_name != "": raise ValueError( "Source camera must be provided as 'cam_name' or 'camera_name', " - "but both were provided") - source_cam = validate_cam_name if validate_cam_name != "" else validate_camera_name + "but both were provided" + ) + source_cam = ( + validate_cam_name if validate_cam_name != "" else validate_camera_name + ) - min_box_size = config.attributes.fields["min_box_size"].number_value + min_box_size = config.attributes.fields["min_box_size"].number_value min_box_percent = config.attributes.fields["min_box_percent"].number_value if min_box_size < 0: - raise ValueError("Minimum bounding box size should be a non-negative integer") + raise ValueError( + "Minimum bounding box size should be a non-negative integer" + ) if min_box_percent < 0.0 or min_box_percent > 1.0: - raise ValueError("Minimum bounding box percent should be between 0.0 and 1.0") + raise ValueError( + "Minimum bounding box percent should be between 0.0 and 1.0" + ) if min_box_size != 0 and min_box_percent != 0.0: - raise ValueError("Cannot specify the minimum box in both pixels and percentages") + raise ValueError( + "Cannot specify the minimum box in both pixels and percentages" + ) sensitivity = config.attributes.fields["sensitivity"].number_value if sensitivity < 0 or sensitivity > 1: raise ValueError("Sensitivity should 
be a number between 0.0 and 1.0") - max_box_size = config.attributes.fields["max_box_size"].number_value + max_box_size = config.attributes.fields["max_box_size"].number_value max_box_percent = config.attributes.fields["max_box_percent"].number_value if max_box_size < 0: - raise ValueError("Maximum bounding box size should be a non-negative integer") + raise ValueError( + "Maximum bounding box size should be a non-negative integer" + ) if max_box_percent < 0.0 or max_box_percent > 1.0: - raise ValueError("Maximum bounding box percent should be between 0.0 and 1.0") + raise ValueError( + "Maximum bounding box percent should be between 0.0 and 1.0" + ) if max_box_size != 0 and max_box_percent != 0.0: - raise ValueError("Cannot specify the maximum box in both pixels and percentages") + raise ValueError( + "Cannot specify the maximum box in both pixels and percentages" + ) return [source_cam] @@ -115,6 +125,14 @@ def reconfigure( self.max_box_size = config.attributes.fields["max_box_size"].number_value self.max_box_percent = config.attributes.fields["max_box_percent"].number_value + # Crop region is optional, so we need to check if it exists + if config.attributes.fields["crop_region"].struct_value: + self.crop_region = dict( + config.attributes.fields["crop_region"].struct_value.fields + ) + else: + self.crop_region = None + # This will be the main method implemented in this module. # Given a camera. Perform frame differencing and return how much of the image is moving async def get_classifications( @@ -133,6 +151,7 @@ async def get_classifications( "image mime type must be PNG or JPEG, not ", input1.mime_type ) img1 = pil.viam_to_pil_image(input1) + img1, _, _ = self.crop_image(img1) gray1 = cv2.cvtColor(np.array(img1), cv2.COLOR_BGR2GRAY) input2 = await self.camera.get_image() @@ -141,6 +160,7 @@ async def get_classifications( "image mime type must be PNG or JPEG, not ", input2.mime_type ) img2 = pil.viam_to_pil_image(input2) + img2, _, _ = self.crop_image(img2) gray2 = cv2.cvtColor(np.array(img2), cv2.COLOR_BGR2GRAY) return self.classification_from_gray_imgs(gray1=gray1, gray2=gray2) @@ -163,8 +183,7 @@ async def get_classifications_from_camera( "is not the configured 'cam_name'", self.cam_name, ) - image = await self.camera.get_image() - return await self.get_classifications(image=image, count=count) + return await self.get_classifications(image=None, count=count) # Not implemented for now. 
Eventually want this to return the location of the movement async def get_detections( @@ -191,7 +210,6 @@ async def get_detections( ) img2 = pil.viam_to_pil_image(input2) gray2 = cv2.cvtColor(np.array(img2), cv2.COLOR_BGR2GRAY) - return self.detections_from_gray_imgs(gray1, gray2) async def get_detections_from_camera( @@ -314,7 +332,9 @@ def detections_from_gray_imgs(self, gray1, gray2): img_out = cv2.erode(img3, kernel2) # List points around the remaining blobs - contours, _ = cv2.findContours(img_out, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) + contours, _ = cv2.findContours( + img_out, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE + ) # Make boxes from the contours for c in contours: @@ -357,3 +377,17 @@ def detections_from_gray_imgs(self, gray1, gray2): detections.append(detection) return detections + + def crop_image(self, image: PIL.Image.Image): + if not self.crop_region: + return image + else: + width, height = image.size + x1 = int(self.crop_region["x1_rel"] * width) + y1 = int(self.crop_region["y1_rel"] * height) + x2 = int(self.crop_region["x2_rel"] * width) + y2 = int(self.crop_region["y2_rel"] * height) + return image.crop((x1, y1, x2, y2)), width, height + + def retrieve_original_coordinates(self, x_normalized, y_normalized, width, height): + pass From e9236a9f8d7e81d2b5fbb74ab5bf24865fb5e53d Mon Sep 17 00:00:00 2001 From: Rob1in Date: Wed, 23 Apr 2025 11:59:07 -0400 Subject: [PATCH 2/6] add crop region logic for get_detections logic --- src/motion_detector.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/src/motion_detector.py b/src/motion_detector.py index ff0e7ac..b54a3d1 100644 --- a/src/motion_detector.py +++ b/src/motion_detector.py @@ -130,6 +130,10 @@ def reconfigure( self.crop_region = dict( config.attributes.fields["crop_region"].struct_value.fields ) + self.crop_region["x1_rel"] = float(self.crop_region["x1_rel"].number_value) + self.crop_region["y1_rel"] = float(self.crop_region["y1_rel"].number_value) + self.crop_region["x2_rel"] = float(self.crop_region["x2_rel"].number_value) + self.crop_region["y2_rel"] = float(self.crop_region["y2_rel"].number_value) else: self.crop_region = None @@ -201,6 +205,7 @@ async def get_detections( "image mime type must be PNG or JPEG, not ", input1.mime_type ) img1 = pil.viam_to_pil_image(input1) + img1, width, height = self.crop_image(img1) gray1 = cv2.cvtColor(np.array(img1), cv2.COLOR_BGR2GRAY) input2 = await self.camera.get_image() @@ -209,8 +214,9 @@ async def get_detections( "image mime type must be PNG or JPEG, not ", input2.mime_type ) img2 = pil.viam_to_pil_image(input2) + img2, width, height = self.crop_image(img2) gray2 = cv2.cvtColor(np.array(img2), cv2.COLOR_BGR2GRAY) - return self.detections_from_gray_imgs(gray1, gray2) + return self.detections_from_gray_imgs(gray1, gray2, width, height) async def get_detections_from_camera( self, @@ -309,7 +315,7 @@ def classification_from_gray_imgs(self, gray1, gray2): classifications = [{"class_name": "motion", "confidence": conf}] return classifications - def detections_from_gray_imgs(self, gray1, gray2): + def detections_from_gray_imgs(self, gray1, gray2, width, height): detections = [] # Frame difference diff = cv2.absdiff(gray2, gray1) @@ -355,6 +361,17 @@ def detections_from_gray_imgs(self, gray1, gray2): if self.max_box_percent > 0 and area_percent > self.max_box_percent: continue + if self.crop_region: + # Adjust coordinates based on crop region + x_offset = int(self.crop_region.get("x1_rel") * width) + y_offset = 
int(self.crop_region.get("y1_rel") * height) + + # Convert back to original image coordinates + xmin = min(width - 1, xmin + x_offset) + ymin = min(height - 1, ymin + y_offset) + xmax = min(width - 1, xmax + x_offset) + ymax = min(height - 1, ymax + y_offset) + detection = { "confidence": 0.5, "class_name": "motion", @@ -380,7 +397,7 @@ def detections_from_gray_imgs(self, gray1, gray2): def crop_image(self, image: PIL.Image.Image): if not self.crop_region: - return image + return image, None, None else: width, height = image.size x1 = int(self.crop_region["x1_rel"] * width) From e88beef2ca4a3fe5479ffc4095864df0ea2450fa Mon Sep 17 00:00:00 2001 From: Rob1in Date: Wed, 23 Apr 2025 12:01:45 -0400 Subject: [PATCH 3/6] fix tests for change --- src/motion_detector.py | 2 +- tests/test_motiondetector.py | 175 +++++++++++++++++++++++------------ 2 files changed, 115 insertions(+), 62 deletions(-) diff --git a/src/motion_detector.py b/src/motion_detector.py index b54a3d1..be67ead 100644 --- a/src/motion_detector.py +++ b/src/motion_detector.py @@ -315,7 +315,7 @@ def classification_from_gray_imgs(self, gray1, gray2): classifications = [{"class_name": "motion", "confidence": conf}] return classifications - def detections_from_gray_imgs(self, gray1, gray2, width, height): + def detections_from_gray_imgs(self, gray1, gray2, width=None, height=None): detections = [] # Frame difference diff = cv2.absdiff(gray2, gray1) diff --git a/tests/test_motiondetector.py b/tests/test_motiondetector.py index 187c5ac..3d955b5 100644 --- a/tests/test_motiondetector.py +++ b/tests/test_motiondetector.py @@ -1,36 +1,39 @@ -from src.motion_detector import MotionDetector -from tests.fakecam import FakeCamera -from PIL import Image +from typing import Any, List, Mapping from unittest.mock import MagicMock, patch + +import cv2 +import numpy as np +import pytest +from google.protobuf.struct_pb2 import Struct +from parameterized import parameterized +from PIL import Image from viam.components.camera import Camera from viam.proto.app.robot import ComponentConfig -from google.protobuf.struct_pb2 import Struct from viam.services.vision import CaptureAllResult, Classification, Detection -from typing import List, Mapping, Any -from parameterized import parameterized -import pytest -import cv2 -import numpy as np +from src.motion_detector import MotionDetector +from tests.fakecam import FakeCamera pytest.source_camera_name_none_defined_error_message = "Source camera must be provided as 'cam_name' or 'camera_name', but neither was provided" pytest.source_camera_name_both_defined_error_message = "Source camera must be provided as 'cam_name' or 'camera_name', but both were provided" + def make_component_config(dictionary: Mapping[str, Any]) -> ComponentConfig: - struct = Struct() - struct.update(dictionary=dictionary) - return ComponentConfig(attributes=struct) + struct = Struct() + struct.update(dictionary=dictionary) + return ComponentConfig(attributes=struct) def getMD(): md = MotionDetector("test") md.sensitivity = 0.9 - md.min_box_size = 1000 + md.min_box_size = 1000 md.min_box_percent = 0 - md.max_box_size = 0 + md.max_box_size = 0 md.max_box_percent = 0 md.cam_name = "test" md.camera = FakeCamera("test") + md.crop_region = None return md @@ -38,22 +41,34 @@ class TestConfigValidation: def test_empty(self): md = getMD() empty_config = make_component_config({}) - with pytest.raises(ValueError, match=pytest.source_camera_name_none_defined_error_message): + with pytest.raises( + ValueError, 
match=pytest.source_camera_name_none_defined_error_message + ): response = md.validate_config(config=empty_config) - # For each way to specify a valid min/max size, have a test that checks it's valid. - @parameterized.expand(( - ("all defaults", {}), - ("min in pixels", {"min_box_size": 3}), - ("min in percentage", {"min_box_percent": 0.1}), - ("max in pixels", {"max_box_size": 300}), - ("max in percentage", {"max_box_percent": 0.9}), - ("min and max in pixels", {"min_box_size": 3, "max_box_size": 300}), - ("min in pixels, max in percentage", {"min_box_size": 3, "max_box_percent": 0.9}), - ("min and max in percentage", {"min_box_percent": 0.1, "max_box_percent": 0.9}), - ("min in percentage, max in pixels", {"min_box_percent": 0.1, "max_box_size": 300}), - )) + @parameterized.expand( + ( + ("all defaults", {}), + ("min in pixels", {"min_box_size": 3}), + ("min in percentage", {"min_box_percent": 0.1}), + ("max in pixels", {"max_box_size": 300}), + ("max in percentage", {"max_box_percent": 0.9}), + ("min and max in pixels", {"min_box_size": 3, "max_box_size": 300}), + ( + "min in pixels, max in percentage", + {"min_box_size": 3, "max_box_percent": 0.9}, + ), + ( + "min and max in percentage", + {"min_box_percent": 0.1, "max_box_percent": 0.9}, + ), + ( + "min in percentage, max in pixels", + {"min_box_percent": 0.1, "max_box_size": 300}, + ), + ) + ) def test_valid(self, unused_test_name, extra_config_values): md = getMD() raw_config = {"cam_name": "test"} @@ -62,20 +77,43 @@ def test_valid(self, unused_test_name, extra_config_values): response = md.validate_config(config=config) assert response == ["test"] - # For each type of invalid config, test that the expected error is raised. - @parameterized.expand(( - ("Minimum bounding box size should be a non-negative integer", {"min_box_size": -1}), - ("Minimum bounding box percent should be between 0.0 and 1.0", {"min_box_percent": -0.1}), - ("Minimum bounding box percent should be between 0.0 and 1.0", {"min_box_percent": 1.1}), - ("Maximum bounding box size should be a non-negative integer", {"max_box_size": -1}), - ("Maximum bounding box percent should be between 0.0 and 1.0", {"max_box_percent": -0.1}), - ("Maximum bounding box percent should be between 0.0 and 1.0", {"max_box_percent": 1.1}), - ("Cannot specify the minimum box in both pixels and percentages", - {"min_box_size": 3, "min_box_percent": 0.1}), - ("Cannot specify the maximum box in both pixels and percentages", - {"max_box_size": 300, "max_box_percent": 0.9}), - )) + @parameterized.expand( + ( + ( + "Minimum bounding box size should be a non-negative integer", + {"min_box_size": -1}, + ), + ( + "Minimum bounding box percent should be between 0.0 and 1.0", + {"min_box_percent": -0.1}, + ), + ( + "Minimum bounding box percent should be between 0.0 and 1.0", + {"min_box_percent": 1.1}, + ), + ( + "Maximum bounding box size should be a non-negative integer", + {"max_box_size": -1}, + ), + ( + "Maximum bounding box percent should be between 0.0 and 1.0", + {"max_box_percent": -0.1}, + ), + ( + "Maximum bounding box percent should be between 0.0 and 1.0", + {"max_box_percent": 1.1}, + ), + ( + "Cannot specify the minimum box in both pixels and percentages", + {"min_box_size": 3, "min_box_percent": 0.1}, + ), + ( + "Cannot specify the maximum box in both pixels and percentages", + {"max_box_size": 300, "max_box_percent": 0.9}, + ), + ) + ) def test_invalid(self, error_message, extra_config_values): md = getMD() raw_config = {"cam_name": "test"} @@ -88,14 +126,18 @@ def 
test_empty_config_name(self): md = getMD() raw_config = {"cam_name": ""} config = make_component_config(raw_config) - with pytest.raises(ValueError, match=pytest.source_camera_name_none_defined_error_message): + with pytest.raises( + ValueError, match=pytest.source_camera_name_none_defined_error_message + ): response = md.validate_config(config=config) # For each way to specify a valid camera name, test that the return is valid. - @parameterized.expand(( - ("cam_name defined, camera_name not defined", {"cam_name": "test"}), - ("camera_name defined, cam_name not defined", {"camera_name": "test"}), - )) + @parameterized.expand( + ( + ("cam_name defined, camera_name not defined", {"cam_name": "test"}), + ("camera_name defined, cam_name not defined", {"camera_name": "test"}), + ) + ) def test_valid_camera_names(self, unused_test_name, cam_config): md = getMD() config = make_component_config(cam_config) @@ -103,24 +145,42 @@ def test_valid_camera_names(self, unused_test_name, cam_config): assert response == ["test"] # For each way to spedify an invalid camera name, test that the expected error is raised. - @parameterized.expand(( - ("cam_name not defined, camera_name not defined", {}, pytest.source_camera_name_none_defined_error_message), - ("cam_name defined, camera_name defined", {"camera_name": "test", "cam_name": "test"}, pytest.source_camera_name_both_defined_error_message), - ("cam_name empty, camera_name empty", {"cam_name": "", "camera_name": ""}, pytest.source_camera_name_none_defined_error_message), - )) + @parameterized.expand( + ( + ( + "cam_name not defined, camera_name not defined", + {}, + pytest.source_camera_name_none_defined_error_message, + ), + ( + "cam_name defined, camera_name defined", + {"camera_name": "test", "cam_name": "test"}, + pytest.source_camera_name_both_defined_error_message, + ), + ( + "cam_name empty, camera_name empty", + {"cam_name": "", "camera_name": ""}, + pytest.source_camera_name_none_defined_error_message, + ), + ) + ) def test_invalid_camera_names(self, unused_test_name, cam_config, error_message): md = getMD() config = make_component_config(cam_config) with pytest.raises(ValueError, match=error_message): response = md.validate_config(config=config) + class TestMotionDetector: @staticmethod async def get_output(md): - out = await md.capture_all_from_camera("test",return_image=True, - return_classifications=True, - return_detections=True, - return_object_point_clouds=True) + out = await md.capture_all_from_camera( + "test", + return_image=True, + return_classifications=True, + return_detections=True, + return_object_point_clouds=True, + ) assert isinstance(out, CaptureAllResult) assert out.image is not None assert out.classifications is not None @@ -128,7 +188,6 @@ async def get_output(md): assert out.classifications[0]["class_name"] == "motion" return out - def test_classifications(self): img1 = Image.open("tests/img1.jpg") img2 = Image.open("tests/img2.jpg") @@ -140,7 +199,6 @@ def test_classifications(self): assert len(classifications) == 1 assert classifications[0]["class_name"] == "motion" - def test_detections(self): img1 = Image.open("tests/img1.jpg") img2 = Image.open("tests/img2.jpg") @@ -156,7 +214,6 @@ def test_detections(self): assert "x_max_normalized" in detections[0] assert "y_max_normalized" in detections[0] - @pytest.mark.asyncio async def test_properties(self): md = getMD() @@ -165,7 +222,6 @@ async def test_properties(self): assert props.detections_supported == True assert props.object_point_clouds_supported == False - 
@pytest.mark.asyncio async def test_captureall(self): md = getMD() @@ -174,7 +230,6 @@ async def test_captureall(self): assert out.detections[0]["class_name"] == "motion" assert out.objects is None - @pytest.mark.asyncio async def test_captureall_not_too_large(self): md = getMD() @@ -184,7 +239,6 @@ async def test_captureall_not_too_large(self): assert out.detections[0]["class_name"] == "motion" assert out.objects is None - @pytest.mark.asyncio async def test_captureall_too_small(self): md = getMD() @@ -192,7 +246,6 @@ async def test_captureall_too_small(self): out = await self.get_output(md) assert out.detections == [] - @pytest.mark.asyncio async def test_captureall_too_large(self): md = getMD() From bfe5aeb872091c4ac0683b19c879c395ad71275c Mon Sep 17 00:00:00 2001 From: Rob1in Date: Wed, 23 Apr 2025 13:12:08 -0400 Subject: [PATCH 4/6] add crop region validation --- src/motion_detector.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/motion_detector.py b/src/motion_detector.py index be67ead..806496f 100644 --- a/src/motion_detector.py +++ b/src/motion_detector.py @@ -103,6 +103,29 @@ def validate_config(cls, config: ServiceConfig) -> Sequence[str]: "Cannot specify the maximum box in both pixels and percentages" ) + if config.attributes.fields["crop_region"].struct_value: + crop_region = dict( + config.attributes.fields["crop_region"].struct_value.fields + ) + x1_rel = float(crop_region["x1_rel"].number_value) + x2_rel = float(crop_region["x2_rel"].number_value) + y1_rel = float(crop_region["y1_rel"].number_value) + y2_rel = float(crop_region["y2_rel"].number_value) + + if x1_rel < 0.0 or x1_rel > 1.0: + raise ValueError("x1_rel should be between 0.0 and 1.0") + if x2_rel < 0.0 or x2_rel > 1.0: + raise ValueError("x2_rel should be between 0.0 and 1.0") + if y1_rel < 0.0 or y1_rel > 1.0: + raise ValueError("y1_rel should be between 0.0 and 1.0") + if y2_rel < 0.0 or y2_rel > 1.0: + raise ValueError("y2_rel should be between 0.0 and 1.0") + if x1_rel >= x2_rel: + raise ValueError("x1_rel should be less than x2_rel") + if x1_rel > x2_rel: + raise ValueError("x1_rel should be less than x2_rel") + if y1_rel > y2_rel: + raise ValueError("y1_rel should be less than y2_rel") return [source_cam] # Handles attribute reconfiguration From 2e9a82fd502ee372de30aa548746f0800e16d045 Mon Sep 17 00:00:00 2001 From: Rob1in Date: Wed, 23 Apr 2025 14:02:59 -0400 Subject: [PATCH 5/6] update readme --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 526ee19..87c1fa2 100644 --- a/README.md +++ b/README.md @@ -44,7 +44,10 @@ The following attributes are available for `viam:vision:motion-detector` vision | `min_box_percent` | int | **Optional** | The fraction of the image (between 0 and 1) that the smallest bounding box must cover. Relevant for GetDetections/GetDetectionsFromCamera only. You must specify at most one of `min_box_size` and `min_box_percent`. | `max_box_size` | int | **Optional** | The size (in square pixels) of the largest bounding box to allow. Relevant for GetDetections/GetDetectionsFromCamera only. You must specify at most one of `max_box_size` and `max_box_percent`. | `max_box_percent` | int | **Optional** | The fraction of the image (between 0 and 1) that the largest bounding box can cover. Relevant for GetDetections/GetDetectionsFromCamera only. You must specify at most one of `max_box_size` and `max_box_percent`. -| `sensitivity` | float | **Optional** | A number from 0 - 1. 
Larger numbers will make the module more sensitive to motion. Default = 0.9 | +| `sensitivity` | float | **Optional** | A number from 0 - 1. Larger numbers will make the module more sensitive to motion. Default = 0.9 +| `crop_region` | dict | **Optional** | Defines a region of the image to crop for processing. Must include four float values between 0 and 1: `x1_rel`, `y1_rel`, `x2_rel`, `y2_rel` representing the relative coordinates of the crop region.| + + > [!WARNING] > Either one of `camera_name` or `cam_name` will be accepted, but not both. `camera_name` is preferred. From 3d1f7781f3ca06b3b37dcb987aa17001e662bad4 Mon Sep 17 00:00:00 2001 From: Rob1in Date: Wed, 23 Apr 2025 14:07:52 -0400 Subject: [PATCH 6/6] linting --- Makefile | 2 +- src/motion_detector.py | 14 ++++++-------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index 67adb76..2981036 100644 --- a/Makefile +++ b/Makefile @@ -17,5 +17,5 @@ dist/main: . .venv/bin/activate && python -m PyInstaller --onefile --hidden-import="googleapiclient" --add-data="./src:src" src/main.py lint: - . .venv/bin/activate && pylint --disable=C0114,E0401,E1101,C0116,W0613,R0913,C0116,R0914,C0103,W0201,W0719 src/ + . .venv/bin/activate && pylint --disable=C0114,E0401,E1101,C0116,W0613,R0913,C0116,R0914,C0103,W0201,W0719,R0902,R0912 src/ diff --git a/src/motion_detector.py b/src/motion_detector.py index 806496f..4b23d79 100644 --- a/src/motion_detector.py +++ b/src/motion_detector.py @@ -413,7 +413,6 @@ def detections_from_gray_imgs(self, gray1, gray2, width=None, height=None): "y_max_normalized": ymax / diff.shape[0], } ) - detections.append(detection) return detections @@ -421,13 +420,12 @@ def detections_from_gray_imgs(self, gray1, gray2, width=None, height=None): def crop_image(self, image: PIL.Image.Image): if not self.crop_region: return image, None, None - else: - width, height = image.size - x1 = int(self.crop_region["x1_rel"] * width) - y1 = int(self.crop_region["y1_rel"] * height) - x2 = int(self.crop_region["x2_rel"] * width) - y2 = int(self.crop_region["y2_rel"] * height) - return image.crop((x1, y1, x2, y2)), width, height + width, height = image.size + x1 = int(self.crop_region["x1_rel"] * width) + y1 = int(self.crop_region["y1_rel"] * height) + x2 = int(self.crop_region["x2_rel"] * width) + y2 = int(self.crop_region["y2_rel"] * height) + return image.crop((x1, y1, x2, y2)), width, height def retrieve_original_coordinates(self, x_normalized, y_normalized, width, height): pass
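
Note for reviewers: the standalone sketch below (not part of any patch in this series) walks through the relative-crop arithmetic that the new crop_image() helper and the coordinate offset correction in detections_from_gray_imgs() rely on. The helper names and the example crop region are illustrative only, not taken from the module.

    # Standalone sketch -- not part of the patches above. Illustrates how a
    # crop region given in relative (0.0-1.0) coordinates is turned into a
    # pixel box, and how coordinates measured inside the crop are mapped
    # back to the full image. Names and values here are illustrative.
    from PIL import Image

    crop_region = {"x1_rel": 0.25, "y1_rel": 0.25, "x2_rel": 0.75, "y2_rel": 0.75}


    def crop(image: Image.Image, region: dict):
        """Crop an image using relative (0.0-1.0) coordinates."""
        width, height = image.size
        box = (
            int(region["x1_rel"] * width),
            int(region["y1_rel"] * height),
            int(region["x2_rel"] * width),
            int(region["y2_rel"] * height),
        )
        return image.crop(box), width, height


    def to_full_image_pixels(x, y, region, width, height):
        """Map pixel coordinates measured inside the crop back to the full image."""
        x_offset = int(region["x1_rel"] * width)
        y_offset = int(region["y1_rel"] * height)
        return min(width - 1, x + x_offset), min(height - 1, y + y_offset)


    if __name__ == "__main__":
        img = Image.new("RGB", (640, 480))
        cropped, width, height = crop(img, crop_region)
        print(cropped.size)  # (320, 240)
        print(to_full_image_pixels(10, 20, crop_region, width, height))  # (170, 140)

Applying the x1_rel/y1_rel offsets before reporting detections is what keeps the returned bounding boxes meaningful relative to the original, uncropped camera frame rather than the cropped sub-image.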