diff --git a/.gitignore b/.gitignore
index 51cc300c0..b7c783b60 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,3 +11,10 @@
__pycache__
build
ros2/lanefollowingtest/LaneVideo.mp4
+log/COLCON_IGNORE
+log/latest
+log/latest_version-check
+log/version-check_2024-03-28_13-31-27/logger_all.log
+ros2/log/latest_build
+ros2/log/build_2024-03-28_13-35-11/logger_all.log
+ros2/log/build_2024-03-28_13-39-36/logger_all.log
diff --git a/UI/Homepage.py b/UI/Homepage.py
deleted file mode 100644
index cea9bebcd..000000000
--- a/UI/Homepage.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import streamlit as st
-from streamlit import runtime
-from streamlit.web import cli as stcli
-import sys
-import cv2
-
-
-def runner():
- st.header("Welcome to the IGVC Homepage!")
- st.write("Please select an option on the sidebar to the right.")
- st.write("For the freshmen: You guys can beautify this up however you'd like")
- st.write(
- "Streamlit supports fast reloads, just refresh the python script to get going!")
- st.write("Docs are here: https://docs.streamlit.io/library/api-reference")
- st.write("This is also extremely relevant: https://stackoverflow.com/questions/67382181/how-to-use-streamlit-in-ros")
-
-
-
-# This allows the system to run without being in a dedicated streamlit session:
-if __name__ == "__main__":
- if runtime.exists():
- st.set_page_config(
- page_title="IGVC Homepage",
- page_icon="🚗")
- st.sidebar.success("Select a demo above.")
- runner()
- else:
- sys.argv = ["streamlit", "run", sys.argv[0]]
- sys.exit(stcli.main())
diff --git "a/UI/pages/\360\237\233\243 Lane_Detection.py" "b/UI/pages/\360\237\233\243 Lane_Detection.py"
deleted file mode 100644
index 6976ddf2d..000000000
--- "a/UI/pages/\360\237\233\243 Lane_Detection.py"
+++ /dev/null
@@ -1,35 +0,0 @@
-import streamlit as st
-import cv2
-
-def render_imgs(src, tabs):
- # SRC is cv2.VideoCapture FOR NOW: Will need to be tweaked in the future to rossify
- vidcap = cv2.VideoCapture(src)
- frame_containers = [tab.empty() for tab in tabs]
- while vidcap.isOpened():
- ret, frame = vidcap.read()
- if not ret:
- st.write("Video Capture Stopped")
- else:
- frame_containers[0].image(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB),channels="RGB")
- frame_containers[1].image(cv2.cvtColor(frame, cv2.COLOR_BGR2HSV_FULL),channels="HSV")
- frame_containers[2].image(cv2.resize(frame, (144, 144)),channels="BGR")
- if cv2.waitKey(1) & 0xFF == ord("q"):
- break
- vidcap.release()
- cv2.destroyAllWindows()
-
-
-## See ../../ros2/src/lanefollowingtest/LaneDetection.py
-## This page should render ALL images in there, as well as publish
-if __name__ == "__main__":
- st.set_page_config(
- page_title="Lane Detection",
- page_icon="🛣")
- st.write("This should render all of the images related to Lane Detection, and relevant parameters.")
- st.checkbox("Display Parameters")
- tabs = st.tabs(["Original","HSV", "Sliding Windows"])
- render_imgs("/dev/video0", (tabs))
-
-
- ##I Don't even know if we'll want this, but it's a nice to have anyway
- Restart = st.button("ESTOP",type="primary")
\ No newline at end of file
diff --git a/UI/ui_generics.py b/UI/ui_generics.py
deleted file mode 100644
index 139597f9c..000000000
--- a/UI/ui_generics.py
+++ /dev/null
@@ -1,2 +0,0 @@
-
-
diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/LICENSE b/ros2/TurtlebotController/turtlebot_vel_ctrl/LICENSE
deleted file mode 100644
index d64569567..000000000
--- a/ros2/TurtlebotController/turtlebot_vel_ctrl/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/package.xml b/ros2/TurtlebotController/turtlebot_vel_ctrl/package.xml
deleted file mode 100644
index 7be7e8f94..000000000
--- a/ros2/TurtlebotController/turtlebot_vel_ctrl/package.xml
+++ /dev/null
@@ -1,18 +0,0 @@
-<?xml version="1.0"?>
-<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
-<package format="3">
-  <name>turtlebot_vel_ctrl</name>
-  <version>0.0.0</version>
-  <description>TODO: Package description</description>
-  <maintainer email="root@todo.todo">root</maintainer>
-  <license>Apache-2.0</license>
-
-  <test_depend>ament_copyright</test_depend>
-  <test_depend>ament_flake8</test_depend>
-  <test_depend>ament_pep257</test_depend>
-  <test_depend>python3-pytest</test_depend>
-
-  <export>
-    <build_type>ament_python</build_type>
-  </export>
-</package>
diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/resource/turtlebot_vel_ctrl b/ros2/TurtlebotController/turtlebot_vel_ctrl/resource/turtlebot_vel_ctrl
deleted file mode 100644
index e69de29bb..000000000
diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/setup.cfg b/ros2/TurtlebotController/turtlebot_vel_ctrl/setup.cfg
deleted file mode 100644
index 9060e4b44..000000000
--- a/ros2/TurtlebotController/turtlebot_vel_ctrl/setup.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-[develop]
-script_dir=$base/lib/turtlebot_vel_ctrl
-[install]
-install_scripts=$base/lib/turtlebot_vel_ctrl
diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/setup.py b/ros2/TurtlebotController/turtlebot_vel_ctrl/setup.py
deleted file mode 100644
index 33e25ed6d..000000000
--- a/ros2/TurtlebotController/turtlebot_vel_ctrl/setup.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from setuptools import find_packages, setup
-
-package_name = 'turtlebot_vel_ctrl'
-
-setup(
- name=package_name,
- version='0.0.0',
- packages=find_packages(exclude=['test']),
- data_files=[
- ('share/ament_index/resource_index/packages',
- ['resource/' + package_name]),
- ('share/' + package_name, ['package.xml']),
- ],
- install_requires=['setuptools'],
- zip_safe=True,
- maintainer='root',
- maintainer_email='root@todo.todo',
- description='TODO: Package description',
- license='Apache-2.0',
- tests_require=['pytest'],
- entry_points={
- 'console_scripts': [
- 'vel_ctrl = turtlebot_vel_ctrl.vel_ctrl:main'
- ],
- },
-)
diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_copyright.py b/ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_copyright.py
deleted file mode 100644
index 97a39196e..000000000
--- a/ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_copyright.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright 2015 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ament_copyright.main import main
-import pytest
-
-
-# Remove the `skip` decorator once the source file(s) have a copyright header
-@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
-@pytest.mark.copyright
-@pytest.mark.linter
-def test_copyright():
- rc = main(argv=['.', 'test'])
- assert rc == 0, 'Found errors'
diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_flake8.py b/ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_flake8.py
deleted file mode 100644
index 27ee1078f..000000000
--- a/ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_flake8.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright 2017 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ament_flake8.main import main_with_errors
-import pytest
-
-
-@pytest.mark.flake8
-@pytest.mark.linter
-def test_flake8():
- rc, errors = main_with_errors(argv=[])
- assert rc == 0, \
- 'Found %d code style errors / warnings:\n' % len(errors) + \
- '\n'.join(errors)
diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_pep257.py b/ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_pep257.py
deleted file mode 100644
index b234a3840..000000000
--- a/ros2/TurtlebotController/turtlebot_vel_ctrl/test/test_pep257.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2015 Open Source Robotics Foundation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ament_pep257.main import main
-import pytest
-
-
-@pytest.mark.linter
-@pytest.mark.pep257
-def test_pep257():
- rc = main(argv=['.', 'test'])
- assert rc == 0, 'Found code style errors / warnings'
diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/turtlebot_vel_ctrl/__init__.py b/ros2/TurtlebotController/turtlebot_vel_ctrl/turtlebot_vel_ctrl/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/ros2/TurtlebotController/turtlebot_vel_ctrl/turtlebot_vel_ctrl/vel_ctrl.py b/ros2/TurtlebotController/turtlebot_vel_ctrl/turtlebot_vel_ctrl/vel_ctrl.py
deleted file mode 100644
index 32f5d4669..000000000
--- a/ros2/TurtlebotController/turtlebot_vel_ctrl/turtlebot_vel_ctrl/vel_ctrl.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# from irobot_create_msgs.msg import WheelTicks, WheelTicks
-from geometry_msgs.msg import Twist
-from std_msgs.msg import Float64
-
-import rclpy
-from rclpy.node import Node
-from rclpy.qos import qos_profile_sensor_data
-
-# publishing twist message top the turtle bot
-# subscribing to whatever the velocity and angle topics
-
-class TurtleBot4VelCtrlNode(Node):
-
- def __init__(self):
- super().__init__('turtlebot4_velctrl_node')
-
- self.camData_subscriber = self.create_subscription(
- Float64,
- '/cam_data',
- self.camData_callback,
- qos_profile_sensor_data)
-
-
- self.vel_publisher = self.create_publisher(Twist,'robot_0/cmd_vel', 10)
-
- def camData_callback(self,msg_in):
- msg_out = Twist()
-
- k = 1.0
- err_z = msg_in.data
-
- ang_input = err_z * k
-
- msg_out.linear.x = 0.1 #define constant speed
- msg_out.linear.y = 0.0
- msg_out.linear.z = 0.0
-
- msg_out.angular.x = 0.0
- msg_out.angular.y = 0.0
- msg_out.angular.z = ang_input
-
- self.vel_publisher.publish(msg_out)
-
-def main(args=None):
- rclpy.init(args=args) # Initialize ROS2 program
- node = TurtleBot4VelCtrlNode() # Instantiate Node
- rclpy.spin(node) # Puts the node in an infinite loop
-
- # Clean shutdown should be at the end of every node
- node.destroy_node()
- rclpy.shutdown()
-
-if __name__ == '__main__':
- main()
diff --git a/UI/.streamlit/config.toml b/ros2/UI/.streamlit/config.toml
similarity index 100%
rename from UI/.streamlit/config.toml
rename to ros2/UI/.streamlit/config.toml
diff --git a/ros2/UI/Homepage.py b/ros2/UI/Homepage.py
new file mode 100644
index 000000000..372f33ef7
--- /dev/null
+++ b/ros2/UI/Homepage.py
@@ -0,0 +1,61 @@
+import streamlit as st
+from streamlit import runtime
+from streamlit.web import cli as stcli
+import sys
+from ui_generics import *
+
+@st.cache_data
+def runner():
+ st.header("Welcome to the IGVC Homepage!")
+ st.markdown('''
+    Please select an option on the sidebar to the left: it specifies which UI page you'd like to run
+
+
+    Streamlit supports fast reloads: just refresh the page after editing the Python script to get going!
+
+
+ Docs are here: https://docs.streamlit.io/library/api-reference
+ ''')
+ st.divider()
+ st.markdown('''
+    For the freshmen: This UI is designed to serve multiple purposes.
+
+    #1. We need a way to render all of the OpenCV data we have: things like the lane or object detection output.
+
+    #2. We need an easy way to tune parameters, without refreshing every single page for small tweaks.
+
+    #3. We need to be able to control the state machines on the car: each of the function tests is effectively its own state machine.
+    We need to be able to control which test we're in, and visualize any important data in a direct manner.
+
+
+    #4. It makes us look professional, and adds quite a bit of useful fluff for our design report.
+ ''')
+ st.divider()
+ st.markdown('''
+    Of course, not a lot of this is in your control: we haven't built the state machines or finished all of our algorithms.
+    However, Lane Detection is in the perfect place for you to familiarize yourself with both Streamlit and the entire tech stack.
+    I'm detailing all immediate tasks here: please allocate them amongst yourselves.
+    I have taken the liberty of building a bespoke ROS2 publisher and subscriber system (it was not trivial) that should provide a solid framework.
+    However, building a proper UI will get extremely tedious if you don't exploit some of the advantages of Python.
+    Read about list comprehensions and get good. ''')
+ st.divider()
+ st.header("Tasks")
+    st.markdown('''
+    #1: Connect all datastreams from the lane detection algorithm into here.
+    Everything that was a cv2.imshow() there should be rendered here, in a clean and effective manner.
+    #2: Parametrize all of its constants: anything that had a trackbar should be a slider here, and its value should be published.
+    #3: Beautify, and build more tabs & systems so this is the only interface we need with the car.
+    We should not be opening or juggling multiple windows in front of the judges.
+    Debugging systems and sensor messages (e.g. IMU, CAN) should all live here.
+
+    ''')
+
+
+# This allows the system to run without being in a dedicated streamlit session:
+if __name__ == "__main__":
+ if runtime.exists():
+ sidebar()
+ runner()
+ else:
+        sys.argv = ["streamlit", "run", "--client.showSidebarNavigation=False", sys.argv[0]]
+ sys.exit(stcli.main())
diff --git a/ros2/UI/pages/Lane_Detection.py b/ros2/UI/pages/Lane_Detection.py
new file mode 100644
index 000000000..07cdbb3b9
--- /dev/null
+++ b/ros2/UI/pages/Lane_Detection.py
@@ -0,0 +1,242 @@
+import streamlit as st
+import cv2
+from sensor_msgs.msg import Image
+import rclpy
+from rclpy.node import Node
+from rclpy.qos import qos_profile_sensor_data
+from cv_bridge import CvBridge
+from std_msgs.msg import String
+import numpy as np
+from ui_generics import *
+
+# This class currently subscribes to some topics in cam_publisher, which I used to test. Please rewrite it for Lane Detection.
+
+
+class Image_Handler(Node):
+
+ def __init__(self, containers):
+ self.reference_img = None
+        self.topics = ["/raw_left", "/raw_right", "/tf_left",
+ "/tf_right", "/sliding_left", "/sliding_right"]
+ super().__init__('Streamlit_Image_Handler')
+ self._bridge = CvBridge()
+ self.containers = containers
+ self.lane_state_pub = self.create_subscription(
+ String, "lane_state", self.lane_state_callback, qos_profile_sensor_data)
+
+ self._left_subscriber = self.create_subscription(
+            Image, self.topics[0], lambda msg: self.camData_callback(msg, self.containers[0], store_reference=True), qos_profile_sensor_data)
+ self._right_subscriber = self.create_subscription(
+ Image, self.topics[1], lambda msg: self.camData_callback(msg, self.containers[1]), qos_profile_sensor_data)
+ self._tf_left_subscriber = self.create_subscription(
+ Image, self.topics[2], lambda msg: self.camData_callback(msg, self.containers[2]), qos_profile_sensor_data)
+ self._tf_right_subscriber = self.create_subscription(
+ Image, self.topics[3], lambda msg: self.camData_callback(msg, self.containers[3]), qos_profile_sensor_data)
+ self._sliding_left_subscriber = self.create_subscription(
+ Image, self.topics[4], lambda msg: self.camData_callback(msg, self.containers[4]), qos_profile_sensor_data)
+ self._sliding_right_subscriber = self.create_subscription(
+ Image, self.topics[5], lambda msg: self.camData_callback(msg, self.containers[5]), qos_profile_sensor_data)
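+        # The six subscriptions above could also be built with a list
+        # comprehension (see the homepage note); a sketch, leaving the
+        # left-raw store_reference special case aside:
+        #   self._subscribers = [
+        #       self.create_subscription(
+        #           Image, topic,
+        #           lambda msg, c=container: self.camData_callback(msg, c),
+        #           qos_profile_sensor_data)
+        #       for topic, container in zip(self.topics, self.containers)]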
+
+    # Each entry of self.containers is an st.empty() placeholder (two per tab) that the callbacks stream images into
+
+    def camData_callback(self, msg_in, img_location, store_reference=False):
+        img = self._bridge.imgmsg_to_cv2(msg_in, desired_encoding="passthrough")
+        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+        # Keep a copy of the raw left frame for the tuning preview in input_handler
+        if store_reference:
+            self.reference_img = img
+ with img_location:
+ st.image(img)
+
+ def lane_state_callback(self, msg):
+ pass
+ # container = st.empty()
+ # container.write(msg.data)
+
+
+# This class is your publisher: Flesh it out and integrate accordingly
+class Publisher(Node):
+ def __init__(self):
+ super().__init__('Streamlit_Button_Publisher')
+ self.logger = self.get_logger()
+ self.button_pub = self.create_publisher(
+ String, '/streamlit/button', 10)
+
+
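+# A minimal sketch of how tuned values could be sent back to LaneDetection in one
+# message. The topic name and Float64MultiArray type are assumptions, not an
+# agreed interface (a real version would create the publisher once at startup;
+# ros2_numpy, linked further down, is another option).
+def publish_matrix(publisher_node, matrix):
+    # Imported here to keep the sketch self-contained
+    from std_msgs.msg import Float64MultiArray
+    msg = Float64MultiArray()
+    # Flatten the 3x3 perspective matrix row-major; the subscriber can reshape it
+    msg.data = [float(v) for v in matrix.flatten()]
+    matrix_pub = publisher_node.create_publisher(
+        Float64MultiArray, '/streamlit/perspective_matrix', 10)
+    matrix_pub.publish(msg)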
+def render_handler(context):
+ rclpy.try_shutdown()
+ rclpy.init()
+ context.empty()
+ with context:
+ tabs = st.tabs(
+ ["Original", "Birds Eye View", "Sliding Windows"])
+ containers = []
+ frame_containers = []
+ for tab in tabs:
+ cols = tab.columns(2)
+ containers.append(cols[0])
+ containers.append(cols[1])
+ with cols[0]:
+ frame_containers.append(st.empty())
+ with cols[1]:
+ frame_containers.append(st.empty())
+    # This block initializes the ROS2 node without breaking anything :)
+    # Should not need to be touched
+    handler = get_image_handler(frame_containers)
+    while True:
+        rclpy.spin_once(handler)
+
+def demo_publish():
+    publisher = get_button_publisher()
+ msg = String()
+ msg.data = "Streamlit Button!"
+ publisher.button_pub.publish(msg)
+ st.success("Message Published!", icon="✅")
+
+
+def get_from_prior(labels, defaults):
+ defaults = np.resize(defaults, len(labels))
+ # for i in range(len(labels)):
+ # if labels[i] not in st.session_state:
+ # st.session_state[labels[i]] = defaults[i]
+ # else:
+ # defaults[i] = st.session_state[labels[i]]
+ return defaults
+
+
+def input_gen(func, labels, lowers, uppers, vals):
+ lowers = np.resize(lowers, len(labels))
+ uppers = np.resize(uppers, len(labels))
+ func_list = []
+ for i in range(len(labels)):
+ func_list.append(func(
+ labels[i], lowers[i], uppers[i], vals[i]))
+ return func_list
+
+def clear_session_state(*labels):
+ for labelset in labels:
+ for label in labelset:
+            st.session_state.pop(label, None)
+
+
+def input_handler(context):
+ rclpy.try_shutdown()
+ rclpy.init()
+ with context:
+        # Handles all of the input widgets
+        # get_from_prior is meant to persist values across the render-checkbox flip (its body is currently stubbed out)
+        # input_gen streamlines generating a lot of widgets at once
+ tabs = st.tabs(["HSV Tweakers", "Coordinates", "Misc"])
+ # HSV user input
+ with tabs[0]:
+ l, h = st.columns(2)
+ # lower HSV slider
+ L_labels = ["Lower Hue", "Lower Saturation", "Lower Value"]
+ default_vals = [0, 0, 200]
+ default_vals = get_from_prior(L_labels, default_vals)
+ l_h, l_s, l_v = input_gen(l.slider, L_labels, [0], [255], default_vals)
+ U_labels = ["Upper Hue", "Upper Saturation", "Upper Value"]
+ default_vals = [255, 50, 255]
+ default_vals = get_from_prior(U_labels, default_vals)
+ # upper HSV slider
+ u_h, u_s, u_v = input_gen(h.slider, U_labels, [0], [255], default_vals)
+ # Coordinate input widget
+ with tabs[1]:
+ x, y = st.columns(2)
+ x_labels = ["Bottom Left x", "Top Left x",
+ "Bottom Right x", "Top Right x"]
+ default_vals = [12, 66, 635, 595]
+ default_vals = get_from_prior(x_labels, default_vals)
+ bl_x, tl_x, br_x, tr_x = input_gen(
+ x.slider, x_labels, [0], [640], default_vals)
+ y_labels = ["Bottom Left y", "Top Left y",
+ "Bottom Right y", "Top Right y"]
+ default_vals = [355, 304, 344, 308]
+ default_vals = get_from_prior(y_labels, default_vals)
+ bl_y, tl_y, br_y, tr_y = input_gen(
+ y.slider, y_labels, [0], [480], default_vals)
+
+    # These values are sourced from LaneDetection
+ bl = (bl_x, bl_y)
+ tl = (tl_x, tl_y)
+ br = (br_x, br_y)
+ tr = (tr_x, tr_y)
+ pts1 = np.float32([tl, bl, tr, br])
+ pts2 = np.float32([[0, 0], [0, 480], [640, 0], [640, 480]])
+ # Matrix to warp the image for birdseye window
+
+ # This is what we're really looking for, and what we might want to consider returning as a custom ros message...
+ # https://github.com/Box-Robotics/ros2_numpy
+ matrix = cv2.getPerspectiveTransform(pts1, pts2)
+ lower = np.array([l_h, l_s, l_v])
+ upper = np.array([u_h, u_s, u_v])
+
+    handler = get_image_handler(None)
+    img = handler.reference_img
+
+ if img is not None:
+ cols = st.columns(3)
+ cols[0].image(img, "Left reference image for transformation")
+ transformed_left = cv2.warpPerspective(img, matrix, (640, 480))
+ hsv_transformed_left = cv2.cvtColor(
+ transformed_left, cv2.COLOR_BGR2HSV)
+ mask = cv2.inRange(hsv_transformed_left, lower, upper)
+ cols[1].image(transformed_left, "Left Birds Eye View Preview")
+ cols[2].image(mask, "Left Mask Preview")
+    # TODO: Make this button publish the custom ROS2 message :)
+    # TODO: Make this button do something (read from and write to a file)
+ st.button("Publish!", on_click=demo_publish)
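+    # One possible wiring for the TODOs above, reusing the publish_matrix sketch
+    # (topic and message type are still assumptions):
+    #   st.button("Publish!", on_click=lambda: publish_matrix(get_button_publisher(), matrix))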
+
+@st.cache_resource
+def get_image_handler(_containers):
+    # Cached so Streamlit re-runs reuse one node; the leading underscore
+    # excludes _containers from the cache key
+    return Image_Handler(_containers)
+
+@st.cache_resource
+def get_button_publisher():
+    return Publisher()
+
+
+
+# See ../../ros2/src/lanefollowingtest/LaneDetection.py
+# This page should render ALL images in there, as well as publish important update data
+if __name__ == "__main__":
+ # How we Configure the page
+ st.set_page_config(
+ page_title="Lane Detection",
+ page_icon="🛣")
+ sidebar()
+ st.write(
+ "This page is designed to control the lane detection functionality, and allow for quick edits to the algorithm.")
+    render = st.checkbox("Render Video Feed", value=True)
+ display_holder = st.container()
+ if render:
+ render_handler(display_holder)
+ else:
+ input_handler(display_holder)
diff --git "a/UI/pages/\360\237\233\221 Object_Detection.py" b/ros2/UI/pages/Object_Detection.py
similarity index 65%
rename from "UI/pages/\360\237\233\221 Object_Detection.py"
rename to ros2/UI/pages/Object_Detection.py
index 4368c6bd5..0fb9f2289 100644
--- "a/UI/pages/\360\237\233\221 Object_Detection.py"
+++ b/ros2/UI/pages/Object_Detection.py
@@ -1,5 +1,5 @@
import streamlit as st
-
+from ui_generics import *
def image_source_renderer(image):
pass
@@ -10,6 +10,8 @@ def image_source_renderer(image):
if __name__ == "__main__":
st.set_page_config(
- page_title="Lane Detection",
+ page_title="Object Detection",
page_icon="🛑")
+ sidebar()
+
diff --git a/ros2/UI/ui_generics.py b/ros2/UI/ui_generics.py
new file mode 100644
index 000000000..9c5e7d1bd
--- /dev/null
+++ b/ros2/UI/ui_generics.py
@@ -0,0 +1,9 @@
+import streamlit as st
+
+
+def sidebar():
+    with st.sidebar:
+        st.page_link("Homepage.py", label=":derelict_house_building: Homepage", help="Opens Homepage", use_container_width=True)
+        st.page_link("pages/Lane_Detection.py", label=":motorway: Lane Detection", help="Opens Lane detection", use_container_width=True)
+        st.page_link("pages/Object_Detection.py", label=":octagonal_sign: Object Detection", help="Opens Object detection", use_container_width=True)
+
diff --git a/ros2/lanefollowingtest/LaneDetection.py b/ros2/lanefollowingtest/LaneDetection.py
deleted file mode 100644
index 78a933de3..000000000
--- a/ros2/lanefollowingtest/LaneDetection.py
+++ /dev/null
@@ -1,342 +0,0 @@
-import cv2
-import numpy as np
-
-vidcap = cv2.VideoCapture("/dev/video0")
-success, image = vidcap.read()
-
-def nothing(x):
- pass
-
-class Lane_Follower():
-
- def __init__(self):
- self._left_fit = None
- self._right_fit = None
- self._binary_warped = None
-
- def set_binwarp(self, binwarp):
- self._binary_warped = binwarp
-
- def Plot_line(self, smoothen=False,prevFrameCount=6): #used Udacity's code to plot the lines and windows over lanes
- histogram = np.sum(self._binary_warped[self._binary_warped.shape[0]//2:, :], axis=0)
- # histogram = np.sum(self._binary_warped[self._binary_warped.shape[0]//2:,:], axis=0)
- # Create an output image to draw on and visualize the result
- out_img = np.dstack((self._binary_warped, self._binary_warped, self._binary_warped))*255
- # Find the peak of the left and right halves of the histogram
- # These will be the starting point for the left and right lines
- midpoint = np.int32(histogram.shape[0]/2)
- leftx_base = np.argmax(histogram[:midpoint])
- rightx_base = np.argmax(histogram[midpoint:]) + midpoint
- lane_width= abs(rightx_base-leftx_base)
- # Choose the number of sliding windows
- nwindows = 9
- # Set height of windows
- window_height = np.int32(self._binary_warped.shape[0]/nwindows)
- # Identify the x and y positions of all nonzero pixels in the image
- nonzero = self._binary_warped.nonzero()
- nonzeroy = np.array(nonzero[0])
- nonzerox = np.array(nonzero[1])
- # Current positions to be updated for each window
- leftx_current = leftx_base
- rightx_current = rightx_base
- # Set the width of the windows +/- margin
- margin = 100
- # Set minimum number of pixels found to recenter window
- minpix = 20
- # Create empty lists to receive left and right lane pixel indices
- left_lane_inds = []
- right_lane_inds = []
-
- # Step through the windows one by one
- for window in range(nwindows):
- # Identify window boundaries in x and y (and right and left)
- win_y_low = self._binary_warped.shape[0] - (window+1)*window_height
- win_y_high = self._binary_warped.shape[0] - window*window_height
- win_xleft_low = leftx_current - margin
- win_xleft_high = leftx_current + margin
- win_xright_low = rightx_current - margin
- win_xright_high = rightx_current + margin
- # Draw the windows on the visualization image
- cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),
- (0,255,0), 2)
- cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),
- (0,255,0), 2)
- # Identify the nonzero pixels in x and y within the window
- good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
- (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
- good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
- (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
- # Append these indices to the lists
- left_lane_inds.append(good_left_inds)
- right_lane_inds.append(good_right_inds)
- # If you found > minpix pixels, recenter next window on their mean position
- if len(good_left_inds) > minpix:
- leftx_current = np.int32(np.mean(nonzerox[good_left_inds]))
- if len(good_right_inds) > minpix:
- rightx_current = np.int32(np.mean(nonzerox[good_right_inds]))
-
- # Concatenate the arrays of indices
- try:
- left_lane_inds = np.concatenate(left_lane_inds)
- right_lane_inds = np.concatenate(right_lane_inds)
- except ValueError:
- pass
-
-
- # Extract left and right line pixel positions
- leftx = nonzerox[left_lane_inds]
- lefty = nonzeroy[left_lane_inds]
- rightx = nonzerox[right_lane_inds]
- righty = nonzeroy[right_lane_inds]
-
-
- # Fit a second order polynomial to each
- if(lefty.any() and leftx.any()):
- self._left_fit = np.polyfit(lefty, leftx, 2)
- else:
- return None
- if(righty.any() and rightx.any()):
- self._right_fit = np.polyfit(righty, rightx, 2)
- else:
- return None
-
- # Ideas. Either use a from pervious point thing
- # simple idea: just filter out the empty spots
-
- if(smoothen):
- global fit_prev_left
- global fit_prev_right
- global fit_sum_left
- global fit_sum_right
- if(len(fit_prev_left)>prevFrameCount):
- fit_sum_left-= fit_prev_left.pop(0)
- fit_sum_right-= fit_prev_right.pop(0)
-
- fit_prev_left.append(self._left_fit)
- fit_prev_right.append(self._right_fit)
- fit_sum_left+=self._left_fit
- fit_sum_right+= self._right_fit
-
- no_of_fit_values=len(fit_prev_left)
- self._left_fit= fit_sum_left/no_of_fit_values
- self._right_fit= fit_sum_right/no_of_fit_values
-
-
- ploty = np.linspace(0, self._binary_warped.shape[0]-1, self._binary_warped.shape[0] )
- left_fitx = self._left_fit[0]*ploty**2 + self._left_fit[1]*ploty + self._left_fit[2]
- right_fitx = self._right_fit[0]*ploty**2 + self._right_fit[1]*ploty + self._right_fit[2]
-
- out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
- out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
-
- nonzero = self._binary_warped.nonzero()
- nonzeroy = np.array(nonzero[0])
- nonzerox = np.array(nonzero[1])
-
- window_img = np.zeros_like(out_img)
- # Generate a polygon to illustrate the search window area
- # And recast the x and y points into usable format for cv2.fillPoly()
- left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
- left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin,
- ploty])))])
- left_line_pts = np.hstack((left_line_window1, left_line_window2))
- right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
- right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin,
- ploty])))])
- right_line_pts = np.hstack((right_line_window1, right_line_window2))
-
- # Draw the lane onto the warped blank image
- cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
- cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
-
- left_line_pts = np.array([np.transpose(np.vstack([left_fitx, ploty]))], dtype=np.int32)
- right_line_pts = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))], dtype=np.int32)
-
- cv2.polylines(out_img, left_line_pts, isClosed=False, color=(0, 255, 255), thickness=3)
- cv2.polylines(out_img, right_line_pts, isClosed=False, color=(0, 255, 255), thickness=3)
-
- result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
-
- return out_img, result, left_fitx,right_fitx,ploty,self._left_fit, self._right_fit, \
- left_lane_inds,right_lane_inds,lane_width
-
- def measure_position_meters(self):
- # Define conversion in x from pixels space to meters
-
- # center is roughly 0.005
- xm_per_pix = 1 # meters per pixel in x dimension
- # Choose the y value corresponding to the bottom of the image
- y_max = self._binary_warped.shape[0]
- # Calculate left and right line positions at the bottom of the image
- left_x_pos = self._left_fit[0]*y_max**2 + self._left_fit[1]*y_max + self._left_fit[2]
- right_x_pos = self._right_fit[0]*y_max**2 + self._right_fit[1]*y_max + self._right_fit[2]
- # Calculate the x position of the center of the lane
- center_lanes_x_pos = (left_x_pos + right_x_pos)//2
- # Calculate the deviation between the center of the lane and the center of the picture
- # The car is assumed to be placed in the center of the picture
- # If the deviation is negative, the car is on the felt hand side of the center of the lane
- veh_pos = ((self._binary_warped.shape[1]//2) - center_lanes_x_pos) * xm_per_pix
- veh_pos = abs(veh_pos) / 40000
- return veh_pos
-
-
-
-
-
-if __name__ == "__main__":
- cv2.namedWindow("Trackbars")
-
- cv2.createTrackbar("L - H", "Trackbars", 0, 255, nothing)
- cv2.createTrackbar("L - S", "Trackbars", 0, 255, nothing)
- cv2.createTrackbar("L - V", "Trackbars", 200, 255, nothing)
- cv2.createTrackbar("U - H", "Trackbars", 255, 255, nothing)
- cv2.createTrackbar("U - S", "Trackbars", 50, 255, nothing)
- cv2.createTrackbar("U - V", "Trackbars", 255, 255, nothing)
- Detector = Lane_Follower()
-
- while success:
- success, image = vidcap.read()
- frame = cv2.resize(image, (640,480))
-
- # ## Choosing points for perspective transformation
- # THESE ARE PRESET, MODIFYING
- # tl = (222,387)
- # bl = (70 ,472)
- # tr = (400,380)
- # br = (538,472)
- bl = (12,355)
- tl = (66,304)
- br = (635,344)
- tr = (595,308)
-
-
- cv2.circle(frame, tl, 5, (0,0,255), -1)
- cv2.circle(frame, bl, 5, (0,0,255), -1)
- cv2.circle(frame, tr, 5, (0,0,255), -1)
- cv2.circle(frame, br, 5, (0,0,255), -1)
-
- ## Aplying perspective transformation
- pts1 = np.float32([tl, bl, tr, br])
- pts2 = np.float32([[0, 0], [0, 480], [640, 0], [640, 480]])
-
- # Matrix to warp the image for birdseye window
- matrix = cv2.getPerspectiveTransform(pts1, pts2)
- transformed_frame = cv2.warpPerspective(frame, matrix, (640,480))
-
- ### Object Detection
- # Image Thresholding
- hsv_transformed_frame = cv2.cvtColor(transformed_frame, cv2.COLOR_BGR2HSV)
-
- l_h = cv2.getTrackbarPos("L - H", "Trackbars")
- l_s = cv2.getTrackbarPos("L - S", "Trackbars")
- l_v = cv2.getTrackbarPos("L - V", "Trackbars")
- u_h = cv2.getTrackbarPos("U - H", "Trackbars")
- u_s = cv2.getTrackbarPos("U - S", "Trackbars")
- u_v = cv2.getTrackbarPos("U - V", "Trackbars")
-
- lower = np.array([l_h,l_s,l_v])
- upper = np.array([u_h,u_s,u_v])
- mask = cv2.inRange(hsv_transformed_frame, lower, upper)
-
- Detector.set_binwarp(binwarp=mask)
-
-
- #Histogram
- histogram = np.sum(mask[mask.shape[0]//2:, :], axis=0)
- midpoint = np.int32(histogram.shape[0]/2)
- left_base = np.argmax(histogram[:midpoint])
- right_base = np.argmax(histogram[midpoint:]) + midpoint
-
- #Sliding Window
- y = 472
- lx = []
- rx = []
-
- # Set the width of the windows +/- margin
- margin = 100
- # Set minimum number of pixels found to recenter window
- minpix = 50
-
- msk = mask.copy()
- output = Detector.Plot_line()
- if(output is not None):
- out_img, result, left_fitx,right_fitx, \
- ploty,left_fit, right_fit, left_lane_inds,right_lane_inds,lane_width = output
- pos = Detector.measure_position_meters()
- print(pos)
- while y>0:
- nonzero = msk.nonzero()
- nonzeroy = np.array(nonzero[0])
- nonzerox = np.array(nonzero[1])
- # print(nonzero)
-
-
- ## Left threshold
- img = mask[y-40:y, left_base-50:left_base+50]
- contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
- for contour in contours:
- M = cv2.moments(contour)
- if M["m00"] != 0:
- cx = int(M["m10"]/M["m00"])
- cy = int(M["m01"]/M["m00"])
- lx.append(left_base-50 + cx)
- left_base = left_base-50 + cx
-
- ## Right threshold
- img = mask[y-40:y, right_base-50:right_base+50]
- contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
- for contour in contours:
- M = cv2.moments(contour)
- if M["m00"] != 0:
- cx = int(M["m10"]/M["m00"])
- cy = int(M["m01"]/M["m00"])
- rx.append(right_base-50 + cx)
- right_base = right_base-50 + cx
-
-
-
- cv2.rectangle(msk, (left_base-50,y), (left_base+50,y-40), (255,255,255), 2)
- cv2.rectangle(msk, (right_base-50,y), (right_base+50,y-40), (255,255,255), 2)
- y -= 40
- try:
- leftx = nonzerox[lx]
- lefty = nonzeroy[lx]
- rightx = nonzerox[rx]
- righty = nonzeroy[rx]
- except:
- print("error this time around, retrying")
- # cv2.imshow("Outimg" , out_img)
- cv2.imshow("R",result)
-
- cv2.imshow("Original", frame)
- cv2.imshow("Bird's Eye View", transformed_frame)
- cv2.imshow("Lane Detection - Image Thresholding", mask)
- cv2.imshow("Lane Detection - Sliding Windows", msk)
- # Display the result
- if cv2.waitKey(10) == 27:
- break
-
- # if bool(lx) and bool(rx):
- # left_fit = np.polyfit(lefty, leftx, 2)
- # right_fit = np.polyfit(righty, rightx, 2)
-
- # # Generate y values for the entire height of the image
- # ploty = np.linspace(0, transformed_frame.shape[0] - 1, transformed_frame.shape[0])
-
- # # Generate x values using the polynomial fits
- # left_fitx = np.polyval(left_fit, ploty)
- # right_fitx = np.polyval(right_fit, ploty)
-
- # # Create an image to draw the lane lines
- # line_image = np.zeros_like(msk)
-
- # # Draw the left lane line
- # for i in range(len(left_fitx)):
- # cv2.circle(line_image, (int(left_fitx[i]), int(ploty[i])), 1, 255, -1)
-
- # # Draw the right lane line
- # for i in range(len(right_fitx)):
- # cv2.circle(line_image, (int(right_fitx[i]), int(ploty[i])), 1, 255, -1)
-
- # # Combine the original image with the drawn lane lines
- # result = cv2.addWeighted(mask, 1, cv2.cvtColor(line_image, cv2.COLOR_GRAY2BGR), 0.3, 0)
diff --git a/ros2/lanefollowingtest/SobelVidCam.py b/ros2/lanefollowingtest/SobelVidCam.py
deleted file mode 100644
index 57acfd4cd..000000000
--- a/ros2/lanefollowingtest/SobelVidCam.py
+++ /dev/null
@@ -1,665 +0,0 @@
-import numpy as np
-import cv2
-import matplotlib.pyplot as plt
-from moviepy.editor import VideoFileClip
-from queue import Queue
-import os
-import glob
-from whitepxlClass import WhitePixelDetector
-
-
-
-class CameraCalibration:
- def __init__(self, nx, ny, chessboard_dir,SHOW_PLOT=False):
- self.NX = nx
- self.NY = ny
- self.CHESSBOARD_DIR = chessboard_dir
- self.show_plot = SHOW_PLOT
-
- def get_chessboard_corners(self, imgs):
- """Return image and object points from a set of chessboard images."""
- if not isinstance(imgs, list):
- raise ValueError("imgs parameter needs to be a list.")
-
- # Initialize 3D object points
- objp = np.zeros((self.NX * self.NY, 3), np.float32)
- objp[:, :2] = np.mgrid[0:self.NX, 0:self.NY].T.reshape(-1, 2)
-
- imgps = []
- objps = []
-
- for img in imgs:
- gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
- found, corners = cv2.findChessboardCorners(gray, (self.NX, self.NY), None)
- if not found:
- continue
- imgps.append(corners)
- objps.append(objp)
-
- return imgps, objps
-
- def chessboard_cam_calib(self, imgps, objps, img_size):
- """Returns camera calibration matrix and distortion coefficients."""
- ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objps, imgps, img_size, None, None)
- return mtx, dist
-
- def plots_and_calibration(self):
- img_locs = glob.glob(self.CHESSBOARD_DIR)
- # print(img_locs)
-
- # Read all images into a list
- imgs = [cv2.imread(img_loc) for img_loc in img_locs]
-
-
-
- # Get size of images using one image as a sample, with width as the first element
- img_size = imgs[0].shape[::-1][1:]
-
- # Calibrate camera and retrieve calibration matrix and distortion coefficients
- imgps, objps = self.get_chessboard_corners(imgs)
- MTX, DIST = self.chessboard_cam_calib(imgps, objps, img_size)
-
- if self.show_plot:
-
- # Set up figure for plotting
- f, axarr = plt.subplots(len(imgs), 3)
- f.set_size_inches(15, 30)
-
- # Loop through images, undistort them, and draw corners on undistorted versions.
- for i, img in enumerate(imgs):
- # Set column headings on figure
- if i == 0:
- axarr[i, 0].set_title("Original Image")
- axarr[i, 1].set_title("Undistorted Image")
- axarr[i, 2].set_title("Corners Drawn")
-
- # Generate new undistorted image
- undist = cv2.undistort(img, MTX, DIST, None, MTX)
- undist_copy = undist.copy()
-
- # Generate new image with corner points drawn
- undist_grey = cv2.cvtColor(undist_copy, cv2.COLOR_BGR2GRAY)
- found, corners = cv2.findChessboardCorners(undist_grey, (self.NX, self.NY), None)
-
- if found:
- drawn = cv2.drawChessboardCorners(undist_copy, (self.NX, self.NY), corners, found)
- else:
- drawn = img
-
- # Plot images on figure
- axarr[i, 0].imshow(img)
- axarr[i, 0].axis('off')
- axarr[i, 1].imshow(undist)
- axarr[i, 1].axis('off')
- axarr[i, 2].imshow(drawn)
- axarr[i, 2].axis('off')
- return MTX,DIST
-
-
-def warp_image(img,src,dst):
- img_size = (img.shape[1], img.shape[0])
- M= cv2.getPerspectiveTransform(src, dst)
- inv= cv2.getPerspectiveTransform(dst, src)
- warped= cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
- return warped,inv
-
-def reverse_warping(img,M):
- img_size = (img.shape[1], img.shape[0])
- unwarped= cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
- return unwarped
-
-def channelwise_thresholding(image,thresh):
- image = image*(255/np.max(image))
- # print(image)
- # 2) Apply a threshold to the L channel
- binary_output = np.zeros_like(image)
- binary_output[(image > thresh[0]) & (image <= thresh[1])] = 1
- return binary_output
-
-def Custom_channel_converter(img):
-
- img1=cv2.cvtColor(img,cv2.COLOR_RGB2YCrCb)[:,:,0] # Y channel
- img2=cv2.cvtColor(img,cv2.COLOR_RGB2YCrCb)[:,:,1] #Cr channel
- img3=cv2.cvtColor(img,cv2.COLOR_RGB2HLS)[:,:,1] #L channel
- img4=cv2.cvtColor(img,cv2.COLOR_RGB2HLS)[:,:,2] #S channel
- return img1, img2, img3, img4
-
-def sobel_image(img, orient='x', thresh_min=0, thresh_max=255, convert=True):
-
-
- gray= img
- if(convert):
- gray= cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
-
- sobel=None
- if(orient=='x'):
- sobel= cv2.Sobel(gray, cv2.CV_64F, 1,0)
- else:
- sobel= cv2.Sobel(gray, cv2.CV_64F, 0,1)
-
- sobel_abs= np.absolute(sobel)
- sobel_8bit= np.uint8(255* sobel_abs/np.max(sobel_abs))
- binary_output= np.zeros_like(sobel_8bit)
- binary_output[(sobel_8bit>=thresh_min) & (thresh_max>=sobel_8bit)]=1
-
- return binary_output
-
-def sobel_gradient_image(img, thresh=(0, np.pi/2), convert=True):
- gray= img
- if(convert):
- gray= cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
-
- sobelx= cv2.Sobel(gray, cv2.CV_64F, 1,0, ksize=15)
- sobely= cv2.Sobel(gray, cv2.CV_64F, 0,1, ksize=15)
-
- abs_sobelx= np.absolute(sobelx)
- abs_sobely= np.absolute(sobely)
-
- grad= np.arctan2(abs_sobely, abs_sobelx)
-
- binary_output=np.zeros_like(grad)
- binary_output[(grad>thresh[0])&(grad= win_y_low) & (nonzeroy < win_y_high) &
- (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
- good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
- (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
- # Append these indices to the lists
- left_lane_inds.append(good_left_inds)
- right_lane_inds.append(good_right_inds)
- # If you found > minpix pixels, recenter next window on their mean position
- if len(good_left_inds) > minpix:
- leftx_current = np.int32(np.mean(nonzerox[good_left_inds]))
- if len(good_right_inds) > minpix:
- rightx_current = np.int32(np.mean(nonzerox[good_right_inds]))
-
- # Concatenate the arrays of indices
- left_lane_inds = np.concatenate(left_lane_inds)
- right_lane_inds = np.concatenate(right_lane_inds)
-
- # Extract left and right line pixel positions
- leftx = nonzerox[left_lane_inds]
- lefty = nonzeroy[left_lane_inds]
- rightx = nonzerox[right_lane_inds]
- righty = nonzeroy[right_lane_inds]
-
- # Fit a second order polynomial to each
- left_fit = np.polyfit(lefty, leftx, 2)
- right_fit = np.polyfit(righty, rightx, 2)
-
- if(smoothen):
- global fit_prev_left
- global fit_prev_right
- global fit_sum_left
- global fit_sum_right
- if(len(fit_prev_left)>prevFrameCount):
- fit_sum_left-= fit_prev_left.pop(0)
- fit_sum_right-= fit_prev_right.pop(0)
-
- fit_prev_left.append(left_fit)
- fit_prev_right.append(right_fit)
- fit_sum_left+=left_fit
- fit_sum_right+= right_fit
-
- no_of_fit_values=len(fit_prev_left)
- left_fit= fit_sum_left/no_of_fit_values
- right_fit= fit_sum_right/no_of_fit_values
-
-
- ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
- left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
- right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
-
- out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
- out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
-
- nonzero = binary_warped.nonzero()
- nonzeroy = np.array(nonzero[0])
- nonzerox = np.array(nonzero[1])
-
- window_img = np.zeros_like(out_img)
- # Generate a polygon to illustrate the search window area
- # And recast the x and y points into usable format for cv2.fillPoly()
- left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
- left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin,
- ploty])))])
- left_line_pts = np.hstack((left_line_window1, left_line_window2))
- right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
- right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin,
- ploty])))])
- right_line_pts = np.hstack((right_line_window1, right_line_window2))
-
- # Draw the lane onto the warped blank image
- cv2.fillPoly(window_img, np.int32([left_line_pts]), (0,255, 0))
- cv2.fillPoly(window_img, np.int32([right_line_pts]), (0,255, 0))
- result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
-
- return out_img, result, left_fitx,right_fitx,ploty,left_fit, right_fit,left_lane_inds,right_lane_inds,lane_width
-
-def draw_lane(original_img, Combined_img, left_fitx, right_fitx, M):
- new_img = np.copy(original_img)
- warp_zero = np.zeros_like(Combined_img).astype(np.uint8)
- color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
-
- h,w = Combined_img.shape
- ploty = np.linspace(0, h-1, num=h)
-
- # print("left_fitx shape:", left_fitx.shape)
- # print("ploty shape:", ploty.shape)
-
- if len(left_fitx) != len(ploty):
- # Adjust the length of left_fitx to match the length of ploty
- left_fitx = left_fitx[:len(ploty)]
-
- if len(right_fitx) != len(ploty):
- # Adjust the length of left_fitx to match the length of ploty
- right_fitx = right_fitx[:len(ploty)]
-
-
- pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
- pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
- pts = np.hstack((pts_left, pts_right))
-
- cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
- cv2.polylines(color_warp, np.int32([pts_left]), isClosed=False, color=(255,0,0), thickness=15)
- cv2.polylines(color_warp, np.int32([pts_right]), isClosed=False, color=(255,0,0), thickness=15)
-
- return color_warp, new_img
-
-center_distances= Queue(maxsize=15)
-distanceSum=0
-def get_car_position(l_fit, r_fit,w,h):
- xm_per_pix=3.7/700
-
- center_dist=0
- lane_center_position=0
- if r_fit is not None and l_fit is not None:
- car_position = w/2
- l_fit_x_int = l_fit[0]*h**2 + l_fit[1]*h + l_fit[2]
- r_fit_x_int = r_fit[0]*h**2 + r_fit[1]*h + r_fit[2]
- lane_center_position = (r_fit_x_int + l_fit_x_int) /2
- center_dist = (car_position - lane_center_position) * xm_per_pix
-
-
- global distanceSum
- if(center_distances.full()):
- el=center_distances.get()
- distanceSum-=el
-
- center_distances.put(center_dist)
- distanceSum+=center_dist
-
- no_of_distance_values=center_distances.qsize()
- center_dist= distanceSum/no_of_distance_values
- return center_dist,lane_center_position
-
-def get_direction(center_dist):
- direction = ''
- if center_dist > 0:
- direction = 'right'
- elif center_dist < 0:
- direction = 'left'
- return direction
-
-def Plot_details(laneImage,curv_rad,center_dist,width_lane,lane_center_position):
-    offset_top = 0
-    copy = np.zeros_like(laneImage)
-
-    h = laneImage.shape[0]
-    font = cv2.FONT_HERSHEY_COMPLEX_SMALL
-    text = 'Curve radius: ' + '{:04.2f}'.format(curv_rad) + 'cm'
-    cv2.putText(laneImage, text, (40,70+offset_top), font, 1.5, (255,255,255), 2, cv2.LINE_AA)
-    cv2.putText(copy, text, (40,100+offset_top), font, 4.0, (255,255,255), 3, cv2.LINE_AA)
-
-    abs_center_dist = abs(center_dist)
-    direction = get_direction(center_dist)
-    text = '{:04.3f}'.format(abs_center_dist) + 'm ' + direction + ' of center'
-# cv2.putText(laneImage, 'steering '+direction, (40,110+offset_top), font, 1.5, (255,255,255), 2, cv2.LINE_AA)
-    cv2.putText(laneImage, '|', (640,710), font, 2.0, (255,255,255), 3, cv2.LINE_AA)
-    cv2.putText(laneImage, '|', (int(lane_center_position),680), font, 2.0, (255,0,0), 3, cv2.LINE_AA)
-    cv2.putText(laneImage, text, (40,120+offset_top), font, 1.5, (255,255,255), 2, cv2.LINE_AA)
-
-    text = 'Lane Width: ' + '{:04.2f}'.format(width_lane) + 'm'
-    cv2.putText(laneImage, text, (40,170+offset_top), font, 1.5, (255,255,255), 2, cv2.LINE_AA)
-    cv2.putText(copy, text, (40,280+offset_top), font, 4.0, (255,255,255), 3, cv2.LINE_AA)
-
- return laneImage, copy
-
-width_lane_avg=[]
-radius_values = Queue(maxsize=15)
-radius_sum=0
-
-def calc_radius_position(combined, l_fit, r_fit, l_lane_inds, r_lane_inds,lane_width):
-
- # Define conversions in x and y from pixels space to meters
-    ym_per_pix = (30*100)/720 # centimeters per pixel in y dimension (the factor of 100 converts m to cm)
-    xm_per_pix = (3.7*100)/700 # centimeters per pixel in x dimension
- left_curverad, right_curverad, center_dist, width_lane = (0, 0, 0, 0)
- h = combined.shape[0]
- w = combined.shape[1]
- ploty = np.linspace(0, h-1, h)
- y_eval = np.max(ploty)
-
- # Identify the x and y positions of all nonzero pixels in the image
- nonzero = combined.nonzero()
- nonzeroy = np.array(nonzero[0])
- nonzerox = np.array(nonzero[1])
-
- # Extract left and right line pixel positions
- leftx = nonzerox[l_lane_inds]
- lefty = nonzeroy[l_lane_inds]
- rightx = nonzerox[r_lane_inds]
- righty = nonzeroy[r_lane_inds]
-
- if len(leftx) != 0 and len(rightx) != 0:
- # Fit new polynomials to x,y in world space
- left_fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
- right_fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)
-
-        # Apply the radius-of-curvature formula R = (1 + (2Ay + B)^2)^(3/2) / |2A| to each fit
- left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
- right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
-
-        width_lane = lane_width*xm_per_pix
-        if len(width_lane_avg) != 0:
-            avg_width = sum(width_lane_avg)/len(width_lane_avg)
-            if abs(avg_width-width_lane) < 0.5:
-                width_lane_avg.append(width_lane)
-            else:
-                width_lane = avg_width
-        else:
-            # Seed the running average on the first frame; without this the list stays empty forever
-            width_lane_avg.append(width_lane)
-
-
- # Averaging radius value over past 15 frames
- global radius_sum
-    if radius_values.full():
-        el = radius_values.get()
-        radius_sum -= el
-
-    curve_radius = (left_curverad + right_curverad)/2
-    radius_values.put(curve_radius)
-    radius_sum += curve_radius
-
-    no_of_radius_values = radius_values.qsize()
-    curve_radius = radius_sum/no_of_radius_values
-# print(curve_radius, radius_sum,no_of_radius_values)
-
- center_dist,lane_center_position= get_car_position(l_fit,r_fit,w,h) #getting the car distance from the center
- return curve_radius, center_dist,width_lane,lane_center_position
-
-def undistort(img,mtx,dist):
- return cv2.undistort(img,mtx,dist, None, mtx)
-
-
-def Lane_pipeline(img,smoothen,prevFrameCount):
- undistorted_image= undistort(img,mtx,dist)
- warped_image,M= warp_image(undistorted_image)
- image_S_channel= cv2.cvtColor(warped_image, cv2.COLOR_RGB2HLS)[:,:,2]
-
- imgY, imgCr, imgb, imgS= Custom_channel_converter(warped_image)
-
- Ybinary= channelwise_thresholding(imgY,(215,255))
- Crbinary= channelwise_thresholding(imgCr,(215,255))
- Lbinary= channelwise_thresholding(imgb,(215,255))
- Sbinary= channelwise_thresholding(imgS,(200,255))
- combined = np.zeros_like(imgY)
-
-
-# sobel_mag_image= sobel_mag(image_S_channel, (15,60), False)
- # sobel_image1= sobel_image(image_S_channel,'x', 15,60, False)
- # sobel_grad_image= sobel_gradient_image(image_S_channel, (0.5,1.8), False)
- # combined[(Crbinary==1)|(Ybinary==1)|((Lbinary==1)&(Sbinary==1))] = 1
-# |((sobel_image1==1) & (sobel_grad_image==1))
-# plt.imshow(combined)
-# combined[]=1
-
-# |((sobel_image1==1)&(sobel_grad_image==1))
-# ((sobel_mag_image == 1) & (sobel_grad_image == 0))
-
-# out_img,out_img1, left_fitx,right_fitx,ploty,left_curverad,right_curverad,center_dist= Plot_line(combined)
- out_img,out_img1, left_fitx,right_fitx,ploty,left_fit, right_fit,left_lane_inds,right_lane_inds,lane_width= Plot_line(Ybinary,smoothen,prevFrameCount)
- curverad,center_dist,width_lane,lane_center_position= calc_radius_position(combined,left_fit, right_fit,left_lane_inds,right_lane_inds,lane_width)
- laneImage,new_img =draw_lane(img, combined, left_fitx, right_fitx, M)
- unwarped_image= reverse_warping(laneImage,M)
- laneImage = cv2.addWeighted(new_img, 1, unwarped_image, 0.5, 0)
- laneImage, copy = Plot_details(laneImage,curverad,center_dist,width_lane,lane_center_position)
- # print(center_dist)
- return img,out_img,out_img1,unwarped_image,laneImage,combined,copy
-
-
-
-def CallPipeline(image):
- smoothen= True
- prevFrameCount=4
- rgb_image,out_img,out_img1,unwarped_image,laneImage,combined,data_copy= Lane_pipeline(image,smoothen,prevFrameCount)
-
- out_image = np.zeros((720,1280,3), dtype=np.uint8)
-
- #stacking up various images in one output Image
- out_image[0:720,0:1280,:] = cv2.resize(laneImage,(1280,720)) #top-left
- out_image[20:190,960:1260,:] = cv2.resize(np.dstack((combined*255, combined*255, combined*255)),(300,170))#side Panel
- out_image[210:380,960:1260,:] = cv2.resize(out_img,(300,170))#side Panel
-# out_image[400:570,960:1260,:] = cv2.resize(data_copy,(300,170))#bottom-left
- return out_image
-
-
-
-def main():
- # Set parameters
- cap = cv2.VideoCapture("/dev/video2")
-
-
- nx = 9
- ny = 6
- chessboard_dir = './camera_cal_adesso/*.jpg'
-
- # Create an instance of the CameraCalibration class
- calibrator = CameraCalibration(nx, ny, chessboard_dir)
-
-
- # Run the calibration and plotting
- MTX,DIST = calibrator.plots_and_calibration()
- mtx = MTX
- dist = DIST
-
-
- ret = True
- while ret:
- ret, frame = cap.read()
- lane_test_img = frame
-
- offset=10
- height, width= frame.shape[0], frame.shape[1]
-
- lane_test_undist = cv2.undistort(lane_test_img, MTX, DIST, None, MTX)
- # lane_test_undist = cv2.cvtColor(lane_test_img, cv2.COLOR_BGR2RGB)
- reg_distort = np.hstack((frame, lane_test_undist))
- white_pixel_detector = WhitePixelDetector(lane_test_undist)
-
- result = white_pixel_detector.process_frame()
- if result:
- frame, tl_pt, tr_pt, bl_pt, br_pt = result
- # print("Top Left:", tl_pt)
- # print("Top Right:", tr_pt)
- # print("Bottom Left:", bl_pt)
- # print("Bottom Right:", br_pt)
-
- src = np.float32([
- tl_pt, # Top-left corner
- bl_pt, # Bottom-left corner
- br_pt, # Bottom-right corner
- tr_pt # Top-right corner
- ])
-
- dst=np.float32([(offset,0),(width-offset,0),(width-offset,height),(offset,height)])
-
- unwarped_image,M= warp_image(lane_test_img,src,dst)
- cv2.imshow("Bird's Eye View", unwarped_image)
-
- imgY, imgCr, imgL, imgS = Custom_channel_converter(unwarped_image)
-
- result_image = np.zeros((2 * imgY.shape[0], 2 * imgY.shape[1]), dtype=np.uint8)
-
-
- # Place each frame in the result image
- result_image[0:imgY.shape[0], 0:imgY.shape[1]] = imgY
- result_image[0:imgCr.shape[0], imgY.shape[1]:] = imgCr
- result_image[imgY.shape[0]:, 0:imgL.shape[1]] = imgL
- result_image[imgY.shape[0]:, imgL.shape[1]:] = imgS
-
-
- # Display labels on each section
- cv2.putText(result_image, 'imgY', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)
- cv2.putText(result_image, 'imgCr', (imgY.shape[1] + 10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)
- cv2.putText(result_image, 'imgL', (10, imgY.shape[0] + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)
- cv2.putText(result_image, 'imgS', (imgY.shape[1] + 10, imgY.shape[0] + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)
-
-
-
- Ybinary= channelwise_thresholding(imgY,(220,255))
- Crbinary= channelwise_thresholding(imgCr,(220,255))
- Lbinary= channelwise_thresholding(imgL,(220,252))
- Sbinary= channelwise_thresholding(imgS,(220,255))
-
- Ybinary= cv2.bitwise_not(Ybinary)
- Crbinary= cv2.bitwise_not(Crbinary)
- Lbinary= cv2.bitwise_not(Lbinary)
- Sbinary= cv2.bitwise_not(Sbinary)
- combined = np.zeros_like(imgY)
-
- warped_image,M= warp_image(lane_test_img,src,dst)
-
- image_S_channel= cv2.cvtColor(warped_image, cv2.COLOR_RGB2HLS)[:,:,2]
-
- sobel_image1= sobel_image(image_S_channel,'x', 15,60, False)
- sobel_grad_image= sobel_gradient_image(image_S_channel, (0.5,1.8), False)
- combined[(Crbinary==1)|(Ybinary==1)|((Lbinary==1)&(Sbinary==1))] = 1
- combined = cv2.bitwise_not(combined)
-
- cv2.imshow("PLS",combined)
-
-
-
- # Create a blank image to display the binary images
- result_binary_image = np.zeros((2 * Ybinary.shape[0], 2 * Ybinary.shape[1]), dtype=np.uint8)
-
- # Place each binary image in the result image
- result_binary_image[0:Ybinary.shape[0], 0:Ybinary.shape[1]] = Ybinary
- result_binary_image[0:Crbinary.shape[0], Ybinary.shape[1]:] = Crbinary
- result_binary_image[Ybinary.shape[0]:, 0:Lbinary.shape[1]] = Lbinary
- result_binary_image[Ybinary.shape[0]:, Lbinary.shape[1]:] = Sbinary
-
- # Display labels on each section
- cv2.putText(result_binary_image, 'Ybinary', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)
- cv2.putText(result_binary_image, 'Crbinary', (Ybinary.shape[1] + 10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)
- cv2.putText(result_binary_image, 'Lbinary', (10, Ybinary.shape[0] + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)
- cv2.putText(result_binary_image, 'Sbinary', (Ybinary.shape[1] + 10, Ybinary.shape[0] + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)
-
- # Display the result binary image
- cv2.imshow('Four Binary Images Display', result_binary_image)
- cv2.imshow('Color Spaces', result_image)
-
- global fit_prev_left
- global fit_prev_right
- global fit_sum_left
- global fit_sum_right
- fit_prev_left=[]
- fit_prev_right=[]
- fit_sum_left=0
- fit_sum_right=0
-
- out_img, out_img1, left_fitx,right_fitx,ploty,left_fit, right_fit,left_lane_inds,right_lane_inds,lane_width = Plot_line(Ybinary)
-
-
- # Generate x and y values for plotting
- ploty = np.linspace(0, result_binary_image.shape[0]-1, result_binary_image.shape[0])
- left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
- right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
-
- for y, left_x, right_x in zip(ploty.astype(int), left_fitx.astype(int), right_fitx.astype(int)):
- cv2.circle(out_img, (left_x, y), 2, (255, 0, 0), -1) # Draw left line in red
- cv2.circle(out_img, (right_x, y), 2, (0, 255, 255), -1) # Draw right line in yellow
-
-            # we may have to do some processing of our own. All it needs is something detected that's white.
- cv2.imshow("PLYFIT",out_img)
-
- curverad,center_dist,width_lane,lane_center_position= calc_radius_position(Ybinary,left_fit, right_fit,left_lane_inds,right_lane_inds,lane_width)
- laneImage,new_img =draw_lane(frame, Ybinary, left_fitx, right_fitx, M)
- unwarped_image= reverse_warping(laneImage,M)
- laneImage = cv2.addWeighted(new_img, 1, unwarped_image, 0.5, 0)
- laneImage, copy = Plot_details(laneImage,curverad,center_dist,width_lane,lane_center_position)
- print(center_dist)
-
- cv2.imshow("UMMA",laneImage)
-
- # # Display the combined frame
- # cv2.imshow('Combined Video Feed', reg_distort)
-
- if cv2.waitKey(1) & 0xFF == ord('q'):
- break
-
- # Release the webcam and close all windows
- cap.release()
- cv2.destroyAllWindows()
-
-
-if __name__ == "__main__":
- main()
\ No newline at end of file
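The deleted pipeline above smooths center distance and curve radius with a `Queue(maxsize=15)` plus a hand-maintained running sum shared through globals. A minimal sketch of the same 15-frame moving average, assuming a scalar per-frame measurement; `RunningAverage` and the sample values are illustrative, not from this codebase:

```python
from collections import deque


class RunningAverage:
    """Fixed-window average; deque(maxlen=...) evicts the oldest sample automatically."""

    def __init__(self, window=15):  # window mirrors Queue(maxsize=15) in the deleted code
        self._samples = deque(maxlen=window)

    def update(self, value):
        self._samples.append(value)
        return sum(self._samples) / len(self._samples)


# Hypothetical usage with per-frame center offsets in meters:
center_filter = RunningAverage(window=15)
for raw_dist in (0.12, 0.15, 0.11, 0.45):
    smoothed = center_filter.update(raw_dist)
```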
diff --git a/ros2/lanefollowingtest/VidCam.py b/ros2/lanefollowingtest/VidCam.py
deleted file mode 100644
index b00d94eb8..000000000
--- a/ros2/lanefollowingtest/VidCam.py
+++ /dev/null
@@ -1,457 +0,0 @@
-import os
-import glob
-
-import cv2
-import numpy as np
-import matplotlib.pyplot as plt
-from moviepy.editor import VideoFileClip
-from whitepxlClass import WhitePixelDetector
-
-
-GRADIENT_THRESH = (20, 100)
-S_CHANNEL_THRESH = (80, 255)
-L_CHANNEL_THRESH = (80, 255)
-
-# NOTE: these thresholds still need to be tuned
-B_CHANNEL_THRESH = (150, 200)
-L2_CHANNEL_THRESH = (200, 200)
-
-
-class CameraCalibration:
- def __init__(self, nx, ny, chessboard_dir,SHOW_PLOT=False):
- self.NX = nx
- self.NY = ny
- self.CHESSBOARD_DIR = chessboard_dir
- self.show_plot = SHOW_PLOT
-
- def get_chessboard_corners(self, imgs):
- """Return image and object points from a set of chessboard images."""
- if not isinstance(imgs, list):
- raise ValueError("imgs parameter needs to be a list.")
-
- # Initialize 3D object points
- objp = np.zeros((self.NX * self.NY, 3), np.float32)
- objp[:, :2] = np.mgrid[0:self.NX, 0:self.NY].T.reshape(-1, 2)
-
- imgps = []
- objps = []
-
- for img in imgs:
- gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
- found, corners = cv2.findChessboardCorners(gray, (self.NX, self.NY), None)
- if not found:
- continue
- imgps.append(corners)
- objps.append(objp)
-
- return imgps, objps
-
- def chessboard_cam_calib(self, imgps, objps, img_size):
- """Returns camera calibration matrix and distortion coefficients."""
- ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objps, imgps, img_size, None, None)
- return mtx, dist
-
- def plots_and_calibration(self):
- img_locs = glob.glob(self.CHESSBOARD_DIR)
- print(img_locs)
-
- # Read all images into a list
- imgs = [cv2.imread(img_loc) for img_loc in img_locs]
-
-
-
- # Get size of images using one image as a sample, with width as the first element
- img_size = imgs[0].shape[::-1][1:]
-
- # Calibrate camera and retrieve calibration matrix and distortion coefficients
- imgps, objps = self.get_chessboard_corners(imgs)
- MTX, DIST = self.chessboard_cam_calib(imgps, objps, img_size)
-
- if self.show_plot:
-
- # Set up figure for plotting
- f, axarr = plt.subplots(len(imgs), 3)
- f.set_size_inches(15, 30)
-
- # Loop through images, undistort them, and draw corners on undistorted versions.
- for i, img in enumerate(imgs):
- # Set column headings on figure
- if i == 0:
- axarr[i, 0].set_title("Original Image")
- axarr[i, 1].set_title("Undistorted Image")
- axarr[i, 2].set_title("Corners Drawn")
-
- # Generate new undistorted image
- undist = cv2.undistort(img, MTX, DIST, None, MTX)
- undist_copy = undist.copy()
-
- # Generate new image with corner points drawn
- undist_grey = cv2.cvtColor(undist_copy, cv2.COLOR_BGR2GRAY)
- found, corners = cv2.findChessboardCorners(undist_grey, (self.NX, self.NY), None)
-
- if found:
- drawn = cv2.drawChessboardCorners(undist_copy, (self.NX, self.NY), corners, found)
- else:
- drawn = img
-
- # Plot images on figure
- axarr[i, 0].imshow(img)
- axarr[i, 0].axis('off')
- axarr[i, 1].imshow(undist)
- axarr[i, 1].axis('off')
- axarr[i, 2].imshow(drawn)
- axarr[i, 2].axis('off')
- return MTX,DIST
-
-
-def seperate_hls(rgb_img):
- hls = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2HLS)
- h = hls[:,:,0]
- l = hls[:,:,1]
- s = hls[:,:,2]
- return h, l, s
-
-def seperate_lab(rgb_img):
- lab = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2Lab)
- l = lab[:,:,0]
- a = lab[:,:,1]
- b = lab[:,:,2]
- return l, a, b
-
-def seperate_luv(rgb_img):
- luv = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2Luv)
-
- l = luv[:,:,0]
- u = luv[:,:,1]
- v = luv[:,:,2]
- return l, u, v
-
-def binary_threshold_lab_luv(rgb_img, bthresh, lthresh):
- l, a, b = seperate_lab(rgb_img)
- l2, u, v = seperate_luv(rgb_img)
- binary = np.zeros_like(l)
- binary[
- ((b > bthresh[0]) & (b <= bthresh[1])) |
- ((l2 > lthresh[0]) & (l2 <= lthresh[1]))
- ] = 1
- return binary
-
-def binary_threshold_hls(rgb_img, sthresh, lthresh):
- h, l, s = seperate_hls(rgb_img)
- binary = np.zeros_like(h)
- binary[
- ((s > sthresh[0]) & (s <= sthresh[1])) &
- ((l > lthresh[0]) & (l <= lthresh[1]))
- ] = 1
- return binary
-
-def gradient_threshold(channel, thresh):
- # Take the derivative in x
- sobelx = cv2.Sobel(channel, cv2.CV_64F, 1, 0)
- # Absolute x derivative to accentuate lines away from horizontal
- abs_sobelx = np.absolute(sobelx)
- scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
- # Threshold gradient channel
- sxbinary = np.zeros_like(scaled_sobel)
- sxbinary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
- return sxbinary
-
-
-def histo_peak(histo):
- """Find left and right peaks of histogram"""
- midpoint = np.int32(histo.shape[0]/2)
- leftx_base = np.argmax(histo[:midpoint])
- rightx_base = np.argmax(histo[midpoint:]) + midpoint
- return leftx_base, rightx_base
-
-def get_lane_indices_sliding_windows(binary_warped, leftx_base, rightx_base, n_windows, margin, recenter_minpix):
- """Get lane line pixel indices by using sliding window technique"""
- # Create an output image to draw on and visualize the result
- out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
- out_img = out_img.copy()
- # Set height of windows
- window_height = np.int32(binary_warped.shape[0]/n_windows)
-
- # Identify the x and y positions of all nonzero pixels in the image
- nonzero = binary_warped.nonzero()
- nonzeroy = np.array(nonzero[0])
- nonzerox = np.array(nonzero[1])
-
- # Create empty lists to receive left and right lane pixel indices
- left_lane_inds = []
- right_lane_inds = []
- # Current positions to be updated for each window
- leftx_current = leftx_base
- rightx_current = rightx_base
-
- for window in range(n_windows):
- # Identify window boundaries in x and y (and right and left)
- win_y_low = binary_warped.shape[0] - (window + 1) * window_height
- win_y_high = binary_warped.shape[0] - window * window_height
- win_xleft_low = leftx_current - margin
- win_xleft_high = leftx_current + margin
- win_xright_low = rightx_current - margin
- win_xright_high = rightx_current + margin
- cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high), (0,255,0), 2)
- cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high), (0,255,0), 2)
- # Identify the nonzero pixels in x and y within the window
- good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
- (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
- good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
- (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
- # Append these indices to the lists
- left_lane_inds.append(good_left_inds)
- right_lane_inds.append(good_right_inds)
- # If you found > minpix pixels, recenter next window on their mean position
- if len(good_left_inds) > recenter_minpix:
- leftx_current = np.int32(np.mean(nonzerox[good_left_inds]))
- if len(good_right_inds) > recenter_minpix:
- rightx_current = np.int32(np.mean(nonzerox[good_right_inds]))
-
- # Concatenate the arrays of indices
- left_lane_inds = np.concatenate(left_lane_inds)
- right_lane_inds = np.concatenate(right_lane_inds)
- return left_lane_inds, right_lane_inds, nonzerox, nonzeroy, out_img
-
-def get_lane_indices_from_prev_window(binary_warped, left_fit, right_fit, margin):
- """Detect lane line by searching around detection of previous sliding window detection"""
- nonzero = binary_warped.nonzero()
- nonzeroy = np.array(nonzero[0])
- nonzerox = np.array(nonzero[1])
-
- left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
- left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
- left_fit[1]*nonzeroy + left_fit[2] + margin)))
- right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
- right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
- right_fit[1]*nonzeroy + right_fit[2] + margin)))
-
- # Again, extract left and right line pixel positions
- leftx = nonzerox[left_lane_inds]
- lefty = nonzeroy[left_lane_inds]
- rightx = nonzerox[right_lane_inds]
- righty = nonzeroy[right_lane_inds]
-
- # Fit a second order polynomial to each
- left_fit = np.polyfit(lefty, leftx, 2)
- right_fit = np.polyfit(righty, rightx, 2)
-
- # Generate x and y values for plotting
- ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
- left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
- right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
- return left_lane_inds, right_lane_inds, ploty, left_fitx, right_fitx
-
-
-
-def main():
- # Set parameters
- cap = cv2.VideoCapture("/dev/video2")
-
-
- nx = 9
- ny = 6
- chessboard_dir = './camera_cal_adesso/*.jpg'
-
- # Create an instance of the CameraCalibration class
- calibrator = CameraCalibration(nx, ny, chessboard_dir)
-
-
- # Run the calibration and plotting
- MTX,DIST = calibrator.plots_and_calibration()
-
-
- ret = True
- while ret:
- ret, frame = cap.read()
-
- # cv2.imshow("INITAL FRAME", frame)
-
-
- # TEST_IMG = "./test_images/straight_lines1.jpg"
- lane_test_img = frame
- lane_test_img_rgb = cv2.cvtColor(lane_test_img, cv2.COLOR_BGR2RGB)
- lane_test_undist = cv2.undistort(lane_test_img_rgb, MTX, DIST, None, MTX)
- reg_distort = np.hstack((frame, lane_test_undist))
-
- # Display the combined frame
- cv2.imshow('Combined Video Feed', reg_distort)
-
- # LAB and LUV channel threshold
- s_binary = binary_threshold_lab_luv(lane_test_undist, B_CHANNEL_THRESH, L2_CHANNEL_THRESH)
-
- # Convert the frame to grayscale
- gray = cv2.cvtColor(lane_test_undist, cv2.COLOR_BGR2GRAY)
- gray = cv2.GaussianBlur(gray, (7, 7), 0)
-
- # Threshold the frame to get white pixels
- _, thresholded = cv2.threshold(gray, 220, 255, cv2.THRESH_BINARY)
-
- cv2.imshow("COPE",thresholded)
-
-
-
- # Gradient threshold on S channel
- h, l, s = seperate_hls(lane_test_undist)
- sxbinary = gradient_threshold(s, GRADIENT_THRESH)
-
-
- # Combine two binary images to view their contribution in green and red
- color_binary = np.dstack((sxbinary, s_binary, np.zeros_like(sxbinary))) * 255
-
-
- # bottom_row = np.hstack((sxbinary, color_binary))
-
- # binary_images = np.vstack((top_row, bottom_row))
-
- # # Display the combined frame
-
-
- IMG_SIZE = lane_test_undist.shape[::-1][1:]
- OFFSET = 300
-
- white_pixel_detector = WhitePixelDetector(lane_test_undist)
-
- result = white_pixel_detector.process_frame()
- if result:
- frame, tl_pt, tr_pt, bl_pt, br_pt = result
- print("Top Left:", tl_pt)
- print("Top Right:", tr_pt)
- print("Bottom Left:", bl_pt)
- print("Bottom Right:", br_pt)
-
- PRES_SRC_PNTS = np.float32([
- tl_pt, # Top-left corner
- bl_pt, # Bottom-left corner
- br_pt, # Bottom-right corner
- tr_pt # Top-right corner
- ])
-
- PRES_DST_PNTS = np.float32([
- [OFFSET, 0],
- [OFFSET, IMG_SIZE[1]],
- [IMG_SIZE[0]-OFFSET, IMG_SIZE[1]],
- [IMG_SIZE[0]-OFFSET, 0]
-])
- M = cv2.getPerspectiveTransform(PRES_SRC_PNTS, PRES_DST_PNTS)
- M_INV = cv2.getPerspectiveTransform(PRES_DST_PNTS, PRES_SRC_PNTS)
- warped = cv2.warpPerspective(lane_test_undist, M, IMG_SIZE, flags=cv2.INTER_LINEAR)
-
- warped_cp = warped.copy()
- warped_poly = cv2.polylines(warped_cp, np.int32([PRES_DST_PNTS]), True, (255,0,0), 3)
-
-
- cv2.polylines(lane_test_undist, np.int32([PRES_SRC_PNTS]), True, (255, 0, 0), 3)
- top_row = np.hstack((lane_test_undist, color_binary))
- cv2.imshow('2x2 Video Configuration', top_row)
- ort_dots = np.hstack((frame, warped_cp))
- cv2.imshow('LANES', ort_dots)
-
- N_WINDOWS = 10
- MARGIN = 100
- RECENTER_MINPIX = 50
-
- # Define conversions in x and y from pixels space to meters
- YM_PER_PIX = 30 / 720 # meters per pixel in y dimension
- XM_PER_PIX = 3.7 / 700 # meters per pixel in x dimension
-
- # Warp binary image of lane line
- binary_warped = cv2.warpPerspective(s_binary, M, IMG_SIZE, flags=cv2.INTER_LINEAR)
- histogram = np.sum(binary_warped[int(binary_warped.shape[0]/2):,:], axis=0)
- print(histogram)
-
- # f, axarr = plt.subplots(2,2)
- # f.set_size_inches(18, 10)
- # axarr[0, 0].imshow(binary_warped, cmap='gray')
- # axarr[0, 1].plot(histogram)
- # axarr[0, 0].set_title("Warped Binary Lane Line")
- # axarr[0, 1].set_title("Histogram of Lane line Pixels")
-
-
- cv2.imshow("binary_warped",binary_warped)
-
- # leftx_base, rightx_base = histo_peak(histogram)
- # left_lane_inds, right_lane_inds, nonzerox, nonzeroy, out_img = get_lane_indices_sliding_windows(
- # binary_warped, leftx_base, rightx_base, N_WINDOWS, MARGIN, RECENTER_MINPIX)
-
-
- # # Extract left and right line pixel positions
- # leftx = nonzerox[left_lane_inds]
- # lefty = nonzeroy[left_lane_inds]
- # rightx = nonzerox[right_lane_inds]
- # righty = nonzeroy[right_lane_inds]
-
-
- # # Fit a second order polynomial to each
- # left_fit = np.polyfit(lefty, leftx, 2)
- # right_fit = np.polyfit(righty, rightx, 2)
-
-
- # # Generate x and y values for plotting
- # ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
- # left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
- # right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
-
- # for y, left_x, right_x in zip(ploty.astype(int), left_fitx.astype(int), right_fitx.astype(int)):
- # cv2.circle(out_img, (left_x, y), 2, (255, 0, 0), -1) # Draw left line in red
- # cv2.circle(out_img, (right_x, y), 2, (0, 255, 255), -1) # Draw right line in yellow
-
-            # # we may have to do some processing of our own. All it needs is something detected that's white.
-            # cv2.imshow("PLYFIT",out_img)
-
- # Break the loop when 'q' key is pressed
- if cv2.waitKey(1) & 0xFF == ord('q'):
- break
-
- # Release the webcam and close all windows
- cap.release()
- cv2.destroyAllWindows()
-
- # f, axarr = plt.subplots(2,2)
- # f.set_size_inches(17, 10)
- # axarr[0, 0].imshow(lane_test_img_rgb)
- # axarr[0, 1].imshow(lane_test_undist)
- # axarr[0, 0].set_title("Original Image")
- # axarr[0, 1].set_title("Undistorted Image")
- # axarr[0, 0].axis('off')
- # axarr[0, 1].axis('off')
-
- # Show the plots
- # plt.show()
-
-
-if __name__ == "__main__":
- main()
-
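VidCam.py's `main()` builds its bird's-eye view from four detected corner points and a fixed pixel offset. A self-contained sketch of that warp under the same point ordering (tl, bl, br, tr); `birds_eye` and its default offset are assumptions for illustration:

```python
import cv2
import numpy as np


def birds_eye(frame, src_pts, offset=300):
    """Warp a camera frame to a top-down view given four corner points."""
    h, w = frame.shape[:2]
    src = np.float32(src_pts)  # expected order: tl, bl, br, tr
    dst = np.float32([[offset, 0], [offset, h], [w - offset, h], [w - offset, 0]])
    M = cv2.getPerspectiveTransform(src, dst)
    M_inv = cv2.getPerspectiveTransform(dst, src)  # for unwarping overlays later
    warped = cv2.warpPerspective(frame, M, (w, h), flags=cv2.INTER_LINEAR)
    return warped, M, M_inv
```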
diff --git a/ros2/lanefollowingtest/whitepxlClass.py b/ros2/lanefollowingtest/whitepxlClass.py
deleted file mode 100644
index d0f7880f5..000000000
--- a/ros2/lanefollowingtest/whitepxlClass.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import cv2
-import numpy as np
-import math
-from functools import partial
-
-class WhitePixelDetector:
- def __init__(self,frame):
- # self.cap = cv2.VideoCapture(video_source)
- self.frame = frame
- self.tl = (0, 0) # Top left
- self.tr = (640, 0)
- self.bl = (0, 480)
- self.br = (640, 480)
- self.tl_dist = partial(self.euclidean_distance, reference_point=self.tl)
- self.tr_dist = partial(self.euclidean_distance, reference_point=self.tr)
- self.bl_dist = partial(self.euclidean_distance, reference_point=self.bl)
- self.br_dist = partial(self.euclidean_distance, reference_point=self.br)
-
- def euclidean_distance(self, coord, reference_point):
- return math.sqrt((coord[0] - reference_point[0]) ** 2 + (coord[1] - reference_point[1]) ** 2)
-
- def detect_white_pixels(self):
- gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
- gray = cv2.GaussianBlur(gray, (7, 7), 0)
- _, thresholded = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
- contours, _ = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
- white_pixel_coordinates = [point[0] for contour in contours for point in contour]
- return white_pixel_coordinates
-
- def interpolate_and_draw_line(self,coordinates):
- if len(coordinates) > 1:
- coordinates_np = np.array(coordinates, dtype=np.int32)
- coordinates_np = coordinates_np.reshape(-1, 1, 2)
- cv2.polylines(self.frame, [coordinates_np], isClosed=True, color=(0, 0, 255), thickness=2)
-
- def process_frame(self):
- # ret, frame = self.cap.read()
-
- height, width, _ = self.frame.shape
-
- top_left = (0, 0)
- top_right = (width, 0)
- bottom_left = (0, height)
- bottom_right = (width, height)
-
- white_pixel_coordinates = self.detect_white_pixels()
-
- if white_pixel_coordinates:
- sorted_coordinates_distance_tl = sorted(white_pixel_coordinates, key=self.tl_dist)
- sorted_coordinates_distance_tr = sorted(white_pixel_coordinates, key=self.tr_dist)
- sorted_coordinates_distance_bl = sorted(white_pixel_coordinates, key=self.bl_dist)
- sorted_coordinates_distance_br = sorted(white_pixel_coordinates, key=self.br_dist)
- tl_pt = sorted_coordinates_distance_tl[0]
- tr_pt = sorted_coordinates_distance_tr[0]
- bl_pt = sorted_coordinates_distance_bl[0]
- br_pt = sorted_coordinates_distance_br[0]
-
- # Draw circles on the frame
- cv2.circle(self.frame, tuple(tl_pt), 12, (0, 255, 0), -1)
- cv2.circle(self.frame, tuple(tr_pt), 12, (0, 255, 0), -1)
- cv2.circle(self.frame, tuple(bl_pt), 12, (0, 255, 0), -1)
- cv2.circle(self.frame, tuple(br_pt), 12, (0, 255, 0), -1)
-
- # Interpolate to form a line and draw it on the frame
- self.interpolate_and_draw_line(white_pixel_coordinates)
-
- return self.frame, tl_pt, tr_pt, bl_pt, br_pt
-
- return None
-
- # def release_capture(self):
- # self.cap.release()
-
-# # Instantiate the class with the video source
-# white_pixel_detector = WhitePixelDetector("/dev/video5")
-
-# while True:
-# result = white_pixel_detector.process_frame()
-# if result:
-# frame, tl_pt, tr_pt, bl_pt, br_pt = result
-# print("Top Left:", tl_pt)
-# print("Top Right:", tr_pt)
-# print("Bottom Left:", bl_pt)
-# print("Bottom Right:", br_pt)
-# cv2.imshow('Interpolated Line', frame)
-
-# # Check for a key press and break the loop if 'q' is pressed
-# if cv2.waitKey(1) & 0xFF == ord('q'):
-# break
-
-# # Release the webcam
-# white_pixel_detector.release_capture()
-
-# # Close all windows
-# cv2.destroyAllWindows()
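WhitePixelDetector picks its four extreme points by sorting every contour point four times, once per corner. A vectorized sketch of the same selection using one distance matrix; `nearest_to_corners` is a hypothetical helper, and the frame size matches the 640x480 defaults above:

```python
import numpy as np


def nearest_to_corners(coords, width=640, height=480):
    """Return the white pixels closest to the tl, tr, bl, br image corners."""
    pts = np.asarray(coords, dtype=np.float32)                    # (N, 2) of (x, y)
    corners = np.float32([[0, 0], [width, 0], [0, height], [width, height]])
    # (4, N) squared distances from each corner to every white pixel
    d2 = ((pts[None, :, :] - corners[:, None, :]) ** 2).sum(axis=2)
    tl, tr, bl, br = (pts[i] for i in d2.argmin(axis=1))
    return tl, tr, bl, br
```

Sorting costs O(N log N) per corner; the argmin over a distance matrix is a single O(N) pass per corner.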
diff --git a/ros2/log/latest_build b/ros2/log/latest_build
index 42edb5cc3..ebbe5d0f0 120000
--- a/ros2/log/latest_build
+++ b/ros2/log/latest_build
@@ -1 +1 @@
-build_2024-03-12_18-56-12
\ No newline at end of file
+build_2024-03-28_13-39-36
\ No newline at end of file
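The CameraCalibration class removed with VidCam.py wraps the standard chessboard workflow: collect corner detections, run cv2.calibrateCamera, then undistort each frame. A condensed sketch of that flow, assuming the same 9x6 inner-corner pattern and glob path; `calibrate` is an illustrative name:

```python
import glob

import cv2
import numpy as np


def calibrate(pattern="./camera_cal_adesso/*.jpg", nx=9, ny=6):
    """Chessboard calibration: returns camera matrix and distortion coefficients."""
    objp = np.zeros((nx * ny, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)  # planar grid, z = 0
    objpoints, imgpoints = [], []
    gray = None
    for path in glob.glob(pattern):
        gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
        found, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        if found:
            objpoints.append(objp)
            imgpoints.append(corners)
    _, mtx, dist, _, _ = cv2.calibrateCamera(
        objpoints, imgpoints, gray.shape[::-1], None, None)
    return mtx, dist


# Undistorting a frame afterwards:
# undist = cv2.undistort(frame, mtx, dist, None, mtx)
```

`gray.shape[::-1]` passes (width, height), the image-size order calibrateCamera expects.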
diff --git a/ros2/src/lanefollowingtest/LaneDetection.py b/ros2/src/lanefollowingtest/LaneDetection.py
index 4038d8ecc..179afe397 100644
--- a/ros2/src/lanefollowingtest/LaneDetection.py
+++ b/ros2/src/lanefollowingtest/LaneDetection.py
@@ -1,23 +1,22 @@
# from irobot_create_msgs.msg import WheelTicks, WheelTicks
-from geometry_msgs.msg import Twist
from std_msgs.msg import Float64
-
+from sensor_msgs.msg import Image
import rclpy
from rclpy.node import Node
-from rclpy.qos import qos_profile_sensor_data
-
+from cv_bridge import CvBridge
import cv2
import numpy as np
-
+from std_msgs.msg import String
# Inputs from both cameras
-vidcap_left = cv2.VideoCapture("/dev/video0")
-vidcap_left.set(3,640)
-vidcap_left.set(4,480)
-vidcap_right = cv2.VideoCapture("/dev/video2")
-vidcap_right.set(3,640)
-vidcap_right.set(4,480)
+vidcap_left = cv2.VideoCapture("/dev/video3")
+vidcap_left.set(3, 640)
+vidcap_left.set(4, 480)
+vidcap_right = cv2.VideoCapture("/dev/video1")
+vidcap_right.set(3, 640)
+vidcap_right.set(4, 480)
+
# These are constants
nwindows = 9
@@ -32,8 +31,8 @@
def nothing(x):
pass
-class Individual_Follower():
+class Individual_Follower():
def __init__(self):
self._fit = None
@@ -124,7 +123,7 @@ def Plot_Line(self, smoothen=False, prevFrameCount=6):
return result
-#These are constants for the timer callback: Should be modified by UI
+# These are constants for the timer callback: Should be modified by UI
l_h = 0
l_s = 0
l_v = 200
@@ -136,7 +135,7 @@ def Plot_Line(self, smoothen=False, prevFrameCount=6):
lower = np.array([l_h, l_s, l_v])
upper = np.array([u_h, u_s, u_v])
-#Coordinates for the 4 alignment points: again, should be handled by the UI
+# Coordinates for the 4 alignment points: again, should be handled by the UI
bl = (12, 355)
tl = (66, 304)
br = (635, 344)
@@ -149,12 +148,16 @@ def Plot_Line(self, smoothen=False, prevFrameCount=6):
class Lane_Follower(Node):
+ GUI = True
def __init__(self):
super().__init__('lane_detection_node')
self._tolerance = 0
self._left_follower = Individual_Follower()
self._right_follower = Individual_Follower()
+ # Determine which lane we're in: Left lane means the right image is dashed
+ self._Left_Lane = False
+
timer_period = 0.01 # seconds
self.timer = self.create_timer(timer_period, self.timer_callback)
@@ -162,29 +165,38 @@ def __init__(self):
# Publisher for error from the lane lines relative to the camera FOV
self.camData_publisher = self.create_publisher(
Float64, '/cam_data', 10)
-
- def measure_position_meters(self,left,right):
+ self.lane_pubs = self.create_publisher(String, "lane_state",10)
+ if Lane_Follower.GUI:
+ self._bridge = CvBridge()
+ image_labels = ("raw_left", "raw_right", "tf_left",
+ "tf_right", "sliding_left", "sliding_right")
+ self._publishers = {label: self.create_publisher(
+ Image, "/" + label, 10) for label in image_labels}
+
+ def img_publish(self, label, img_raw):
+ if (self.GUI):
+ self._publishers[label].publish(
+ self._bridge.cv2_to_imgmsg(img_raw, encoding="passthrough"))
+
+ def measure_position_meters(self, left, right):
left_x_pos = 0
right_x_pos = 0
-
# Will be the same for left side & right side
y_max = self._left_follower._binary_warped.shape[0]
-
+
# Calculate left and right line positions at the bottom of the image
- if left is not None:
+ if left is not None:
left_fit = self._left_follower._fit
left_x_pos = left_fit[0]*y_max**2 + \
left_fit[1]*y_max + left_fit[2]
- cv2.imshow("Result Left", left)
-
+ self.img_publish("sliding_left", left)
if right is not None:
right_fit = self._right_follower._fit
right_x_pos = right_fit[0]*y_max**2 + \
right_fit[1]*y_max + right_fit[2]
- cv2.imshow("Result Right", right)
-
+ self.img_publish("sliding_right", right)
center_lanes_x_pos = (left_x_pos + right_x_pos)//2
# Calculate the deviation between the center of the lane and the center of the picture
@@ -193,26 +205,42 @@ def measure_position_meters(self,left,right):
veh_pos = (
(self._left_follower._binary_warped.shape[1]//2) - center_lanes_x_pos) * xm_per_pix
- if(left is None):
+ if (left is None):
veh_pos += 91
elif (right is None):
veh_pos -= 91
return veh_pos / 100
+ def determine_lane_size(self, img):
+ # Taking in both warped images, determine which lane line is the longer one, and ergo the "solid line"
+ # This may struggle on turns, but might work depending: Will need to characterize
+ # TODO, parametrize all constants here for tweaking in the U.I
+ edges = cv2.Canny(img, 50, 150)
+ lines = cv2.HoughLinesP(edges, 1, np.pi/180, 100,
+ minLineLength=100, maxLineGap=5)
+ m_length = 0
+        if lines is not None:
+            for line in lines:
+                x1, y1, x2, y2 = line[0]
+                # '^' is XOR in Python; use ** for the squared segment length
+                length = (x1 - x2) ** 2 + (y1 - y2) ** 2
+                m_length = max(m_length, length)
+ return m_length
def timer_callback(self):
success_l, image_l = vidcap_left.read()
success_r, image_r = vidcap_right.read()
- images = [(image_l, "Left"), (image_r, "Right")]
- if not(success_l and success_r):
+ images = [(image_l, "left"), (image_r, "right")]
+ left_buffer = -1
+ right_buffer = -1
+ if not (success_l and success_r):
return
for image in images:
frame = image[0]
# frame = cv2.resize(image[0], (640, 480))
# I might've cooked with this list comprehension
- for point in (bl,tl,br,tr):
- frame = cv2.circle(frame,point,5,(0,0,255),-1)
+ for point in (bl, tl, br, tr):
+ frame = cv2.circle(frame, point, 5, (0, 0, 255), -1)
transformed_frame = cv2.warpPerspective(
frame, matrix, (640, 480))
@@ -221,35 +249,43 @@ def timer_callback(self):
hsv_transformed_frame = cv2.cvtColor(
transformed_frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_transformed_frame, lower, upper)
- if image[1] == "Left":
+ if image[1] == "left":
self._left_follower.set_binwarp(binwarp=mask)
+ left_buffer = self.determine_lane_size(mask)
else:
self._right_follower.set_binwarp(binwarp=mask)
-
- cv2.imshow("Original " + image[1], frame)
- cv2.imshow("Bird's Eye View " + image[1], transformed_frame)
+ right_buffer = self.determine_lane_size(mask)
+ self.img_publish("raw_" + image[1], frame)
+ self.img_publish("tf_" + image[1], transformed_frame)
result_left = self._left_follower.Plot_Line()
result_right = self._right_follower.Plot_Line()
-
msg_out = Float64()
- #TODO: Is this the behavior we want? Or do we need it to do something else if one of the lines is invalid?
+ # TODO: Is this the behavior we want? Or do we need it to do something else if one of the lines is invalid?
if (result_left is not None or result_right is not None):
pos = self.measure_position_meters(result_left, result_right)
print(pos)
msg_out.data = pos
self.camData_publisher.publish(msg_out)
+ msg = String()
+ if(left_buffer > right_buffer):
+ msg.data = "In Left lane"
+ self.lane_pubs.publish(msg)
+ self._Left_Lane = True
+ elif (right_buffer > left_buffer):
+ msg.data = "In Right lane"
+ self.lane_pubs.publish(msg)
+ self._Left_Lane = False
else:
TOLERANCE = 100
self._tolerance += 1
- if(self._tolerance > TOLERANCE):
+ if (self._tolerance > TOLERANCE):
msg_out.data = 1000.0
self.camData_publisher.publish(msg_out)
-
if cv2.waitKey(10) == 27:
return
-
+
def main(args=None):
rclpy.init(args=args) # Initialize ROS2 program
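The new `determine_lane_size` decides solid vs. dashed by taking the longest Hough segment in each warped mask (being in the left lane implies the right image shows the dashed line). A standalone sketch with the XOR fix applied; the Canny and Hough thresholds mirror the diff and are untuned assumptions:

```python
import cv2
import numpy as np


def longest_segment_sq(mask):
    """Squared length of the longest Hough segment in a binary lane mask."""
    edges = cv2.Canny(mask, 50, 150)
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100,
                            minLineLength=100, maxLineGap=5)
    longest = 0
    if lines is not None:
        for x1, y1, x2, y2 in lines[:, 0]:
            # squared length suffices for comparison; avoids a sqrt per segment
            longest = max(longest, (x1 - x2) ** 2 + (y1 - y2) ** 2)
    return longest


# Hypothetical usage with the two warped masks from timer_callback:
# in_left_lane = longest_segment_sq(left_mask) > longest_segment_sq(right_mask)
```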
diff --git a/ros2/src/lanefollowingtest/cam_publisher.py b/ros2/src/lanefollowingtest/cam_publisher.py
new file mode 100644
index 000000000..e5b21f2e5
--- /dev/null
+++ b/ros2/src/lanefollowingtest/cam_publisher.py
@@ -0,0 +1,67 @@
+import rclpy
+from rclpy.node import Node
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge
+import cv2
+
+
+class WebcamImagePublisher(Node):
+ def __init__(self):
+ super().__init__('webcam_image_publisher')
+ self.logger = self.get_logger()
+
+ # Set desired frame rate (may need adjustment based on hardware)
+ self.frame_rate = 30
+
+ # Initialize OpenCV video capture object
+ self.cap = cv2.VideoCapture("/dev/video0") # 0 for default webcam
+
+ if not self.cap.isOpened():
+ self.logger.error("Failed to open webcam!")
+ rclpy.shutdown()
+ return
+
+ # Initialize CvBridge instance
+ self.bridge = CvBridge()
+
+ # Create image publisher
+ self.image_pub = self.create_publisher(
+ Image, '/camera/image', 5) # Adjust queue size if needed
+
+        # Create HSV image publisher
+        self.hsv_pub = self.create_publisher(
+            Image, '/camera/image_hsv', 5)  # Adjust queue size if needed
+
+ # Timer to capture and publish images
+ self.timer = self.create_timer(
+ 1 / self.frame_rate, self.capture_and_publish)
+
+ self.logger.info("Webcam image publisher node started!")
+
+ def capture_and_publish(self):
+ ret, cv_image = self.cap.read()
+
+ if not ret:
+ self.logger.warn("Failed to capture image!")
+ return
+        # OpenCV captures BGR frames; keep them BGR so the "bgr8" encoding below is accurate
+        raw_image = cv2.resize(cv_image, (640, 480))
+        raw_image_msg = self.bridge.cv2_to_imgmsg(raw_image, "bgr8")
+        # Derive the HSV image from the BGR frame
+        hsv_image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2HSV)
+        hsv_image_msg = self.bridge.cv2_to_imgmsg(hsv_image, encoding="passthrough")
+        # Publish both images
+        self.image_pub.publish(raw_image_msg)
+        self.hsv_pub.publish(hsv_image_msg)
+
+
+def main():
+ rclpy.init()
+ node = WebcamImagePublisher()
+ rclpy.spin(node)
+ rclpy.shutdown()
+
+
+if __name__ == '__main__':
+ main()
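For smoke-testing the new publisher, a minimal subscriber sketch that views `/camera/image`; the node and callback names are illustrative, only the topic comes from the diff:

```python
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import cv2


class ImageViewer(Node):
    def __init__(self):
        super().__init__('image_viewer')
        self.bridge = CvBridge()
        self.sub = self.create_subscription(Image, '/camera/image', self.on_image, 5)

    def on_image(self, msg):
        frame = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
        cv2.imshow('/camera/image', frame)
        cv2.waitKey(1)  # let HighGUI service its event loop


def main():
    rclpy.init()
    rclpy.spin(ImageViewer())
    rclpy.shutdown()


if __name__ == '__main__':
    main()
```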
diff --git a/ros2/src/lanefollowingtest/camera_sanity_alt.py b/ros2/src/lanefollowingtest/camera_sanity_alt.py
deleted file mode 100644
index 13a197164..000000000
--- a/ros2/src/lanefollowingtest/camera_sanity_alt.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import cv2
-
-vidcap_left = cv2.VideoCapture("/dev/video3")
-vidcap_left.set(3,640)
-vidcap_left.set(4,480)
-
-
-while True:
- left = vidcap_left.read()
- if(left[0]):
- cv2.imshow("left", left[1])
- if cv2.waitKey(10) == 27:
- break
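Device paths shift across this diff (/dev/video0 to /dev/video3 for the left camera, /dev/video2 to /dev/video1 for the right), so a sanity script pinned to one index goes stale quickly. A small probe sketch; the index range is an assumption:

```python
import cv2


def probe_cameras(max_index=8):
    """Return the /dev/video* indices that open and deliver a frame."""
    working = []
    for i in range(max_index):
        cap = cv2.VideoCapture(f"/dev/video{i}")
        if cap.isOpened() and cap.read()[0]:
            working.append(i)
        cap.release()
    return working


if __name__ == "__main__":
    print(probe_cameras())  # e.g. [1, 3]
```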
diff --git a/ros2/src/lanefollowingtest/copypaste.py b/ros2/src/lanefollowingtest/copypaste.py
deleted file mode 100644
index ee2dc6eec..000000000
--- a/ros2/src/lanefollowingtest/copypaste.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import cv2
-import threading
-
-class camThread(threading.Thread):
- def __init__(self, previewName, cam_label):
- threading.Thread.__init__(self)
- self.previewName = previewName
- self.cam_label = cam_label
- def run(self):
- print ("Starting " + self.previewName)
- camPreview(self.previewName, self.cam_label)
-
-def camPreview(previewName, cam_label):
- cv2.namedWindow(previewName)
- cam = cv2.VideoCapture(cam_label)
- if cam.isOpened(): # try to get the first frame
- rval, frame = cam.read()
- else:
- rval = False
-    cam.set(3, 640)   # set the resolution once, not on every iteration
-    cam.set(4, 480)
-    while True:
-        rval, frame = cam.read()
-        if rval:
-            cv2.imshow(previewName, frame)
-        key = cv2.waitKey(20)
-        if key == 27:  # exit on ESC
-            break
-
-    cam.release()
-    cv2.destroyWindow(previewName)
-
-# Create two threads as follows
-thread1 = camThread("Camera 1", "/dev/video1")
-thread2 = camThread("Camera 2", "/dev/video3")
-thread1.start()
-thread2.start()
\ No newline at end of file