
Commit

Merge remote-tracking branch 'origin/release_v2.23.0.0' into main
themarpe committed Oct 3, 2023
2 parents 12f1a79 + 9591231 commit b38a7c0
Showing 45 changed files with 534 additions and 84 deletions.
16 changes: 10 additions & 6 deletions .github/workflows/main.yml
@@ -49,7 +49,6 @@ jobs:
run: |
python -m pip install --upgrade pip
sudo apt install libusb-1.0-0-dev
- python -m pip install clang==14.0 --force-reinstall
python -m pip install -r docs/requirements_mkdoc.txt
- name: Configure project
run: cmake -S . -B build -DDEPTHAI_PYTHON_FORCE_DOCSTRINGS=ON -DDEPTHAI_PYTHON_DOCSTRINGS_OUTPUT="$PWD/docstrings/depthai_python_docstring.hpp"
@@ -154,8 +153,13 @@ jobs:
run: echo "BUILD_COMMIT_HASH=${{github.sha}}" >> $GITHUB_ENV
- name: Building wheel
run: python3 -m pip wheel . -w ./wheelhouse/ --verbose
- - name: Auditing wheel
-   run: for whl in wheelhouse/*.whl; do auditwheel repair "$whl" --plat linux_armv7l -w wheelhouse/audited/; done
+ - name: Auditing wheels and adding armv6l tag (Running on RPi, binaries compiled as armv6l)
+   run: |
+     python3 -m pip install -U wheel auditwheel
+     for whl in wheelhouse/*.whl; do auditwheel repair "$whl" --plat linux_armv7l -w wheelhouse/preaudited/; done
+     for whl in wheelhouse/preaudited/*.whl; do python3 -m wheel tags --platform-tag +linux_armv6l "$whl"; done
+     mkdir -p wheelhouse/audited/
+     for whl in wheelhouse/preaudited/*linux_armv6l*.whl; do cp "$whl" wheelhouse/audited/$(basename $whl); done
- name: Archive wheel artifacts
uses: actions/upload-artifact@v3
with:
@@ -560,13 +564,13 @@ jobs:
uses: codex-/return-dispatch@v1
id: return_dispatch
with:
- token: ${{ secrets.HIL_CORE_DISPATCH_TOKEN }} # Note this is NOT GITHUB_TOKEN but a PAT
+ token: ${{ secrets.HIL_CORE_DISPATCH_TOKEN }} # Note this is NOT GITHUB_TOKEN but a PAT
ref: main # or refs/heads/target_branch
repo: depthai-core-hil-tests
owner: luxonis
workflow: regression_test.yml
- workflow_inputs: '{"commit": "${{ github.ref }}", "sha": "${{ github.sha }}", "parent_url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}'
- workflow_timeout_seconds: 120 # Default: 300
+ workflow_inputs: '{"commit": "${{ github.ref }}", "parent_url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}'
+ workflow_timeout_seconds: 300 # was 120 Default: 300

- name: Release
run: echo "https://github.com/luxonis/depthai-core-hil-tests/actions/runs/${{steps.return_dispatch.outputs.run_id}}" >> $GITHUB_STEP_SUMMARY
3 changes: 2 additions & 1 deletion CMakeLists.txt
@@ -162,8 +162,9 @@ if(WIN32)
set(depthai_dll_libraries "$<TARGET_RUNTIME_DLLS:${TARGET_NAME}>")
endif()
add_custom_command(TARGET ${TARGET_NAME} POST_BUILD COMMAND
- ${CMAKE_COMMAND} -E copy ${depthai_dll_libraries} $<TARGET_FILE_DIR:${TARGET_NAME}>
+ "$<$<BOOL:${depthai_dll_libraries}>:${CMAKE_COMMAND};-E;copy_if_different;${depthai_dll_libraries};$<TARGET_FILE_DIR:${TARGET_NAME}>>"
COMMAND_EXPAND_LISTS
+ VERBATIM
)

# Disable "d" postfix, so python can import the library as is
2 changes: 1 addition & 1 deletion cmake/pybind11-mkdoc.cmake
@@ -28,7 +28,7 @@ function(pybind11_mkdoc_setup_internal target output_path mkdoc_headers enforce)

# Execute module pybind11_mkdoc to check if present
message(STATUS "Checking for pybind11_mkdoc")
- execute_process(COMMAND ${PYTHON_EXECUTABLE} -m ${PYBIND11_MKDOC_MODULE_NAME} RESULT_VARIABLE error OUTPUT_QUIET ERROR_QUIET)
+ execute_process(COMMAND ${PYTHON_EXECUTABLE} -m ${PYBIND11_MKDOC_MODULE_NAME} --help RESULT_VARIABLE error OUTPUT_QUIET ERROR_QUIET)
if(error)
set(message "Checking for pybind11_mkdoc - not found, docstrings not available")
if(NOT enforce)
2 changes: 1 addition & 1 deletion depthai-core
Submodule depthai-core updated 55 files
+12 −4 CMakeLists.txt
+1 −1 cmake/Depthai/DepthaiDeviceSideConfig.cmake
+4 −2 cmake/Hunter/config.cmake
+23 −0 cmake/config.hpp.in
+3 −1 examples/CMakeLists.txt
+1 −1 examples/ColorCamera/rgb_preview.cpp
+43 −0 examples/Script/script_read_calibration.cpp
+2 −2 examples/Warp/warp_mesh.cpp
+1 −0 include/depthai/common/CameraFeatures.hpp
+31 −0 include/depthai/device/CalibrationHandler.hpp
+9 −6 include/depthai/device/DeviceBase.hpp
+0 −16 include/depthai/pipeline/datatype/AprilTags.hpp
+32 −0 include/depthai/pipeline/datatype/Buffer.hpp
+9 −0 include/depthai/pipeline/datatype/ImageManipConfig.hpp
+0 −16 include/depthai/pipeline/datatype/ImgDetections.hpp
+2 −17 include/depthai/pipeline/datatype/ImgFrame.hpp
+0 −16 include/depthai/pipeline/datatype/NNData.hpp
+0 −16 include/depthai/pipeline/datatype/SpatialImgDetections.hpp
+0 −16 include/depthai/pipeline/datatype/SpatialLocationCalculatorData.hpp
+4 −0 include/depthai/pipeline/datatype/ToFConfig.hpp
+0 −16 include/depthai/pipeline/datatype/TrackedFeatures.hpp
+0 −16 include/depthai/pipeline/datatype/Tracklets.hpp
+1 −1 include/depthai/pipeline/node/Camera.hpp
+3 −1 include/depthai/pipeline/node/ColorCamera.hpp
+2 −2 include/depthai/pipeline/node/Warp.hpp
+4 −1 include/depthai/xlink/XLinkStream.hpp
+1 −1 shared/depthai-shared
+61 −22 src/device/CalibrationHandler.cpp
+1 −1 src/device/Device.cpp
+57 −67 src/device/DeviceBase.cpp
+1 −1 src/openvino/BlobReader.cpp
+3 −25 src/pipeline/datatype/AprilTags.cpp
+35 −0 src/pipeline/datatype/Buffer.cpp
+10 −0 src/pipeline/datatype/ImageManipConfig.cpp
+3 −25 src/pipeline/datatype/ImgDetections.cpp
+3 −23 src/pipeline/datatype/ImgFrame.cpp
+3 −25 src/pipeline/datatype/NNData.cpp
+3 −25 src/pipeline/datatype/SpatialImgDetections.cpp
+3 −25 src/pipeline/datatype/SpatialLocationCalculatorData.cpp
+4 −11 src/pipeline/datatype/StreamMessageParser.cpp
+5 −0 src/pipeline/datatype/ToFConfig.cpp
+3 −25 src/pipeline/datatype/TrackedFeatures.cpp
+3 −25 src/pipeline/datatype/Tracklets.cpp
+1 −1 src/pipeline/node/Camera.cpp
+22 −0 src/pipeline/node/ColorCamera.cpp
+2 −2 src/pipeline/node/Warp.cpp
+85 −0 src/utility/EepromDataParser.cpp
+29 −0 src/utility/EepromDataParser.hpp
+11 −3 src/utility/Initialization.cpp
+2 −2 src/utility/Path.cpp
+1 −1 src/xlink/XLinkConnection.cpp
+19 −1 src/xlink/XLinkStream.cpp
+5 −1 tests/CMakeLists.txt
+33 −0 tests/src/device_usbspeed_test.cpp
+113 −0 tests/src/naming_test.cpp
3 changes: 2 additions & 1 deletion docs/requirements_mkdoc.txt
@@ -1 +1,2 @@
- git+https://github.com/luxonis/pybind11_mkdoc.git@59746f8d1645c9f00ebfb534186334d0154b5bd6
+ git+https://github.com/luxonis/pybind11_mkdoc.git@da6c64251a0ebbc3ffc007477a0b9c9f20cac165
+ libclang==16.0.6
149 changes: 149 additions & 0 deletions docs/source/components/nodes/camera.rst
@@ -0,0 +1,149 @@
Camera
======

The Camera node is a source of :ref:`image frames <ImgFrame>`. You can control it at runtime with the :code:`inputControl` and :code:`inputConfig` inputs.
It aims to unify the :ref:`ColorCamera` and :ref:`MonoCamera` into one node.

Compared to :ref:`ColorCamera` node, Camera node:

- Supports **cam.setSize()**, which replaces both ``cam.setResolution()`` and ``cam.setIspScale()``. The Camera node will automatically find the sensor resolution that fits best and apply the correct scaling to achieve the user-selected size (see the sketch after this list)
- Supports **cam.setCalibrationAlpha()**, example here: :ref:`Undistort camera stream`
- Supports **cam.loadMeshData()** and **cam.setMeshStep()**, which can be used for custom image warping (undistortion, perspective correction, etc.)
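
A minimal configuration sketch of the points above; the alpha semantics are assumed to follow the OpenCV convention (0.0 crops to valid pixels only, 1.0 keeps the full field of view):

.. code-block:: python

    import depthai as dai

    pipeline = dai.Pipeline()
    cam = pipeline.create(dai.node.Camera)
    cam.setBoardSocket(dai.CameraBoardSocket.CAM_A)
    # Picks the best-fitting sensor resolution and scales to 1280x720,
    # replacing the setResolution() + setIspScale() pair
    cam.setSize(1280, 720)
    # Undistortion alpha: 0.0 = crop to valid pixels, 1.0 = keep full FOV
    cam.setCalibrationAlpha(0.0)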

Besides the points above, compared to the :ref:`MonoCamera` node, the Camera node:

- Doesn't have an ``out`` output, as it has the same outputs as :ref:`ColorCamera` (``raw``, ``isp``, ``still``, ``preview``, ``video``). This means that ``preview`` will output 3 planes of the same grayscale frame (3x overhead), and ``isp`` / ``video`` / ``still`` will output luma (useful grayscale information) + chroma (all values are 128), resulting in 1.5x bandwidth overhead

How to place it
###############

.. tabs::

.. code-tab:: py

pipeline = dai.Pipeline()
cam = pipeline.create(dai.node.Camera)

.. code-tab:: c++

dai::Pipeline pipeline;
auto cam = pipeline.create<dai::node::Camera>();


Inputs and Outputs
##################

.. code-block::

Camera node
┌──────────────────────────────┐
│ ┌─────────────┐ │
│ │ Image │ raw │ raw
│ │ Sensor │---┬--------├────────►
│ └────▲────────┘ | │
│ │ ┌--------┘ │
│ ┌─┴───▼─┐ │ isp
inputControl │ │ │-------┬-------├────────►
──────────────►│------│ ISP │ ┌─────▼────┐ │ video
│ │ │ | |--├────────►
│ └───────┘ │ Image │ │ still
inputConfig │ │ Post- │--├────────►
──────────────►│----------------|Processing│ │ preview
│ │ │--├────────►
│ └──────────┘ │
└──────────────────────────────┘

**Message types**

- :code:`inputConfig` - :ref:`ImageManipConfig`
- :code:`inputControl` - :ref:`CameraControl`
- :code:`raw` - :ref:`ImgFrame` - RAW10 bayer data. Demo code for unpacking `here <https://github.com/luxonis/depthai-experiments/blob/3f1b2b2/gen2-color-isp-raw/main.py#L13-L32>`__; see also the sketch after this list
- :code:`isp` - :ref:`ImgFrame` - YUV420 planar (same as YU12/IYUV/I420)
- :code:`still` - :ref:`ImgFrame` - NV12, suitable for bigger size frames. The image gets created when a capture event is sent to the Camera, so it's like taking a photo
- :code:`preview` - :ref:`ImgFrame` - RGB (or BGR planar/interleaved if configured), mostly suited for small size previews and to feed the image into :ref:`NeuralNetwork`
- :code:`video` - :ref:`ImgFrame` - NV12, suitable for bigger size frames
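
For illustration, a minimal sketch of unpacking the RAW10 ``raw`` output into 16-bit values with NumPy, assuming the standard MIPI packing (4 pixels per 5 bytes) and no row padding; the demo code linked above is the reference implementation:

.. code-block:: python

    import numpy as np

    def unpack_raw10(packed: bytes, width: int, height: int) -> np.ndarray:
        # Each group of 5 bytes holds 4 pixels: four bytes of 8 MSBs,
        # then one byte carrying the four 2-bit LSBs
        data = np.frombuffer(packed, dtype=np.uint8).reshape(-1, 5).astype(np.uint16)
        lsbs = data[:, 4]
        pixels = np.empty((data.shape[0], 4), dtype=np.uint16)
        for i in range(4):
            pixels[:, i] = (data[:, i] << 2) | ((lsbs >> (2 * i)) & 0x3)
        return pixels.reshape(height, width)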

**ISP** (image signal processor) is used for bayer transformation, demosaicing, noise reduction, and other image enhancements.
It interacts with the 3A algorithms: **auto-focus**, **auto-exposure**, and **auto-white-balance**, which handle image sensor
adjustments such as exposure time, sensitivity (ISO), and lens position (if the camera module has a motorized lens) at runtime.
Click `here <https://en.wikipedia.org/wiki/Image_processor>`__ for more information.
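
The 3A algorithms can also be overridden at runtime by sending a :ref:`CameraControl` message to the ``inputControl`` input. A minimal sketch (the stream name and exposure values below are illustrative):

.. code-block:: python

    import depthai as dai

    pipeline = dai.Pipeline()
    cam = pipeline.create(dai.node.Camera)

    controlIn = pipeline.create(dai.node.XLinkIn)
    controlIn.setStreamName("control")
    controlIn.out.link(cam.inputControl)

    with dai.Device(pipeline) as device:
        controlQueue = device.getInputQueue("control")
        ctrl = dai.CameraControl()
        # Override auto-exposure: fixed exposure time [us] and sensitivity [ISO]
        ctrl.setManualExposure(20000, 400)
        controlQueue.send(ctrl)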

**Image Post-Processing** converts YUV420 planar frames from the **ISP** into :code:`video`/:code:`preview`/:code:`still` frames.

``still`` (when a capture is triggered) and ``isp`` work at the max camera resolution, while ``video`` and ``preview`` are
limited to max 4K (3840 x 2160) resolution, which is cropped from ``isp``.
For IMX378 (12MP), the **post-processing** works like this:

.. code-block::

┌─────┐ Cropping to ┌─────────┐ Downscaling ┌──────────┐
│ ISP ├────────────────►│ video ├───────────────►│ preview │
└─────┘ max 3840x2160 └─────────┘ and cropping └──────────┘

.. image:: /_static/images/tutorials/isp.jpg

The image above is the ``isp`` output from the Camera (12MP resolution from IMX378). If you aren't downscaling ISP,
the ``video`` output is cropped to 4K (max 3840x2160 due to the limitation of the ``video`` output) as represented by
the blue rectangle. The yellow rectangle represents a cropped ``preview`` output when the preview size is set to a 1:1 aspect
ratio (e.g. when using a 300x300 preview size for the MobileNet-SSD NN model), because the ``preview`` output is derived from
the ``video`` output.
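
As a back-of-the-envelope illustration of the figure above, the yellow 1:1 ``preview`` region can be thought of as a center-crop of the ``video`` frame to the preview aspect ratio, followed by downscaling (the numbers are illustrative; the actual cropping runs on-device in the post-processing block):

.. code-block:: python

    video_w, video_h = 3840, 2160    # max video resolution
    preview_w, preview_h = 300, 300  # 1:1 preview, e.g. for MobileNet-SSD

    # Center-crop video to the preview aspect ratio, then downscale
    crop_w = video_h * preview_w // preview_h  # 2160 -> a 2160x2160 square
    x_offset = (video_w - crop_w) // 2         # 840 px dropped from each side
    print(f"crop {crop_w}x{video_h} at x={x_offset}, then scale to {preview_w}x{preview_h}")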

Usage
#####

.. tabs::

.. code-tab:: py

pipeline = dai.Pipeline()
cam = pipeline.create(dai.node.Camera)
cam.setPreviewSize(300, 300)
cam.setBoardSocket(dai.CameraBoardSocket.CAM_A)
# Instead of setting the resolution, the user can specify the desired size;
# the node will pick the best-fitting sensor resolution and apply scaling
cam.setSize(1280, 720)

.. code-tab:: c++

dai::Pipeline pipeline;
auto cam = pipeline.create<dai::node::Camera>();
cam->setPreviewSize(300, 300);
cam->setBoardSocket(dai::CameraBoardSocket::CAM_A);
// Instead of setting the resolution, the user can specify the desired size;
// the node will pick the best-fitting sensor resolution and apply scaling
cam->setSize(1280, 720);

Limitations
###########

Here are known camera limitations for the `RVC2 <https://docs.luxonis.com/projects/hardware/en/latest/pages/rvc/rvc2.html#rvc2>`__:

- **ISP can process about 600 MP/s**, and about **500 MP/s** when the pipeline is also running NNs and the video encoder in parallel (see the budget sketch below)
- **3A algorithms** can process about **200..250 FPS overall** (for all camera streams). This is a current limitation of our implementation; we plan a workaround to run the 3A algorithms on every Xth frame, but there is no ETA yet
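
A back-of-the-envelope sketch of the ISP budget check, with assumed (hypothetical) sensor configurations:

.. code-block:: python

    ISP_BUDGET = 500e6  # px/s, with NNs and the video encoder running in parallel

    streams = {
        "CAM_A 12MP @ 30 FPS": 4056 * 3040 * 30,  # ~370 MP/s
        "CAM_B 800P @ 60 FPS": 1280 * 800 * 60,   # ~61 MP/s
        "CAM_C 800P @ 60 FPS": 1280 * 800 * 60,   # ~61 MP/s
    }
    total = sum(streams.values())
    print(f"{total / 1e6:.0f} MP/s of {ISP_BUDGET / 1e6:.0f} MP/s budget, fits: {total <= ISP_BUDGET}")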

Examples of functionality
#########################

- :ref:`Undistort camera stream`

Reference
#########

.. tabs::

.. tab:: Python

.. autoclass:: depthai.node.Camera
:members:
:inherited-members:
:noindex:

.. tab:: C++

.. doxygenclass:: dai::node::Camera
:project: depthai-core
:members:
:private-members:
:undoc-members:

.. include:: ../../includes/footer-short.rst
2 changes: 1 addition & 1 deletion docs/source/components/nodes/warp.rst
@@ -65,7 +65,7 @@ Usage
# Warp engines to be used (0,1,2)
warp.setHwIds([1])
# Warp interpolation mode, choose between BILINEAR, BICUBIC, BYPASS
- warp.setInterpolation(dai.node.Warp.Properties.Interpolation.BYPASS)
+ warp.setInterpolation(dai.Interpolation.NEAREST_NEIGHBOR)

.. code-tab:: c++

43 changes: 43 additions & 0 deletions docs/source/samples/Camera/camera_undistort.rst
@@ -0,0 +1,43 @@
Undistort camera stream
=======================

This example shows how you can use the :ref:`Camera` node to undistort a wide-FOV camera stream. The :ref:`Camera` node will automatically undistort the ``still``, ``video`` and ``preview`` streams, while the ``isp`` stream will be left as-is.

Demo
####

.. figure:: https://github.com/luxonis/depthai-python/assets/18037362/936b9ad7-179b-42a5-a6cb-25efdbdf73d9

Left: Camera.isp output. Right: Camera.video (undistorted) output

Setup
#####

.. include:: /includes/install_from_pypi.rst

Source code
###########

.. tabs::

.. tab:: Python

Also `available on GitHub <https://github.com/luxonis/depthai-python/blob/main/examples/Camera/camera_undistort.py>`__

.. literalinclude:: ../../../../examples/Camera/camera_undistort.py
:language: python
:linenos:

.. tab:: C++

Work in progress.


..
Also `available on GitHub <https://github.com/luxonis/depthai-core/blob/main/examples/Camera/camera_undistort.cpp>`__
.. literalinclude:: ../../../../depthai-core/examples/Camera/camera_undistort.cpp
:language: cpp
:linenos:

.. include:: /includes/footer-short.rst
6 changes: 6 additions & 0 deletions docs/source/tutorials/code_samples.rst
@@ -7,6 +7,7 @@ Code Samples

../samples/bootloader/*
../samples/calibration/*
+ ../samples/Camera/*
../samples/ColorCamera/*
../samples/crash_report/*
../samples/EdgeDetector/*
@@ -46,6 +47,11 @@ are presented with code.
- :ref:`Calibration Reader` - Reads calibration data stored on device over XLink
- :ref:`Calibration Load` - Loads and uses calibration data of version 6 (gen2 calibration data) in a pipeline


+ .. rubric:: Camera
+
+ - :ref:`Undistort camera stream` - Showcases how the Camera node undistorts camera streams

.. rubric:: ColorCamera

- :ref:`Auto Exposure on ROI` - Demonstrates how to use auto exposure based on the selected ROI
32 changes: 32 additions & 0 deletions examples/Camera/camera_undistort.py
@@ -0,0 +1,32 @@
import depthai as dai
import cv2

pipeline = dai.Pipeline()

# Define sources and outputs
camRgb: dai.node.Camera = pipeline.create(dai.node.Camera)

# Properties
camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A)
camRgb.setSize((1280, 800))

# Linking
videoOut = pipeline.create(dai.node.XLinkOut)
videoOut.setStreamName("video")
camRgb.video.link(videoOut.input)

ispOut = pipeline.create(dai.node.XLinkOut)
ispOut.setStreamName("isp")
camRgb.isp.link(ispOut.input)

with dai.Device(pipeline) as device:
video = device.getOutputQueue(name="video", maxSize=1, blocking=False)
isp = device.getOutputQueue(name="isp", maxSize=1, blocking=False)

while True:
if video.has():
cv2.imshow("video", video.get().getCvFrame())
if isp.has():
cv2.imshow("isp", isp.get().getCvFrame())
if cv2.waitKey(1) == ord('q'):
break
2 changes: 1 addition & 1 deletion examples/ColorCamera/rgb_preview.py
@@ -30,7 +30,7 @@
if device.getBootloaderVersion() is not None:
print('Bootloader version:', device.getBootloaderVersion())
# Device name
- print('Device name:', device.getDeviceName())
+ print('Device name:', device.getDeviceName(), ' Product name:', device.getProductName())

# Output queue will be used to get the rgb frames from the output defined above
qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)