diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 793bf8c2a..c6cc4ac6d 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -474,23 +474,17 @@ jobs: with: submodules: 'recursive' - # Get tag version - # TODO(themarpe) - Node12, has to be updated - - name: Get tag version - id: tag - uses: battila7/get-version-action@v2 - - uses: actions/setup-python@v4 with: python-version: '3.8' - name: Check if version matches - run: python3.8 -c 'import find_version as v; exit(0) if "${{ steps.tag.outputs.version-without-v }}" == v.get_package_version() else exit(1)' + run: python3.8 -c 'import find_version as v; exit(0) if "${{ github.ref_name }}" == f"v{v.get_package_version()}" else exit(1)' # Create GitHub release - uses: actions/create-release@master id: createRelease - name: Create ${{ steps.tag.outputs.version-without-v }} depthai-core release + name: Create ${{ github.ref_name }} depthai-python release env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: @@ -505,8 +499,8 @@ jobs: draft: true - # Deploy to PyPi. Only when a commit is tagged - deploy-pypi: + # Deploy to PyPi and Artifactory. Only when a commit is tagged + deploy: if: startsWith(github.ref, 'refs/tags/v') needs: [release] runs-on: ubuntu-latest @@ -526,6 +520,12 @@ jobs: PYPI_SERVER: ${{ secrets.PYPI_SERVER }} PYPI_USER: ${{ secrets.PYPI_USER }} PYPI_PASSWORD: ${{ secrets.PYPI_PASSWORD }} + - name: Run deploy to Artifactory + run: bash ./ci/upload-artifactory-release.sh + env: + ARTIFACTORY_URL: ${{ secrets.ARTIFACTORY_URL }} + ARTIFACTORY_USER: ${{ secrets.ARTIFACTORY_USER }} + ARTIFACTORY_PASS: ${{ secrets.ARTIFACTORY_PASS }} notify_robothub: if: startsWith(github.ref, 'refs/tags/v') @@ -539,3 +539,34 @@ jobs: repository: luxonis/robothub-apps event-type: depthai-python-release client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}' + + # notify_hil_workflow_linux_x86_64: + # needs: [build-linux-x86_64] + # runs-on: ubuntu-latest + # steps: + # - name: Repository Dispatch + # uses: peter-evans/repository-dispatch@v2 + # with: + # token: ${{ secrets.HIL_CORE_DISPATCH_TOKEN }} + # repository: luxonis/depthai-core-hil-tests + # event-type: python-hil-event + # client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}' + + notify_hil_workflow_linux_x86_64: + needs: [build-linux-x86_64] + runs-on: ubuntu-latest + steps: + - name: Dispatch an action and get the run ID + uses: codex-/return-dispatch@v1 + id: return_dispatch + with: + token: ${{ secrets.HIL_CORE_DISPATCH_TOKEN }} # Note this is NOT GITHUB_TOKEN but a PAT + ref: main # or refs/heads/target_branch + repo: depthai-core-hil-tests + owner: luxonis + workflow: regression_test.yml + workflow_inputs: '{"commit": "${{ github.ref }}", "sha": "${{ github.sha }}", "parent_url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}' + workflow_timeout_seconds: 120 # Default: 300 + + - name: Release + run: echo "https://github.com/luxonis/depthai-core-hil-tests/actions/runs/${{steps.return_dispatch.outputs.run_id}}" >> $GITHUB_STEP_SUMMARY \ No newline at end of file diff --git a/.github/workflows/test-install-dependencies.yml b/.github/workflows/test-install-dependencies.yml new file mode 100644 index 000000000..bc08b2c62 --- /dev/null +++ b/.github/workflows/test-install-dependencies.yml @@ -0,0 +1,64 @@ + name: OS Support + + on: + workflow_dispatch: + push: + paths: + - 'docs/source/_static/install_dependencies.sh' + - 'examples/install_requirements.py' + pull_request: + 
paths: + - 'docs/source/_static/install_dependencies.sh' + - 'examples/install_requirements.py' + + jobs: + test_linux: + runs-on: ubuntu-latest + strategy: + matrix: + container_image: ["fedora:34", "fedora:35", "fedora:36", "ubuntu:18.04", "ubuntu:20.04", "ubuntu:22.04", "ubuntu:22.10"] + container: + image: ${{ matrix.container_image }} + steps: + - uses: actions/checkout@v3 + - name: Install sudo + if: startsWith(matrix.container_image, 'fedora') == true + run: yum update -y && yum install -y sudo + - name: Install sudo + if: startsWith(matrix.container_image, 'ubuntu') == true + run: apt-get update -qq && apt-get -qq install sudo + - name: Install dependencies + run: | + ln -snf /usr/share/zoneinfo/UTC /etc/localtime && echo UTC > /etc/timezone # Otherwise tzdata installer prompts for user input + sed '/udevadm control --reload-rules && sudo udevadm trigger/d' docs/source/_static/install_dependencies.sh > tmp_script.sh # Doesn't work on docker + bash tmp_script.sh + - name: Install example requirements + run: | + python3 examples/install_requirements.py + test_macos: + strategy: + matrix: + os: ["macos-11", "macos-12"] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - name: Install dependencies + run: | + sed '/udevadm control --reload-rules && sudo udevadm trigger/d' docs/source/_static/install_dependencies.sh > tmp_script.sh + bash tmp_script.sh + - name: Install example requirements + run: | + python3 examples/install_requirements.py + test_windows: + runs-on: windows-latest + steps: + - uses: actions/checkout@v3 + - name: Download chocolatey + shell: pwsh + run: Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1')) + - name: Install dependencies + shell: pwsh + run: choco install cmake git python --version 3.10 -y + - name: Install example requrirements + run: | + python examples/install_requirements.py diff --git a/.gitignore b/.gitignore index c6d3c5cc3..98f9cb60a 100644 --- a/.gitignore +++ b/.gitignore @@ -38,6 +38,7 @@ wheelhouse/ .venv env/ venv/ +venv_*/ ENV/ env.bak/ venv.bak/ diff --git a/.readthedocs.yml b/.readthedocs.yml index 6de4e5931..14ba0d775 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -25,8 +25,7 @@ sphinx: # configuration: mkdocs.yml # Optionally build your docs in additional formats such as PDF -formats: - - pdf +formats: [] # Optionally set the version of Python and requirements required to build your docs python: diff --git a/CMakeLists.txt b/CMakeLists.txt index 8ce0fa1b8..95e02005b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -23,8 +23,8 @@ file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/generated/Hunter/config.cmake ${final_hun include("cmake/HunterGate.cmake") HunterGate( - URL "https://github.com/cpp-pm/hunter/archive/v0.23.322.tar.gz" - SHA1 "cb0ea1f74f4a2c49a807de34885743495fccccbe" + URL "https://github.com/cpp-pm/hunter/archive/v0.24.18.tar.gz" + SHA1 "1292e4d661e1770d6d6ca08c12c07cf34a0bf718" FILEPATH ${CMAKE_CURRENT_BINARY_DIR}/generated/Hunter/config.cmake # Combined config ) @@ -107,6 +107,7 @@ set(SOURCE_LIST src/pipeline/node/XLinkInBindings.cpp src/pipeline/node/XLinkOutBindings.cpp src/pipeline/node/ColorCameraBindings.cpp + src/pipeline/node/CameraBindings.cpp src/pipeline/node/MonoCameraBindings.cpp src/pipeline/node/StereoDepthBindings.cpp src/pipeline/node/NeuralNetworkBindings.cpp @@ -129,6 +130,8 @@ 
set(SOURCE_LIST src/pipeline/node/WarpBindings.cpp src/pipeline/node/SyncBindings.cpp src/pipeline/node/BenchmarkBindings.cpp + src/pipeline/node/UVCBindings.cpp + src/pipeline/node/ToFBindings.cpp src/pipeline/datatype/ADatatypeBindings.cpp src/pipeline/datatype/AprilTagConfigBindings.cpp diff --git a/MANIFEST.in b/MANIFEST.in index 70b8bf13a..536965148 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,5 +1,6 @@ include README.md LICENSE CMakeLists.txt pyproject.toml include find_version.py +include generate_stubs.py graft cmake graft generated graft ci diff --git a/README.md b/README.md index 7c3844cfe..450d7ffd5 100644 --- a/README.md +++ b/README.md @@ -96,8 +96,8 @@ If `TEST_TIMEOUT=0`, the test will run until stopped or it ends. ## Tested platforms -- Windows 10 -- Ubuntu 16.04, 18.04; +- Windows 10, Windows 11 +- Ubuntu 18.04, 20.04, 22.04; - Raspbian 10; - macOS 10.14.6, 10.15.4; diff --git a/ci/upload-artifactory-release.sh b/ci/upload-artifactory-release.sh new file mode 100755 index 000000000..6669bcace --- /dev/null +++ b/ci/upload-artifactory-release.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +curl -fL https://getcli.jfrog.io | sh + +cd wheelhouse/audited/ || exit 1 +export PATH_PREFIX=luxonis-python-release-local/depthai + +../../jfrog config add --artifactory-url=$ARTIFACTORY_URL --user=$ARTIFACTORY_USER --password=$ARTIFACTORY_PASS +../../jfrog rt u "*" "$PATH_PREFIX/" diff --git a/depthai-core b/depthai-core index 829c56c7f..1561438a5 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 829c56c7f84dee225ad593fedd80d343d25166c4 +Subproject commit 1561438a550aa5fee152f85425f49618ba7a88ad diff --git a/docs/conf.py.in b/docs/conf.py.in index e6e8a5d74..1c057d961 100644 --- a/docs/conf.py.in +++ b/docs/conf.py.in @@ -26,7 +26,7 @@ sys.path.insert(0, os.path.abspath(source_directory / ".." 
/ "_extensions")) # -- Project information ----------------------------------------------------- project = "DepthAI API Docs" -copyright = u"@build_year@, Luxonis" +html_show_copyright=False author = "Luxonis" version = u"@DEPTHAI_PYTHON_VERSION@" release = version diff --git a/docs/requirements.txt b/docs/requirements.txt index 21301ae81..680df7b44 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -8,4 +8,5 @@ sphinx-autopackagesummary==1.3 autodocsumm==0.2.2 pathlib==1.0.1 jinja2==3.0.3 +urllib3==1.26.15 # New urllib version breaks sphinx -r ./requirements_mkdoc.txt diff --git a/docs/requirements_mkdoc.txt b/docs/requirements_mkdoc.txt index ba5e28492..212071f2c 100644 --- a/docs/requirements_mkdoc.txt +++ b/docs/requirements_mkdoc.txt @@ -1,2 +1,2 @@ -git+https://github.com/luxonis/pybind11_mkdoc.git@47a353ae22a3ca2fe1ca47f47b38613dcfb1043b -numpy # Needed because of xtensor-python +git+https://github.com/luxonis/pybind11_mkdoc.git@59746f8d1645c9f00ebfb534186334d0154b5bd6 +numpy # Needed because of xtensor-python \ No newline at end of file diff --git a/docs/source/_static/images/components/disp_to_depth.jpg b/docs/source/_static/images/components/disp_to_depth.jpg new file mode 100644 index 000000000..c5bda25f5 Binary files /dev/null and b/docs/source/_static/images/components/disp_to_depth.jpg differ diff --git a/docs/source/_static/images/components/disparity_confidence.jpg b/docs/source/_static/images/components/disparity_confidence.jpg new file mode 100644 index 000000000..3d8d4b3b4 Binary files /dev/null and b/docs/source/_static/images/components/disparity_confidence.jpg differ diff --git a/docs/source/_static/images/components/disparity_explanation.jpeg b/docs/source/_static/images/components/disparity_explanation.jpeg index f4df86484..e5450fd20 100644 Binary files a/docs/source/_static/images/components/disparity_explanation.jpeg and b/docs/source/_static/images/components/disparity_explanation.jpeg differ diff --git a/docs/source/_static/images/components/disparity_shift.png b/docs/source/_static/images/components/disparity_shift.png new file mode 100644 index 000000000..83a3b5ee5 Binary files /dev/null and b/docs/source/_static/images/components/disparity_shift.png differ diff --git a/docs/source/_static/images/components/pointcloud_layering.jpg b/docs/source/_static/images/components/pointcloud_layering.jpg new file mode 100644 index 000000000..554816e8d Binary files /dev/null and b/docs/source/_static/images/components/pointcloud_layering.jpg differ diff --git a/docs/source/_static/images/components/spatial-coordinates.png b/docs/source/_static/images/components/spatial-coordinates.png new file mode 100644 index 000000000..7aaaf3c6e Binary files /dev/null and b/docs/source/_static/images/components/spatial-coordinates.png differ diff --git a/docs/source/_static/images/components/theoretical_error.jpg b/docs/source/_static/images/components/theoretical_error.jpg new file mode 100644 index 000000000..9b71d37a5 Binary files /dev/null and b/docs/source/_static/images/components/theoretical_error.jpg differ diff --git a/docs/source/_static/images/favicon.png b/docs/source/_static/images/favicon.png index 2bbaad0ec..decd05d31 100644 Binary files a/docs/source/_static/images/favicon.png and b/docs/source/_static/images/favicon.png differ diff --git a/docs/source/_static/images/logo.png b/docs/source/_static/images/logo.png deleted file mode 100644 index 5f294fb91..000000000 Binary files a/docs/source/_static/images/logo.png and /dev/null differ diff --git 
a/docs/source/_static/install_dependencies.sh b/docs/source/_static/install_dependencies.sh index 9267743d1..9b25c7a08 100755 --- a/docs/source/_static/install_dependencies.sh +++ b/docs/source/_static/install_dependencies.sh @@ -1,6 +1,7 @@ #!/bin/bash -trap 'RET=$? ; echo -e >&2 "\n\x1b[31mFailed installing dependencies. Could be a bug in the installer or unsupported platform. Open a bug report over at https://github.com/luxonis/depthai - exited with status $RET at line $LINENO \x1b[0m\n" ; exit $RET' ERR +trap 'RET=$? ; echo -e >&2 "\n\x1b[31mFailed installing dependencies. Could be a bug in the installer or unsupported platform. Open a bug report over at https://github.com/luxonis/depthai - exited with status $RET at line $LINENO \x1b[0m\n" ; +exit $RET' ERR readonly linux_pkgs=( python3 @@ -50,7 +51,7 @@ readonly ubuntu_pkgs_pre22_04=( libdc1394-22-dev ) -readonly ubuntu_pkgs_22_04=( +readonly ubuntu_pkgs_post22_04=( "${ubuntu_pkgs[@]}" libdc1394-dev ) @@ -96,26 +97,11 @@ print_and_exec () { if [[ $(uname) == "Darwin" ]]; then echo "During Homebrew install, certain commands need 'sudo'. Requesting access..." sudo true - arch_cmd= - if [[ $(uname -m) == "arm64" ]]; then - arch_cmd="arch -x86_64" - echo "Running in native arm64 mode, will prefix commands with: $arch_cmd" - # Check if able to run with x86_64 emulation - retcode=0 - $arch_cmd true || retcode=$? - if [[ $retcode -ne 0 ]]; then - print_action "=== Installing Rosetta 2 - Apple binary translator" - # Prompts the user to agree to license: - # Could be automated by adding: --agree-to-license - print_and_exec softwareupdate --install-rosetta - fi - fi homebrew_install_url="https://raw.githubusercontent.com/Homebrew/install/master/install.sh" print_action "Installing Homebrew from $homebrew_install_url" # CI=1 will skip some interactive prompts - CI=1 $arch_cmd /bin/bash -c "$(curl -fsSL $homebrew_install_url)" - print_and_exec $arch_cmd brew install python3 git - print_and_exec python3 -m pip install -U pip + CI=1 /bin/bash -c "$(curl -fsSL $homebrew_install_url)" + print_and_exec brew install git echo echo "=== Installed successfully! IMPORTANT: For changes to take effect," echo "please close and reopen the terminal window, or run: exec \$SHELL" @@ -126,8 +112,8 @@ elif [ -f /etc/os-release ]; then if [[ "$ID" == "ubuntu" || "$ID" == "debian" || "$ID_LIKE" == "ubuntu" || "$ID_LIKE" == "debian" || "$ID_LIKE" == "ubuntu debian" ]]; then if [[ ! $(uname -m) =~ ^arm* ]]; then sudo apt-get update - if [[ "$VERSION_ID" == "22.04" ]]; then - sudo apt-get install -y "${ubuntu_pkgs_22_04[@]}" + if [[ "$VERSION_ID" > "22.04" || "$VERSION_ID" == "22.04" ]]; then + sudo apt-get install -y "${ubuntu_pkgs_post22_04[@]}" else sudo apt-get install -y "${ubuntu_pkgs_pre22_04[@]}" fi @@ -177,3 +163,5 @@ else echo "ERROR: Host not supported" exit 99 fi + +echo "Finished installing global libraries." diff --git a/docs/source/_static/install_depthai.sh b/docs/source/_static/install_depthai.sh new file mode 100755 index 000000000..27aa3b75c --- /dev/null +++ b/docs/source/_static/install_depthai.sh @@ -0,0 +1,247 @@ +#!/bin/bash + +APP_NAME="depthai" +WORKING_DIR_NAME="Luxonis" +WORKING_DIR="$HOME/$WORKING_DIR_NAME" +mkdir "$WORKING_DIR" +install_path="" +path_correct="false" + +trap 'RET=$? ; echo -e >&2 "\n\x1b[31mFailed installing dependencies. Could be a bug in the installer or unsupported platform. 
Open a bug report over at https://github.com/luxonis/depthai - exited with status $RET at line $LINENO \x1b[0m\n" ; exit $RET' ERR + +while [ "$path_correct" = "false" ] +do + echo "" + echo 'ENTER absolute installation path for depthai or leave empty and default path: $HOME will be used.' + read -e install_path < /dev/tty + echo "" + + if [ "$install_path" = "" ]; then + echo "Using default installation path: $WORKING_DIR" + mkdir -p "$WORKING_DIR" + else + echo "Using given installation path: $install_path" + WORKING_DIR="$install_path" + fi + + if [ -d "$WORKING_DIR" ]; then + echo "Directory: $WORKING_DIR is OK" + path_correct="true" + else + echo "Directory: $WORKING_DIR is not valid. Try again!" + fi +done + +DEPTHAI_DIR="$WORKING_DIR/$APP_NAME" +VENV_DIR="$WORKING_DIR/venv" +ENTRYPOINT_DIR="$DEPTHAI_DIR/entrypoint" + +# Get Python version or find out that python 3.10 must be installed +python_executable=$(which python3) +python_chosen="false" +install_python="false" +python_version=$(python3 --version) +python_version_number="" +if [[ "$python_version" != 'Python'* ]]; then + python_version="" +fi +echo "" + +# check default python version, offer it to the user or get another one +while [ "$python_chosen" = "false" ] +do + if [[ "$python_version" == "" ]]; then + echo "No python version found." + echo "Input path for python binary, version 3.8 or higher, or leave empty and python 3.10 will be installed for you." + echo "Press any key to continue" + read -e python_binary_path < /dev/tty + # python not found and user wants to install python 3.10 + if [ "$python_binary_path" = "" ]; then + install_python="true" + python_chosen="true" + fi + else + # E.g Python 3.10 -> nr_1=3, nr_2=10, for Python 3.7.5 -> nr_1=r, nr_2=7 + nr_1="${python_version:7:1}" + nr_2=$(echo "${python_version:9:2}" | tr -d -c 0-9) + echo "Python version: $python_version found." + if [ "$nr_1" -gt 2 ] && [ "$nr_2" -gt 7 ]; then # first two digits of python version greater then 3.7 -> python version 3.8 or greater is allowed. + echo "If you want to use it for installation, press ANY key, otherwise input path to python binary." + echo "Press any key to continue" + read -e python_binary_path < /dev/tty + # user wants to use already installed python whose version is high enough + if [ "$python_binary_path" = "" ]; then + python_chosen="true" + fi + else + echo "This python version is not supported by depthai. Enter path to python binary version et least 3.8, or leave empty and python 3.10 will be installed automatically." + echo "Press any key to continue" + read -e python_binary_path < /dev/tty + # python version is too low and user wants to install python 3.10 + if [ "$python_binary_path" = "" ]; then + install_python="true" + python_chosen="true" + fi + fi + fi + # User entered some path that should lead to python binary, save python --version output and the rest is dealt in the while loop logic. + if [ "$python_binary_path" != "" ]; then + python_executable="$python_binary_path" + python_version=$($python_binary_path --version) + if [[ "$python_version" != 'Python'* ]]; then + python_version="" + fi + fi +done + + +write_in_file () { + # just make sure only strings are appended which are not in there yet + # first arg is text to write, second arg is the file path + if ! 
grep -Fxq "$1" "$2" + then + echo "$1" >> "$2" + fi +} + +COMMENT='# Entry point for Depthai demo app, enables to run in terminal' +BASHRC="$HOME/.bashrc" +ZSHRC="$HOME/.zshrc" +ADD_ENTRYPOINT_TO_PATH='export PATH=$PATH'":$ENTRYPOINT_DIR" + +# add to .bashrc only if it is not in there already +write_in_file "$COMMENT" "$BASHRC" +write_in_file "$ADD_ENTRYPOINT_TO_PATH" "$BASHRC" + +if [ -f "$ZSHRC" ]; then + write_in_file "$COMMENT" "$ZSHRC" + write_in_file "$ADD_ENTRYPOINT_TO_PATH" "$ZSHRC" +fi + +if [[ $(uname -s) == "Darwin" ]]; then + echo _____________________________ + echo "Calling macOS_installer.sh" + echo _____________________________ + echo "Running macOS installer." + + echo "Installing global dependencies." + bash -c "$(curl -fL https://docs.luxonis.com/install_dependencies.sh)" + + echo "Upgrading brew." + brew update + + # clone depthai form git + if [ -d "$DEPTHAI_DIR" ]; then + echo "Demo app already downloaded. Checking out main and updating." + else + echo "Downloading demo app." + git clone https://github.com/luxonis/depthai.git "$DEPTHAI_DIR" + fi + cd "$DEPTHAI_DIR" + git fetch + git checkout main + git pull + + # install python 3.10 and python dependencies + brew update + + if [ "$install_python" == "true" ]; then + echo "installing python 3.10" + brew install python@3.10 + python_executable=$(which python3.10) + fi + + # pip does not have pyqt5 for arm + if [[ $(uname -m) == 'arm64' ]]; then + echo "Installing pyqt5 with homebrew." + brew install pyqt@5 + fi + + # create python virtual environment + echo "Creating python virtual environment in $VENV_DIR" + echo "$python_executable" + "$python_executable" -m venv "$VENV_DIR" + # activate environment + source "$VENV_DIR/bin/activate" + python -m pip install --upgrade pip + + # install launcher dependencies + # only on mac silicon point PYTHONPATH to pyqt5 installation via homebrew, otherwise install pyqt5 with pip + if [[ $(uname -m) == 'arm64' ]]; then + if [[ ":$PYTHONPATH:" == *":/opt/homebrew/lib/python3.10/site-packages:"* ]]; then + echo "/opt/homebrew/lib/python$nr_1.$nr_2/site-packages already in PYTHONPATH" + else + export "PYTHONPATH=/opt/homebrew/lib/python$nr_1.$nr_2/site-packages:"$PYTHONPATH + echo "/opt/homebrew/lib/pythonv$nr_1.$nr_2/site-packages added to PYTHONPATH" + fi + else + pip install pyqt5 + fi + + pip install packaging + +elif [[ $(uname -s) == "Linux" ]]; then + echo _____________________________ + echo "Calling linux_installer.sh" + echo _____________________________ + + echo "Updating sudo-apt." + sudo apt-get update + + echo "Installing global dependencies." + sudo wget -qO- https://docs.luxonis.com/install_dependencies.sh | bash + + echo -e '\nRunning Linux installer.' + + # clone depthai form git + if [ -d "$DEPTHAI_DIR" ]; then + echo "Demo app already downloaded. Checking out main and updating." + + else + echo "Downloading demo app." 
+ git clone https://github.com/luxonis/depthai.git "$DEPTHAI_DIR" + fi + + cd "$DEPTHAI_DIR" + git fetch + git checkout main + git pull + + # install python 3.10 + if [ "$install_python" == "true" ]; then + echo "installing python 3.10" + + sudo yes "" | sudo add-apt-repository ppa:deadsnakes/ppa + sudo apt -y install python3.10 + sudo apt -y install python3.10-venv + python_executable=$(which python3.10) + fi + + echo "Creating python virtual environment in $VENV_DIR" + + machine=$(uname -m) + if [[ $machine != 'armv6l' && $machine != 'armv7l' && $machine != 'aarch64' && $machine != 'arm64' ]]; then + "$python_executable" -m venv "$VENV_DIR" + else + "$python_executable" -m venv "$VENV_DIR" --system-site-packages + fi + + source "$VENV_DIR/bin/activate" + python -m pip install --upgrade pip + + pip install packaging + + if [[ $machine != 'armv6l' && $machine != 'armv7l' && $machine != 'aarch64' && $machine != 'arm64' ]]; then + pip install pyqt5 + fi +else + echo "Error: Host $(uname -s) not supported." + exit 99 +fi + +echo -e '\n\n:::::::::::::::: INSTALATION COMPLETE ::::::::::::::::\n' +echo -e '\nTo run demo app write in terminal.' +echo "Press ANY KEY to finish and run the demo app..." +read -n1 key < /dev/tty +echo "STARTING DEMO APP." +python "$DEPTHAI_DIR/launcher/launcher.py" -r "$DEPTHAI_DIR" diff --git a/docs/source/components/bootloader.rst b/docs/source/components/bootloader.rst index 97abd163b..d9660e713 100644 --- a/docs/source/components/bootloader.rst +++ b/docs/source/components/bootloader.rst @@ -3,52 +3,83 @@ Bootloader ========== -DepthAI bootloader is a small program which **handles the booting process**, either by **booting the flashed application**, -or by **initializing the OAK PoE camera** so DepthAI API can connect to it. +DepthAI bootloader is a small program which handles the booting process, either by **booting the flashed application** (see :ref:`Standalone mode`), +or by **initializing the OAK PoE camera** so DepthAI API can connect to it. OAK PoE cameras already come with bootloader flashed at the factory. -To be able to run in :ref:`Standalone mode`, the Depthai bootloader must be first flashed to the devices flash. -This step is required only once. - -Once the device has the bootloader flashed, it will perform the same as before. Running pipelines with a host -connected doesn't require any changes. - -Suggested workflow is to perform as much of development as possible with the host connected as the -iteration cycle is greatly improved. +Bootloader is part of the ``depthai`` library, so to eg. flash the newest bootloader, you should use the newest ``depthai`` library. Device Manager ############## -``device_manager.py`` is a Python helper that interfaces with device :ref:`Bootloader` and bootloader configuration. +``device_manager.py`` is a Python script that interfaces with device :ref:`Bootloader` and bootloader configuration. It can be found at `depthai-python/utilities `__. -.. image:: https://user-images.githubusercontent.com/18037362/171629704-0f78f31a-1778-4338-8ac0-bdfb0d2d593f.png +.. image:: https://github.com/luxonis/depthai-python/assets/18037362/1de0d86f-58bf-4979-b7d0-5ca7723db599 -Device Manager Usage --------------------- +About Device +------------ -**About device tab** - Select a camera to see its metadata - like MxID, flashed bootloader version, device state etc. +Allows you to select the device you want to connect to, and you see metadata of connected device. 
* First we have to select the device we want to connect (boot) to, you can select that using: * **Dropdown** which contains found device MX Ids. Dropdown will only get updated when starting the app. * **Specify IP** button if your OAK PoE camera isn't in the same LAN. * **Search** feature - a new window will show that has a table with all available cameras (either via USB port or on LAN - OAK PoEs), their MxId, name, and status. Clicking on a table row will select that device and boot to it. -* ``Flash newest Bootloader`` button will flash the ``newest bootloader`` to the device. You can select AUTO, USB or NETWORK bootloader. - * **AUTO** will select the connection type of bootloader with which the camera is currently connected to. If you are connected via USB (doing factory reset) to an OAK PoE camera, you shouldn't select AUTO, as it will flash USB bootloader. - * **USB** bootloader will try to boot the application that is stored on flash memory. If it can't find flashed application, it will just behave as normal USB OAK - so it will wait until a host computer initializes the application. - * **NETWORK** bootloader is used by the OAK PoE cameras, and is flashed at the factory. It handles network initialization so the OAK PoE cameras can be booted through the LAN. -* ``Factory reset`` will erase the whole flash content and re-flash it with only the USB or NETWORK bootloader. Flashed application (pipeline, assets) and bootloader configurations will be lost. -* ``Boot into USB recovery mode`` will force eg. OAK PoE camera to be available through the USB connector, even if its boot pins are set to PoE booting. It is mostly used by our firmware developers. +Configuration settings +---------------------- -**Configuration settings tab** - After you select a device that has bootloader flashed, you can also configure bootloader -configuration. +After you select a device that has bootloader flashed, you can also configure bootloader configuration. - If the device has **NETWORK bootloader flashed**, you will be able to set its static/dynamic IP/mask/gateway, DNS, MAC, etc. - If the device has **USB bootloader flashed**, you will be able to set its USB max speed and USB timeout. -After setting some values, you have to click on the ``Flash configuration`` button. You can also flash a :ref:`DAP`, -or clear the bootloader config. +After setting some values, you have to click on the ``Flash configuration`` button. You can also ``Clear configuration``, or ``View configuration`` (its JSON). + +.. figure:: https://github.com/luxonis/depthai-python/assets/18037362/4bced0ab-92fa-4a73-986f-4a3ba8848940 + + When flashing static IP, make sure to also set the gateway/mask + +Applications settings +--------------------- + +Useful when dealing with :ref:`Standalone mode` applications. + +- **Flash application**: Select DepthAI Application Package (``.dap``) you want to flash the device. Below is a code snippet that showcases how to create the dap file. +- **Remove application**: Removes/clears the flashed application from the device +- **Open device streaming application**: Starts streaming frames from all connected cameras on the device. + +.. code-block:: python + + import depthai as dai + pipeline = dai.Pipeline() + # ... add nodes to pipeline + dai.DeviceBootloader.saveDepthaiApplicationPackage( + './file.dap', # Where to save the .dap file + pipeline, # My pipeline + compress=True, # Compress the FW and assets. 
In my case, it went from 24MB -> 9.5MB + applicationName='myAppName' # Optional, so you know which app is flashed afterwards + ) + +Danger Zone +----------- + +.. warning:: + + This section can potentially soft-brick your device, so be careful when using it. + +To prevent soft-bricking, OAK devices (since 2023) have factory bootloader and user bootloader. If user flashes a corrupted user bootloader, it will fallback to using factory bootloader. When updating bootloader, +Device Manager will try to flash the user bootloader first, if flashed (factory) bootloader supports it. If it's not possible, it will flash the factory bootloader. + +* **Update Bootloader** button will flash the newest bootloader to the device. You can select AUTO, USB or NETWORK bootloader. + + * AUTO will select the connection type of bootloader with which the camera is currently connected to. If you are connected via USB (doing factory reset) to an OAK PoE camera, you shouldn't select AUTO, as it will flash USB bootloader. + * USB bootloader will try to boot the application that is stored on flash memory. If it can't find flashed application, it will just behave as normal USB OAK - so it will wait until a host computer initializes the application. + * NETWORK bootloader is used by the OAK PoE cameras, and is flashed at the factory. It handles network initialization so the OAK PoE cameras can be booted through the LAN. +* **Flash Factory Bootloader**: If you want to flash the factory bootloader, you can use this button. It will flash the factory bootloader, even if the user bootloader is already flashed. +* **Factory reset** will erase the whole flash content and re-flash it with only the USB or NETWORK bootloader. Flashed application (pipeline, assets) and bootloader configurations will be lost. +* **Boot into USB recovery mode** will force eg. OAK PoE camera to be available through the USB connector, even if its boot pins are set to PoE booting. It is mostly used by our firmware developers. Boot switches ############# @@ -56,9 +87,16 @@ Boot switches - **Boot from flash** - DIP switch: 0x03 (switches 5,6 ON) - used by OAK PoE and USB cameras when bootloader is installed. - **Recovery mode for USB** - DIP switch: 0x16 (switches 2,4,5 ON) - to boot directly into USB mode, so camera waits for the host to connect to it via USB. -.. image:: https://user-images.githubusercontent.com/18037362/154956812-c3fcc961-af46-4dfd-8080-e15c8c6b43f0.png +.. figure:: https://user-images.githubusercontent.com/18037362/154956812-c3fcc961-af46-4dfd-8080-e15c8c6b43f0.png -OAK-D-PoE with switches 2,4,5 ON, for the purpose of connecting to the device via USB. + OAK-D-PoE with switches 2,4,5 ON, for the purpose of connecting to the device via USB. + +On newer versions of OAK devices we have 0 ohm resistors (see image below) instead of a DIP switch, which means OAK will boot into flash by default. These new devices +have the bootloader flashed, which handles the booting process. There's also an additional button on the baseboard that switches boot to recovery mode when +pressed, which can be useful if the bootloader hasn't yet been flashed (eg. early access devices). You need to press this button when powering the device (when booting +happens). + +.. image:: https://user-images.githubusercontent.com/18037362/207295832-613fae0a-c0ae-411e-b03b-8a4736f1bfc7.png API ### @@ -90,9 +128,16 @@ to the pipeline. 
Instead of transferring the whole package, only Pipeline descri Depthai application package (**.dap**) consists of: - SBR (512B header which describes sections of data) -- Depthai device firmware (section “__firmware”) -- Pipeline description (section “pipeline”) -- Assets structure (section “assets”) -- Asset storage (section “asset_storage”) +- Depthai device firmware (section "__firmware") +- Pipeline description (section "pipeline") +- Assets structure (section "assets") +- Asset storage (section "asset_storage") + +MAC address +########### + +All OAK PoE cameras have a unique MAC address which is used to identify the device on the network. It is calculated from the +MxID of the device, see `logic here `__. +The MAC address is stored in the DeviceBootloader configuration. .. include:: ../includes/footer-short.rst diff --git a/docs/source/components/messages/image_manip_config.rst b/docs/source/components/messages/image_manip_config.rst index 85ac597f9..5535536f5 100644 --- a/docs/source/components/messages/image_manip_config.rst +++ b/docs/source/components/messages/image_manip_config.rst @@ -2,10 +2,9 @@ ImageManipConfig ================ This message can is used for cropping, warping, rotating, resizing, etc. an image in runtime. -It is sent either from the host to :ref:`ColorCamera` or :ref:`ImageManip`. +It can be sent from host/:ref:`Script` node to either :ref:`ColorCamera` or :ref:`ImageManip`. -.. - It is sent either from the host or from the :ref:`Script` node to :ref:`ColorCamera` or :ref:`ImageManip`. +.. note:: This message will reconfigure the whole config of the node, meaning you need to set all settings, not just the setting you want to change. Examples of functionality ######################### diff --git a/docs/source/components/messages/tof_config.rst b/docs/source/components/messages/tof_config.rst new file mode 100644 index 000000000..1681c1df4 --- /dev/null +++ b/docs/source/components/messages/tof_config.rst @@ -0,0 +1,31 @@ +ToFConfig +========= + +This message is used to configure the :ref:`ToF` node. + +Examples of functionality +######################### + +- :ref:`ToF depth` + +Reference +######### + +.. tabs:: + + .. tab:: Python + + .. autoclass:: depthai.ToFConfig + :members: + :inherited-members: + :noindex: + + .. tab:: C++ + + .. doxygenclass:: dai::ToFConfig + :project: depthai-core + :members: + :private-members: + :undoc-members: + +.. 
include:: ../../includes/footer-short.rst diff --git a/docs/source/components/nodes/color_camera.rst b/docs/source/components/nodes/color_camera.rst index 6323af090..d9718f8f7 100644 --- a/docs/source/components/nodes/color_camera.rst +++ b/docs/source/components/nodes/color_camera.rst @@ -87,7 +87,7 @@ Usage pipeline = dai.Pipeline() cam = pipeline.create(dai.node.ColorCamera) cam.setPreviewSize(300, 300) - cam.setBoardSocket(dai.CameraBoardSocket.RGB) + cam.setBoardSocket(dai.CameraBoardSocket.CAM_A) cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) cam.setInterleaved(False) cam.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB) @@ -97,7 +97,7 @@ Usage dai::Pipeline pipeline; auto cam = pipeline.create(); cam->setPreviewSize(300, 300); - cam->setBoardSocket(dai::CameraBoardSocket::RGB); + cam->setBoardSocket(dai::CameraBoardSocket::CAM_A); cam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); cam->setInterleaved(false); cam->setColorOrder(dai::ColorCameraProperties::ColorOrder::RGB); diff --git a/docs/source/components/nodes/image_manip.rst b/docs/source/components/nodes/image_manip.rst index 3d3b69a6b..bb466e9fb 100644 --- a/docs/source/components/nodes/image_manip.rst +++ b/docs/source/components/nodes/image_manip.rst @@ -76,7 +76,7 @@ ImageManip node supports the following image formats (more info `in PR here `__ (`datasheet here `__) 9-axis sensor, combining accelerometer, gyroscope, and magnetometer. It also does sensor fusion on the (IMU) chip itself. We have efficiently integrated `this driver `__ into the DepthAI. -- `BMI270 `__ 6-axis sensor, combining accelerometer and gyroscope +- `BMI270 `__ 6-axis sensor, combining accelerometer and gyroscope. -. The IMU chip is connected to the `RVC `__ -over SPI. +The IMU chip is connected to the `RVC `__ +over SPI. See `OAK Hardware documentation `__ to check whether your OAK camera has IMU integrated. How to place it @@ -48,7 +48,62 @@ Limitations ########### - For BNO086, gyroscope frequency above 400Hz can produce some jitter from time to time due to sensor HW limitation. -- **Maximum frequencies**: 500 Hz raw accelerometer, 1000 Hz raw gyroscope values individually, and 500Hz combined (synced) output. You can obtain the combined synced 500Hz output with :code:`imu.enableIMUSensor([dai.IMUSensor.ACCELEROMETER_RAW, dai.IMUSensor.GYROSCOPE_RAW], 500)`. + +IMU sensor frequencies +###################### + +Below are the discrete **stable frequencies** available for each (raw) IMU sensor. Some maximum IMU frequencies are higher, eg. +for BNO086, maximum frequency for gyroscope is 1000Hz, but up to 400Hz is stable (due to driver limitation). + +**BNO086:** + +Note that BNO IMU "rounds up" the input frequency to the next available frequency. For example, if you set the frequency to 101 it will round it to 200Hz. + +- Accelerometer: 15Hz, 31Hz, 62Hz, 125Hz, 250Hz 500Hz +- Gyroscope: 25Hz, 33Hz, 50Hz, 100Hz, 200Hz, 400Hz +- Magnetometer: 100Hz + +**BNO086 max frequency:** + +.. 
list-table:: + :header-rows: 1 + + * - BNO086 Sensor + - Max Frequency + * - ``ACCELEROMETER_RAW`` + - 512 Hz + * - ``ACCELEROMETER`` + - 512 Hz + * - ``LINEAR_ACCELERATION`` + - 400 Hz + * - ``GRAVITY`` + - 400 Hz + * - ``GYROSCOPE_RAW`` + - 1000 Hz + * - ``GYROSCOPE_CALIBRATED`` / ``GYROSCOPE_UNCALIBRATED`` + - 100 Hz + * - ``MAGNETOMETER_RAW`` + - 100 Hz + * - ``MAGNETOMETER_CALIBRATED`` / ``MAGNETOMETER_UNCALIBRATED`` + - 100 Hz + * - ``ROTATION_VECTOR`` + - 400 Hz + * - ``GAME_ROTATION_VECTOR`` + - 400 Hz + * - ``GEOMAGNETIC_ROTATION_VECTOR`` + - 100 Hz + * - ``ARVR_STABILIZED_ROTATION_VECTOR`` + - 100 Hz + * - ``ARVR_STABILIZED_GAME_ROTATION_VECTOR`` + - 100 Hz + +**BMI270:** + +Note that BMI279 "rounds down" the input frequency to the next available frequency. For example, if you set the frequency to 99 it will round it to 50Hz. +Additionally, the current max frequency of ~250 Hz is set when the input is >400Hz. + +- Accelerometer: 25Hz, 50Hz, 100Hz, 200Hz, 250Hz +- Gyroscope: 25Hz, 50Hz, 100Hz, 200Hz, 250Hz Usage ##### @@ -83,22 +138,6 @@ Usage // useful to reduce device's CPU load and number of lost packets, if CPU load is high on device side due to multiple nodes imu->setMaxBatchReports(10); -IMU devices -########### - -List of devices that have an IMU sensor on-board: - -* `OAK-D `__ -* `OAK-D-PoE `__ -* `OAK-D CM4 PoE `__ -* `OAK-FFC-3P `__ -* `OAK-FFC-4P `__ -* `OAK-D Pro `__ (All varients) -* `OAK-D S2 `__ (All varients) -* `OAK-D S2 PoE `__ (All varients) -* `OAK-D Pro PoE `__ (All varients) - - IMU sensors ########### diff --git a/docs/source/components/nodes/mobilenet_spatial_detection_network.rst.rst b/docs/source/components/nodes/mobilenet_spatial_detection_network.rst.rst index 5f994aa94..dbed45c7f 100644 --- a/docs/source/components/nodes/mobilenet_spatial_detection_network.rst.rst +++ b/docs/source/components/nodes/mobilenet_spatial_detection_network.rst.rst @@ -91,6 +91,8 @@ Examples of functionality - :ref:`RGB & MobilenetSSD with spatial data` - :ref:`Mono & MobilenetSSD with spatial data` +.. include:: /includes/spatial-coords.rst + Reference ######### diff --git a/docs/source/components/nodes/mono_camera.rst b/docs/source/components/nodes/mono_camera.rst index 3995de685..e5b278337 100644 --- a/docs/source/components/nodes/mono_camera.rst +++ b/docs/source/components/nodes/mono_camera.rst @@ -48,14 +48,14 @@ Usage pipeline = dai.Pipeline() mono = pipeline.create(dai.node.MonoCamera) - mono.setBoardSocket(dai.CameraBoardSocket.RIGHT) + mono.setCamera("right") mono.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) .. code-tab:: c++ dai::Pipeline pipeline; auto mono = pipeline.create(); - mono->setBoardSocket(dai::CameraBoardSocket::RIGHT); + mono->setCamera("right"); mono->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); Examples of functionality diff --git a/docs/source/components/nodes/object_tracker.rst b/docs/source/components/nodes/object_tracker.rst index 5fdb9e9cc..579db9321 100644 --- a/docs/source/components/nodes/object_tracker.rst +++ b/docs/source/components/nodes/object_tracker.rst @@ -73,7 +73,7 @@ A similar comparison of object trackers with more information can be found `here Maximum number of tracked objects ################################# -:code:`SHORT_TERM_KCF` can track up to 60 objects at once, while all other trackers can (theoretically) track up to 1000 objects at once. +**ObjectTracker** node can track up to 60 objects at once. 
At the moment the firmware crashes if there are more than 60 objects to track. Usage ##### diff --git a/docs/source/components/nodes/script.rst b/docs/source/components/nodes/script.rst index 45efcf6db..96564fa37 100644 --- a/docs/source/components/nodes/script.rst +++ b/docs/source/components/nodes/script.rst @@ -217,6 +217,8 @@ Examples of functionality - :ref:`Script camera control` - Controlling the camera - :ref:`Script get local IP` - Get local IP - :ref:`Script HTTP client` - Send HTTP request +- `Script TCP streaming `__ - TCP communication from within Script node, either in host or client mode +- `Script MQTT publishing `__ - MQTT publishing from within Script node - :ref:`Script HTTP server` - still image over HTTP - :ref:`Script MJPEG server` - MJPEG video stream over HTTP - :ref:`Script NNData example` - Constructs :ref:`NNData` diff --git a/docs/source/components/nodes/spatial_location_calculator.rst b/docs/source/components/nodes/spatial_location_calculator.rst index eea140d6a..c908566fe 100644 --- a/docs/source/components/nodes/spatial_location_calculator.rst +++ b/docs/source/components/nodes/spatial_location_calculator.rst @@ -96,6 +96,8 @@ Examples of functionality - :ref:`Spatial location calculator` +.. include:: /includes/spatial-coords.rst + Reference ######### diff --git a/docs/source/components/nodes/stereo_depth.rst b/docs/source/components/nodes/stereo_depth.rst index 6ae99ed25..6947eaec5 100644 --- a/docs/source/components/nodes/stereo_depth.rst +++ b/docs/source/components/nodes/stereo_depth.rst @@ -1,7 +1,8 @@ StereoDepth ########### -StereoDepth node calculates the disparity/depth from the stereo camera pair (2x :ref:`MonoCamera `). +StereoDepth node calculates the disparity and/or depth from the stereo camera pair (2x :ref:`MonoCamera `/:ref:`ColorCamera`). +We suggest following :ref:`Configuring Stereo Depth` tutorial to achieve the best depth results. How to place it =============== @@ -47,28 +48,29 @@ Inputs and Outputs .. tab:: **Inputs** - - :code:`left` - :ref:`ImgFrame` from the left :ref:`MonoCamera` - - :code:`right` - :ref:`ImgFrame` from the right :ref:`MonoCamera` - - :code:`inputConfig` - :ref:`StereoDepthConfig` + - ``left`` - :ref:`ImgFrame` from the left stereo camera + - ``right`` - :ref:`ImgFrame` from the right stereo camera + - ``inputConfig`` - :ref:`StereoDepthConfig` .. tab:: **Outputs** - - :code:`confidenceMap` - :ref:`ImgFrame` - - :code:`rectifiedLeft` - :ref:`ImgFrame` - - :code:`syncedLeft` - :ref:`ImgFrame` - - :code:`depth` - :ref:`ImgFrame`: UINT16 values - depth in depth units (millimeter by default) - - :code:`disparity` - :ref:`ImgFrame`: UINT8 or UINT16 if Subpixel mode - - :code:`rectifiedRight` - :ref:`ImgFrame` - - :code:`syncedRight` - :ref:`ImgFrame` - - :code:`outConfig` - :ref:`StereoDepthConfig` + - ``confidenceMap`` - :ref:`ImgFrame` + - ``rectifiedLeft`` - :ref:`ImgFrame` + - ``syncedLeft`` - :ref:`ImgFrame` + - ``depth`` - :ref:`ImgFrame`: UINT16 values - depth in depth units (millimeter by default) + - ``disparity`` - :ref:`ImgFrame`: UINT8 or UINT16 if Subpixel mode + - ``rectifiedRight`` - :ref:`ImgFrame` + - ``syncedRight`` - :ref:`ImgFrame` + - ``outConfig`` - :ref:`StereoDepthConfig` .. 
tab:: **Debug outputs** - - :code:`debugDispLrCheckIt1` - :ref:`ImgFrame` - - :code:`debugDispLrCheckIt2` - :ref:`ImgFrame` - - :code:`debugExtDispLrCheckIt1` - :ref:`ImgFrame` - - :code:`debugExtDispLrCheckIt2` - :ref:`ImgFrame` - - :code:`debugDispCostDump` - :ref:`ImgFrame` + - ``debugDispLrCheckIt1`` - :ref:`ImgFrame` + - ``debugDispLrCheckIt2`` - :ref:`ImgFrame` + - ``debugExtDispLrCheckIt1`` - :ref:`ImgFrame` + - ``debugExtDispLrCheckIt2`` - :ref:`ImgFrame` + - ``debugDispCostDump`` - :ref:`ImgFrame` + - ``confidenceMap`` - :ref:`ImgFrame` Internal block diagram of StereoDepth node ========================================== @@ -168,6 +170,8 @@ Limitations =========== - Median filtering is disabled when subpixel mode is set to 4 or 5 bits. +- For RGB-depth alignment the RGB camera has to be placed on the same horizontal line as the stereo camera pair. +- RGB-depth alignment doesn't work when using disparity shift. Stereo depth FPS ================ @@ -241,7 +245,7 @@ Examples of functionality ========================= - :ref:`Depth Preview` -- :ref:`RGB Depth alignment` +- :ref:`RGB Depth alignment` - align depth to color camera - :ref:`Mono & MobilenetSSD & Depth` - :ref:`RGB & MobilenetSSD with spatial data` @@ -269,15 +273,10 @@ Disparity ========= Disparity refers to the distance between two corresponding points in the left and right image of a stereo pair. -By looking at the image below, it can be seen that point :code:`X` gets projected to :code:`XL = (u, v)` in the :code:`Left view` and :code:`XR = (p, q)` in the :code:`Right view`. .. image:: /_static/images/components/disparity_explanation.jpeg :target: https://stackoverflow.com/a/17620159 -Since we know points :code:`XL` and :code:`XR` refer to the same point: :code:`X`, the disparity for this point is equal to the magnitude of the vector between :code:`(u, v)` and :code:`(p, q)`. - -For a more detailed explanation see `this `__ answer on Stack Overflow. - When calculating the disparity, each pixel in the disparity map gets assigned a confidence value :code:`0..255` by the stereo matching algorithm, as: @@ -311,7 +310,7 @@ written in camera intrinsics (``intrinsics[0][0]``): with dai.Device() as device: calibData = device.readCalibration() - intrinsics = calibData.getCameraIntrinsics(dai.CameraBoardSocket.RIGHT) + intrinsics = calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_C) print('Right mono camera focal length in pixels:', intrinsics[0][0]) Here's theoretical calculation of the focal length in pixels: @@ -319,10 +318,10 @@ Here's theoretical calculation of the focal length in pixels: .. code-block:: python focal_length_in_pixels = image_width_in_pixels * 0.5 / tan(HFOV * 0.5 * PI/180) - + # With 400P mono camera resolution where HFOV=71.9 degrees focal_length_in_pixels = 640 * 0.5 / tan(71.9 * 0.5 * PI / 180) = 441.25 - + # With 800P mono camera resolution where HFOV=71.9 degrees focal_length_in_pixels = 1280 * 0.5 / tan(71.9 * 0.5 * PI / 180) = 882.5 @@ -332,7 +331,7 @@ Examples for calculating the depth value, using the OAK-D (7.5cm baseline): # For OAK-D @ 400P mono cameras and disparity of eg. 50 pixels depth = 441.25 * 7.5 / 50 = 66.19 # cm - + # For OAK-D @ 800P mono cameras and disparity of eg. 
10 pixels depth = 882.5 * 7.5 / 10 = 661.88 # cm @@ -423,7 +422,7 @@ So using this formula for existing models the *theoretical* max distance is: # For OAK-D (7.5cm baseline) Dm = (7.5/2) * tan((90 - 71.9/1280)*pi/180) = 3825.03cm = 38.25 meters - + # For OAK-D-CM4 (9cm baseline) Dm = (9/2) * tan((90 - 71.9/1280)*pi/180) = 4590.04cm = 45.9 meters @@ -459,12 +458,12 @@ on the following picture. Meaning of variables on the picture: -- :code:`BL [cm]` - Baseline of stereo cameras. -- :code:`Dv [cm]` - Minimum distace where both cameras see an object (thus where depth can be calculated). -- :code:`B [pixels]` - Width of the band where depth cannot be calculated. -- :code:`W [pixels]` - Width of mono in pixels camera or amount of horizontal pixels, also noted as :code:`HPixels` in other formulas. -- :code:`D [cm]` - Distance from the cameras to an object. -- :code:`F [cm]` - Width of image at the distance :code:`D`. +- ``BL [cm]`` - Baseline of stereo cameras. +- ``Dv [cm]`` - Minimum distace where both cameras see an object (thus where depth can be calculated). +- ``B [pixels]`` - Width of the band where depth cannot be calculated. +- ``W [pixels]`` - Width of mono in pixels camera or amount of horizontal pixels, also noted as :code:`HPixels` in other formulas. +- ``D [cm]`` - Distance from the **camera plane** to an object (see image :ref:`here `). +- ``F [cm]`` - Width of image at the distance ``D``. .. image:: https://user-images.githubusercontent.com/59799831/135310972-c37ba40b-20ad-4967-92a7-c71078bcef99.png diff --git a/docs/source/components/nodes/tof.rst b/docs/source/components/nodes/tof.rst new file mode 100644 index 000000000..149cb8c50 --- /dev/null +++ b/docs/source/components/nodes/tof.rst @@ -0,0 +1,110 @@ +ToF +=== + +**ToF node** is used for converting the raw data from the ToF sensor into a depth map. Currently, these 2 products contain a ToF sensor: + +- `OAK-D SR PoE `__ - integrated 33D ToF sensor, together with a stereo camera pair +- `OAK-FFC ToF 33D `__ - standalone FFC module with a 33D ToF sensor + +ToF's ``depth`` output can be used instead of :ref:`StereoDepth`'s - so you can link ToF.depth to :ref:`MobileNetSpatialDetectionNetwork`/:ref:`YoloSpatialDetectionNetwork` or +:ref:`SpatialLocationCalculator` directly. + +How to place it +############### + +.. tabs:: + + .. code-tab:: py + + pipeline = dai.Pipeline() + warp = pipeline.create(dai.node.ToF) + + .. code-tab:: c++ + + dai::Pipeline pipeline; + auto warp = pipeline.create(); + +Inputs and Outputs +################## + +.. code-block:: + + ┌───────────┐ depth + inputConfig | ├────────► + ───────────►│ | amplitude + input | ToF ├────────► + ───────────►│ │ error + │ ├────────► + └───────────┘ + +**Message types** + +- ``inputConfig`` - :ref:`ToFConfig` +- ``input`` - :ref:`ImgFrame` +- ``depth`` - :ref:`ImgFrame` +- ``amplitude`` - :ref:`ImgFrame` +- ``error`` - :ref:`ImgFrame` + +Usage +##### + +.. tabs:: + + .. code-tab:: py + + pipeline = dai.Pipeline() + + tof_cam = pipeline.create(dai.node.Camera) + # We assume the ToF camera sensor is on port CAM_A + tof_cam.setBoardSocket(dai.CameraBoardSocket.CAM_A) + + tof = pipeline.create(dai.node.ToF) + # ToF node converts raw sensor frames into depth + tof_cam.raw.link(tof.input) + + # Send ToF depth output to the host, or perhaps to SLC / Spatial Detection Network + tof.depth.link(xout.input) + + .. 
code-tab:: c++ + + dai::Pipeline pipeline; + + auto tofCam = pipeline.create(); + // We assume the ToF camera sensor is on port CAM_A + tofCam->setBoardSocket(dai::CameraBoardSocket::AUTO); + + auto tof = pipeline.create(); + // ToF node converts raw sensor frames into depth + tofCam->raw.link(tof->input); + + auto xout = pipeline.create(); + xout->setStreamName("depth"); + // Send ToF depth output to the host + tof->depth.link(xout->input); + +Examples of functionality +######################### + +- :ref:`ToF depth` + +Reference +######### + +.. tabs:: + + .. tab:: Python + + .. autoclass:: depthai.node.ToF + :members: + :inherited-members: + :noindex: + + .. tab:: C++ + + .. doxygenclass:: dai::node::ToF + :project: depthai-core + :members: + :private-members: + :undoc-members: + +.. include:: ../../includes/footer-short.rst diff --git a/docs/source/components/nodes/video_encoder.rst b/docs/source/components/nodes/video_encoder.rst index 89fb48f0b..4f899de7a 100644 --- a/docs/source/components/nodes/video_encoder.rst +++ b/docs/source/components/nodes/video_encoder.rst @@ -1,7 +1,8 @@ VideoEncoder ============ -VideoEncoder node is used to encode :ref:`image frames ` into H264/H265/JPEG. +VideoEncoder node is used to encode :ref:`ImgFrame` into either H264, H265, or MJPEG streams. Only NV12 or GRAY8 (which gets converted to NV12) format is +supported as an input. All codecs are lossy (except lossless MJPEG), for more information please see `encoding quality docs `__. .. include:: /includes/container-encoding.rst @@ -36,7 +37,7 @@ Inputs and Outputs **Message types** -- :code:`input` - :ref:`ImgFrame` +- :code:`input` - :ref:`ImgFrame` (NV12/GRAY8) - :code:`bitstream` - :ref:`ImgFrame` Usage @@ -55,7 +56,7 @@ Usage # Create MJPEG encoding for still images stillEncoder = pipeline.create(dai.node.VideoEncoder) - stillEncoder.setDefaultProfilePreset(cam.getStillSize(), 1, dai.VideoEncoderProperties.Profile.MJPEG) + stillEncoder.setDefaultProfilePreset(1, dai.VideoEncoderProperties.Profile.MJPEG) cam.still.link(stillEncoder.input) cam.video.link(videoEncoder.input) @@ -67,11 +68,11 @@ Usage // Create ColorCamera beforehand // Set H265 encoding for the ColorCamera video output auto videoEncoder = pipeline.create(); - videoEncoder->setDefaultProfilePreset(cam->getVideoSize(), cam->getFps(), dai::VideoEncoderProperties::Profile::H265_MAIN); + videoEncoder->setDefaultProfilePreset(cam->getFps(), dai::VideoEncoderProperties::Profile::H265_MAIN); // Create MJPEG encoding for still images stillEncoder = pipeline.create(dai.node.VideoEncoder); - stillEncoder->setDefaultProfilePreset(cam->getStillSize(), 1, dai::VideoEncoderProperties::Profile::MJPEG); + stillEncoder->setDefaultProfilePreset(1, dai::VideoEncoderProperties::Profile::MJPEG); cam->still.link(stillEncoder->input); cam->video.link(videoEncoder->input); diff --git a/docs/source/components/nodes/warp.rst b/docs/source/components/nodes/warp.rst new file mode 100644 index 000000000..b707990d0 --- /dev/null +++ b/docs/source/components/nodes/warp.rst @@ -0,0 +1,114 @@ +Warp +==== + +Warp node is used for image warping and dewarping, which can be used to undistort images from wide FOV cameras. +The node can also be used to apply a perspective transform to the image. + +Compared to :ref:`ImageManip` node (the `setWarpMesh()` function): + +**Warp node** uses underlyting warp HW block (additional `docs here `__), +with no extra resources (SHAVE/cmx cores). 
HW limitation: **width must be divisible by 16.** + +**ImageManip node** combines the power of warp HW block together the efficiency of CMX memory to achieve higher +throughput (e.g. 4k@30 fps). Scheduling of the HW block is done by SHAVE cores which also do color space conversion, type conversion (YUV420 to NV12), etc. +The downside of using ImageManip node is extra RAM and SHAVE usage. + +How to place it +############### + +.. tabs:: + + .. code-tab:: py + + pipeline = dai.Pipeline() + warp = pipeline.create(dai.node.Warp) + + .. code-tab:: c++ + + dai::Pipeline pipeline; + auto warp = pipeline.create(); + +Inputs and Outputs +################## + +.. code-block:: + + ┌────────────┐ + inputImage │ │ out + ──────────►│ Warp ├──────► + │ │ + └────────────┘ + +**Message types** + +- ``inputImage`` - :ref:`ImgFrame` +- ``out`` - :ref:`ImgFrame` + +Usage +##### + +.. tabs:: + + .. code-tab:: py + + pipeline = dai.Pipeline() + + warp = pipeline.create(dai.node.Warp) + # Create a custom warp mesh + p1 = dai.Point2f(20, 20) + p2 = dai.Point2f(460, 20) + p3 = dai.Point2f(20, 460) + p4 = dai.Point2f(460, 460) + warp.setWarpMesh([p1,p2,p3,p4], 2, 2) + warp.setOutputSize((512,512)) + warp.setMaxOutputFrameSize(512 * 512 * 3) + # Warp engines to be used (0,1,2) + warp.setHwIds([1]) + # Warp interpolation mode, choose between BILINEAR, BICUBIC, BYPASS + warp.setInterpolation(dai.node.Warp.Properties.Interpolation.BYPASS) + + .. code-tab:: c++ + + dai::Pipeline pipeline; + + auto warp = pipeline.create(); + // Create a custom warp mesh + dai::Point2f p1(20, 20); + dai::Point2f p2(460, 20); + dai::Point2f p3(20, 460); + dai::Point2f p4(460, 460); + warp->setWarpMesh({p1,p2,p3,p4}, 2, 2); + warp->setOutputSize({512, 512}); + warp->setMaxOutputFrameSize(512 * 512 * 3); + // Warp engines to be used (0,1,2) + warp->setHwIds({1}); + // Warp interpolation mode, choose between BILINEAR, BICUBIC, BYPASS + warp->setInterpolation(dai::node::Warp::Properties::Interpolation::BYPASS); + +Examples of functionality +######################### + +- :ref:`Warp Mesh` +- :ref:`Interactive Warp Mesh` + +Reference +######### + +.. tabs:: + + .. tab:: Python + + .. autoclass:: depthai.node.Warp + :members: + :inherited-members: + :noindex: + + .. tab:: C++ + + .. doxygenclass:: dai::node::Warp + :project: depthai-core + :members: + :private-members: + :undoc-members: + +.. include:: ../../includes/footer-short.rst diff --git a/docs/source/components/nodes/yolo_detection_network.rst b/docs/source/components/nodes/yolo_detection_network.rst index f47827cd6..e01bb2bb1 100644 --- a/docs/source/components/nodes/yolo_detection_network.rst +++ b/docs/source/components/nodes/yolo_detection_network.rst @@ -1,9 +1,8 @@ YoloDetectionNetwork ==================== -Yolo detection network node is very similar to :ref:`NeuralNetwork` (in fact it extends it). The only difference is that this node -is specifically for the **(tiny) Yolo V3/V4** NN and it decodes the result of the NN on device. This means that :code:`Out` of this node is not a -:ref:`NNData` (a byte array) but a :ref:`ImgDetections` that can easily be used in your code. +Yolo detection network extends :ref:`NeuralNetwork` node by also adding **YOLO NN result decoding**, which happens on the OAK device. +This means that :code:`Out` of this node is not a :ref:`NNData` (a byte array) but a :ref:`ImgDetections` that can easily be used in your code. 
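
For illustration, a minimal host-side sketch of consuming that decoded output could look as follows (the blob path, input size, class count and thresholds are placeholder values for your own model; depending on the YOLO version you may also need ``setAnchors()``/``setAnchorMasks()``):

.. code-block:: python

    import depthai as dai

    pipeline = dai.Pipeline()

    cam = pipeline.create(dai.node.ColorCamera)
    cam.setPreviewSize(416, 416)  # must match the NN input size
    cam.setInterleaved(False)

    nn = pipeline.create(dai.node.YoloDetectionNetwork)
    nn.setBlobPath("model.blob")  # placeholder path to a compiled blob
    nn.setNumClasses(80)
    nn.setCoordinateSize(4)
    nn.setConfidenceThreshold(0.5)
    nn.setIouThreshold(0.5)
    cam.preview.link(nn.input)

    xout = pipeline.create(dai.node.XLinkOut)
    xout.setStreamName("det")
    nn.out.link(xout.input)  # decoded ImgDetections, not raw NNData

    with dai.Device(pipeline) as device:
        qDet = device.getOutputQueue("det", maxSize=4, blocking=False)
        while True:
            for det in qDet.get().detections:
                # Bounding box is normalized (0..1); label is the class index
                print(det.label, det.confidence, det.xmin, det.ymin, det.xmax, det.ymax)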
How to place it ############### diff --git a/docs/source/components/nodes/yolo_spatial_detection_network.rst b/docs/source/components/nodes/yolo_spatial_detection_network.rst index 1cd6de633..ba91df0a3 100644 --- a/docs/source/components/nodes/yolo_spatial_detection_network.rst +++ b/docs/source/components/nodes/yolo_spatial_detection_network.rst @@ -94,6 +94,8 @@ Examples of functionality - :ref:`RGB & TinyYolo with spatial data` +.. include:: /includes/spatial-coords.rst + Reference ######### diff --git a/docs/source/includes/footer-short.rst b/docs/source/includes/footer-short.rst index e868bce60..67cb68912 100644 --- a/docs/source/includes/footer-short.rst +++ b/docs/source/includes/footer-short.rst @@ -1,28 +1,5 @@ .. raw:: html -

-    Got questions?
-
+    Got questions?
+
-We're always happy to help with code or other questions you might have.
-
-.. raw:: html
-
-    Community Discord
-    Discussion Forum
-    Email Support
+ Head over to Discussion Forum for technical support or any other questions you might have. diff --git a/docs/source/includes/spatial-coords.rst b/docs/source/includes/spatial-coords.rst new file mode 100644 index 000000000..3e721d21d --- /dev/null +++ b/docs/source/includes/spatial-coords.rst @@ -0,0 +1,8 @@ +Spatial coordinate system +^^^^^^^^^^^^^^^^^^^^^^^^^ + +OAK camera uses left-handed (Cartesian) coordinate system for all spatial coordiantes. + +.. image:: https://github.com/luxonis/depthai-python/assets/18037362/f9bfaa0c-0286-46c0-910c-77c1337493e1 + +Middle of the frame is 0,0 in terms of X,Y coordinates. If you go up, Y will increase, and if you go right, X will increase. \ No newline at end of file diff --git a/docs/source/index.rst b/docs/source/index.rst index fd203585f..cb2651049 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -6,8 +6,9 @@ DepthAI API Documentation ========================= -.. image:: https://github.com/luxonis/depthai-python/workflows/Python%20Wheel%20CI/badge.svg?branch=gen2_develop - :target: https://github.com/luxonis/depthai-python/actions?query=workflow%3A%22Python+Wheel+CI%22+branch%3A%22gen2_develop%22 +.. + .. image:: https://github.com/luxonis/depthai-python/workflows/Python%20Wheel%20CI/badge.svg?branch=gen2_develop + :target: https://github.com/luxonis/depthai-python/actions?query=workflow%3A%22Python+Wheel+CI%22+branch%3A%22gen2_develop%22 DepthAI API allows users to connect to, configure and communicate with their OAK devices. We support both :ref:`Python API ` and :ref:`C++ API `. @@ -70,9 +71,9 @@ node functionalities are presented with code. :hidden: :caption: Tutorials: - tutorials/hello_world.rst tutorials/standalone_mode.rst tutorials/message_syncing.rst + tutorials/configuring-stereo-depth.rst tutorials/multiple.rst tutorials/maximize_fov.rst tutorials/debugging.rst @@ -80,6 +81,7 @@ node functionalities are presented with code. tutorials/dispaying_detections.rst tutorials/image_quality.rst tutorials/low-latency.rst + tutorials/hello_world.rst .. toctree:: :maxdepth: 1 diff --git a/docs/source/install.rst b/docs/source/install.rst index 2d7039c9f..2a42fa95a 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -1,49 +1,86 @@ Installation ============ -Please install the necessary dependencies for your platform by :ref:`referring to the table below `. -Once installed, you can :ref:`install the DepthAI library `. +.. note:: -We are constantly striving to improve how we release our software to keep up -with countless platforms and the numerous ways to package it. If you do not -see a particular platform or package format listed below please reach out to -us on `Discord `__ -or on `Github `__. + Refer to `DepthAI installer documentation `__ to install + full `DepthAI demo `__ and its dependencies and requirements. + +Follow the steps below to just install depthai api library dependencies for your platform. + +.. tabs:: + + .. tab:: **macOS** + + Execute the script below to install macOS dependencies: + + .. code-block:: bash + + bash -c "$(curl -fL https://docs.luxonis.com/install_dependencies.sh)" + + Please refer to :ref:`documentation below ` if any issues occur. + + .. tab:: **Windows 10/11** + + Windows 10/11 users can either **install DepthAI dependencies** via `Windows Installer `__, + or follow :ref:`instructions below `. + + .. tab:: **Linux** + + Execute the script below to install Linux dependencies: + + .. 
code-block:: bash + + sudo wget -qO- https://docs.luxonis.com/install_dependencies.sh | bash + + Please refer to :ref:`Supported Platforms` if any issues occur. + +Once installed, you can :ref:`install the DepthAI library ` from PyPI. Supported Platforms ################### -======================== ============================================== ================================================================================ -Platform Instructions Support -======================== ============================================== ================================================================================ -Windows 10 :ref:`Platform dependencies ` `Discord `__ -macOS :ref:`Platform dependencies ` `Discord `__ -Ubuntu :ref:`Platform dependencies ` `Discord `__ -Raspberry Pi OS :ref:`Platform dependencies ` `Discord `__ -Jestson Nano/Xavier :ref:`Platform dependencies ` `Discord `__ -======================== ============================================== ================================================================================ - -The following platforms are also supported by a combination of the community and Luxonis: - -====================== =========================================================================== ================================================================================ -Platform Instructions Support -====================== =========================================================================== ================================================================================ -Fedora `Discord `__ -Robot Operating System Follow tutorial at `depthai-ros `__ `Discord `__ -Windows 7 :ref:`WinUSB driver ` `Discord `__ -Docker :ref:`Pull and run official images ` `Discord `__ -Kernel Virtual Machine :ref:`Run on KVM ` `Discord `__ -VMware :ref:`Run on VMware ` `Discord `__ -Virtual Box :ref:`Run on Virtual Box ` `Discord `__ -WSL2 :ref:`Run on WSL2 ` / -====================== =========================================================================== ================================================================================ +See documentation below for other platforms or additional information. + +.. list-table:: + :header-rows: 1 + + * - Platform + - Instructions + * - Windows 10/11 + - :ref:`Platform dependencies ` + * - macOS + - :ref:`Platform dependencies ` + * - Ubuntu + - :ref:`Platform dependencies ` + * - Raspberry Pi OS + - :ref:`Platform dependencies ` + * - Jetson Nano/Xavier + - :ref:`Platform dependencies ` + * - ROS + - Follow tutorial at `depthai-ros `__ + * - Windows 7 + - :ref:`WinUSB driver ` + * - Docker + - :ref:`Pull and run official images ` + * - Kernel Virtual Machine + - :ref:`Run on KVM ` + * - VMware + - :ref:`Run on VMware ` + * - Virtual Box + - :ref:`Run on Virtual Box ` + * - WSL2 + - :ref:`Run on WSL2 ` + +If you do not see a particular platform or package format listed below please reach out to us on `Discord `__ +or on `Github `__. macOS ***** .. code-block:: bash - bash -c "$(curl -fL https://docs.luxonis.com/install_dependencies.sh)" + bash -c "$(curl -fL https://docs.luxonis.com/install_depthai.sh)" Close and re-open the terminal window after this command. @@ -51,8 +88,7 @@ The script also works on M1 Macs, Homebrew being installed under Rosetta 2, as s support. In case you already have Homebrew installed natively and things don't work, see `here `__ for some additional troubleshooting steps. 
-Note that if the video streaming window does not appear consider running the -following: +Note that if the video streaming window does not appear consider running the following: .. code-block:: bash @@ -60,51 +96,7 @@ following: See the `Video preview window fails to appear on macOS `_ thread on our forum for more information. -M1 Mac build wheels natively ----------------------------- - -In order to run DepthAI natively on M1 Mac, you currently need to build the wheels locally. We will add pre-building M1 wheels -in Q2 of 2022, so this won't be needed anymore. - -This tutorial was provided by whab and tested on a MacBookPro M1 Pro running macOS Monterey 12.1 with a OAK-D-Lite. - -.. code-block:: bash - - # Install native M1 version of brew - /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" - echo 'eval "$(/opt/homebrew/bin/brew shellenv)"' >> ~/.zprofile - eval "$(/opt/homebrew/bin/brew shellenv)" - - # Install conda to create virtual environments for Python - brew install --cask miniconda - conda init zsh - - # Close and re-open a Terminal window - - # Install DepthAI by building a M1 wheel (inside ~/DepthAI/) - conda create --name DepthAIEnv39 python=3.9 - conda activate DepthAIEnv39 - python3 -m pip install -U pip - brew update - brew install cmake libusb - cd ~; mkdir DepthAI; cd DepthAI - git clone --recursive https://github.com/luxonis/depthai-python.git - cd depthai-python - mkdir build && cd build - # Build depthai-python - cmake .. - cmake --build . --parallel - cd .. - python3 -m pip wheel . -w wheelhouse - pip install wheelhouse/depthai-* - - # Test DepthAI with a OAK plugged to your new M1 Mac - cd examples - nano install_requirements.py - # Remove code of block (3 lines) starting with: if thisPlatform == "arm64" and platform.system() == "Darwin": - # Remove code of block (48 lines) starting with: if not args.skip_depthai: - python3 install_requirements.py - python3 ColorCamera/rgb_preview.py +We provide **Mac M1 prebuilt Python wheels** for depthai since the version ``2.17.3.1``. Ubuntu ****** @@ -124,15 +116,13 @@ Note! If opencv fails with illegal instruction after installing from PyPi, add: Raspberry Pi OS *************** - + .. code-block:: bash - - sudo curl -fL https://docs.luxonis.com/install_dependencies.sh | bash + sudo curl -fL https://docs.luxonis.com/install_dependencies.sh | bash We have also prepared `pre-configured RPi images `__ so you can get up & running faster. - Jetson ****** @@ -142,7 +132,7 @@ perform the following steps, after completing a fresh install and setup. On the This first step is optional: go to the *Software* (App Store) and delete the apps or software that you probably will not use. Open a terminal window and run the following commands: - + .. code-block:: bash sudo apt update && sudo apt upgrade @@ -231,8 +221,7 @@ For openSUSE, available `in this official article `__ and use it to install DepthAI's dependencies do the following: @@ -444,7 +433,8 @@ And then for each additional depthai/OAK device you would like to pass through, Install from PyPI ################# -Our packages are distributed `via PyPi `__, to install it in your environment use +After installing depthai dependencies, you can either refer to depthai-core for C++ development, or download the depthai **Python library** +`via PyPi `__: .. code-block:: bash @@ -512,102 +502,100 @@ Other installation methods To get the latest and yet unreleased features from our source code, you can go ahead and compile depthai package manually. 
-Dependencies to build from source -********************************* +.. tabs:: -- CMake > 3.2.0 -- Generation tool (Ninja, make, ...) -- C/C++ compiler -- libusb1 development package + .. tab:: **Build from source** -.. _raspbian: + **Dependencies to build from source** -Ubuntu, Raspberry Pi OS, ... (Debian based systems) ---------------------------------------------------- + - CMake > 3.2.0 + - Generation tool (Ninja, make, ...) + - C/C++ compiler + - libusb1 development package -On Debian based systems (Raspberry Pi OS, Ubuntu, ...) these can be acquired by running: + .. tabs:: -.. code-block:: bash + .. tab:: Debian/Ubuntu/RPi OS - sudo apt-get -y install cmake libusb-1.0-0-dev build-essential + On Debian based systems (Raspberry Pi OS, Ubuntu, ...) these can be acquired by running: -macOS (Mac OS X) ----------------- + .. code-block:: bash -Assuming a stock Mac OS X install, `depthai-python `__ library needs following dependencies + sudo apt-get -y install cmake libusb-1.0-0-dev build-essential -- Homebrew (If it's not installed already) + .. tab:: maxOS - .. code-block:: bash + Assuming a stock Mac OS X install, `depthai-python `__ library needs following dependencies - /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" + - Homebrew (If it's not installed already) -- Python, :code:`libusb`, CMake, :code:`wget` + .. code-block:: bash - .. code-block:: bash + /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" - brew install coreutils python3 cmake libusb wget + - Python, :code:`libusb`, CMake, :code:`wget` -And now you're ready to clone the `depthai-python `__ from Github and build it for Mac OS X. + .. code-block:: bash -Install using GitHub commit -*************************** + brew install coreutils python3 cmake libusb wget -Pip allows users to install the packages from specific commits, even if they are not yet released on PyPi. + And now you're ready to clone the `depthai-python `__ from Github and build it for Mac OS X. -To do so, use the command below - and be sure to replace the :code:`` with the correct commit hash `from here `__ + .. tab:: **Install from commit** -.. code-block:: bash + Pip allows users to install the packages from specific commits, even if they are not yet released on PyPi. - python3 -m pip install git+https://github.com/luxonis/depthai-python.git@ + To do so, use the command below - and be sure to replace the :code:`` with the correct commit hash `from here `__ -Using/Testing a Specific Branch/PR -********************************** + .. code-block:: bash -From time to time, it may be of interest to use a specific branch. This may occur, for example, -because we have listened to your feature request and implemented a quick implementation in a branch. -Or it could be to get early access to a feature that is soaking in our :code:`develop` for stability purposes before being merged into :code:`main` -(:code:`develop` is the branch we use to soak new features before merging them into :code:`main`): + python3 -m pip install git+https://github.com/luxonis/depthai-python.git@ -So when working in the `depthai-python `__ repository, using a branch can be accomplished -with the following commands. + .. tab:: **Using specific branch/PR** -Prior to running the following, you can either clone the repository independently -(for not over-writing any of your local changes) or simply do a :code:`git pull` first. + From time to time, it may be of interest to use a specific branch. 
This may occur, for example, + because we have listened to your feature request and implemented a quick implementation in a branch. + Or it could be to get early access to a feature that is soaking in our :code:`develop` for stability purposes before being merged into :code:`main` + (:code:`develop` is the branch we use to soak new features before merging them into :code:`main`): -.. code-block:: bash + So when working in the `depthai-python `__ repository, using a branch can be accomplished + with the following commands. - git checkout - git submodule update --init --recursive - python3 setup.py develop + Prior to running the following, you can either clone the repository independently + (for not over-writing any of your local changes) or simply do a :code:`git pull` first. -Install from source -******************* + .. code-block:: bash -If desired, you can also install the package from the source code itself - it will allow you to make the changes -to the API and see them live in action. + git checkout + git submodule update --init --recursive + python3 setup.py develop -To do so, first download the repository and then add the package to your python interpreter in development mode + .. tab:: **Install from source** -.. code-block:: bash + If desired, you can also install the package from the source code itself - it will allow you to make the changes + to the API and see them live in action. - git clone https://github.com/luxonis/depthai-python.git - cd depthai-python - git submodule update --init --recursive - python3 setup.py develop # you may need to add sudo if using system interpreter instead of virtual environment + To do so, first download the repository and then add the package to your python interpreter in development mode -If you want to use other branch (e.g. :code:`develop`) than default (:code:`main`), you can do so by typing + .. code-block:: bash -.. code-block:: bash + git clone https://github.com/luxonis/depthai-python.git + cd depthai-python + git submodule update --init --recursive + python3 setup.py develop # you may need to add sudo if using system interpreter instead of virtual environment - git checkout develop # replace the "develop" with a desired branch name - git submodule update --recursive - python3 setup.py develop + If you want to use other branch (e.g. :code:`develop`) than default (:code:`main`), you can do so by typing -Or, if you want to checkout a specific commit, type + .. code-block:: bash -.. code-block:: bash + git checkout develop # replace the "develop" with a desired branch name + git submodule update --recursive + python3 setup.py develop + + Or, if you want to checkout a specific commit, type + + .. code-block:: bash - git checkout - git submodule update --recursive - python3 setup.py develop + git checkout + git submodule update --recursive + python3 setup.py develop diff --git a/docs/source/samples/MonoCamera/mono_preview_alternate_pro.rst b/docs/source/samples/MonoCamera/mono_preview_alternate_pro.rst new file mode 100644 index 000000000..7212d0476 --- /dev/null +++ b/docs/source/samples/MonoCamera/mono_preview_alternate_pro.rst @@ -0,0 +1,52 @@ +Mono preview - Alternate between dot projector and illumination LED +=================================================================== + +This example will alternate between the IR illumination LED and IR dot projector. 
By default, example script will run +both left and right monochrome camera sensors at 30FPS, and it will switch between the IR LED and dot projector +every frame - meaning you will get LED-illuminated frames at 15FPS, and dot projector-illuminated frames at 15FPS. + +LED-illuminated frames can be used for your `AI vision tasks `__ +and CV algorithms (eg. :ref:`Feature Tracker`) in low-light environments. Dot projector-illuminated frames are used for `active stereo depth `__. + +Demo +#### + +.. raw:: html + +
+ +
+ +On the video, we disabled both projector and LED for about a second, just to demonstrate how the scene looks +in almost-complete darkness. + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Source code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/MonoCamera/mono_preview_alternate_pro.py + :language: python + :linenos: + + .. tab:: C++ + + Not yet implemented. + +.. + Also `available on GitHub `__ + + .. literalinclude:: ../../../../depthai-core/examples/MonoCamera/mono_preview_alternate_pro.cpp + :language: cpp + :linenos: + +.. include:: /includes/footer-short.rst diff --git a/docs/source/samples/Script/script_emmc_access.rst b/docs/source/samples/Script/script_emmc_access.rst new file mode 100644 index 000000000..c4db24eb0 --- /dev/null +++ b/docs/source/samples/Script/script_emmc_access.rst @@ -0,0 +1,64 @@ +Script EMMC access +================== + +.. note:: + + This example requires a device with onboard EMMC memory (e.g. OAK-1-POE). + To check whether your device has EMMC memory, run the bootloader version script at :ref:`Bootloader Version` and check whether the output contains ``Memory.EMMC``. + + + +This example shows how to use :ref:`Script` node to access EMMC memory of the device. Default location for EMMC memory is ``/media/mmcsd-0-0/``. The first script in the pipeline works by writing an image to EMMC memory. +The second script starts a webserver on ::code:`/media/mmcsd-0-0/` directory and serves the image from EMMC memory. + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + + +Prerequisites +############# + +We first need to enable the EMMC memory as storage on the device. To do so, we need to flash the device with an application that has EMMC enabled. + +Example application: + +.. code-block:: python + + import depthai as dai + + # Create pipeline + pipeline = dai.Pipeline() + + # Set board config + board = dai.BoardConfig() + board.emmc = True + config = dai.Device.Config() + config.board = board + pipeline.setBoardConfig(board) + + (f, bl) = dai.DeviceBootloader.getFirstAvailableDevice() + bootloader = dai.DeviceBootloader(bl) + progress = lambda p : print(f'Flashing progress: {p*100:.1f}%') + (r, errmsg) = bootloader.flash(progress, pipeline, memory=dai.DeviceBootloader.Memory.EMMC) + if r: print("Flash OK") + + +The above code will flash the device with the application that enables the script node to access EMMC memory. Now we should be able to access EMMC memory even when the device is in standard mode (connected to the host PC). + +Source code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/Script/script_emmc_access.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst diff --git a/docs/source/samples/Script/script_uart.rst b/docs/source/samples/Script/script_uart.rst new file mode 100644 index 000000000..5e4656890 --- /dev/null +++ b/docs/source/samples/Script/script_uart.rst @@ -0,0 +1,49 @@ +Script UART communication +========================= + +This example uses :ref:`Script` node for `UART communication `__. Note that OAK +cameras don't have UART pins easily disposed, and we soldered wires on `OAK-FFC-4P `__ +to expose UART pins. + +.. note:: + + This should only be run on OAK-FFC-4P, as other OAK cameras might have different GPIO configuration. + +Demo +#### + +.. raw:: html + +
+ +
+ + +.. figure:: https://user-images.githubusercontent.com/18037362/232458809-a36dc418-6bb5-411f-9172-5130a926191d.jpg + + Oscilloscope connected to the OAK-FFC-4P UART pins + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Source code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/Script/script_uart.py + :language: python + :linenos: + + .. tab:: C++ + + Not yet available + + +.. include:: /includes/footer-short.rst diff --git a/docs/source/samples/SpatialDetection/spatial_calculator_multi_roi.rst b/docs/source/samples/SpatialDetection/spatial_calculator_multi_roi.rst index 10392c5e6..585a5630f 100644 --- a/docs/source/samples/SpatialDetection/spatial_calculator_multi_roi.rst +++ b/docs/source/samples/SpatialDetection/spatial_calculator_multi_roi.rst @@ -11,7 +11,7 @@ scanning camera for mobile robots. Demo #### -.. image:: https://user-images.githubusercontent.com/18037362/190861621-b57fd1e3-5a3d-4d79-b1a7-d17a0b78c63e.gif +.. image:: https://user-images.githubusercontent.com/18037362/231822498-6e3699a0-039e-424b-acb2-b246575e91ee.png Setup ##### diff --git a/docs/source/samples/SpatialDetection/spatial_tiny_yolo.rst b/docs/source/samples/SpatialDetection/spatial_tiny_yolo.rst index 76b552a34..e0643ccbe 100644 --- a/docs/source/samples/SpatialDetection/spatial_tiny_yolo.rst +++ b/docs/source/samples/SpatialDetection/spatial_tiny_yolo.rst @@ -1,7 +1,7 @@ RGB & TinyYolo with spatial data ================================ -This example shows how to run TinyYoloV3 and v4 on the RGB input frame, and how to display both the RGB +This example shows how to run Yolo on the RGB input frame, and how to display both the RGB preview, detections, depth map and spatial information (X,Y,Z). It's similar to example :ref:`RGB & MobilenetSSD with spatial data` except it is running TinyYolo network. X,Y,Z coordinates are relative to the center of depth map. diff --git a/docs/source/samples/StereoDepth/rgb_depth_aligned.rst b/docs/source/samples/StereoDepth/rgb_depth_aligned.rst index fbdd16264..4c854cbe6 100644 --- a/docs/source/samples/StereoDepth/rgb_depth_aligned.rst +++ b/docs/source/samples/StereoDepth/rgb_depth_aligned.rst @@ -12,6 +12,9 @@ By default, the depth map will get scaled to match the resolution of the camera depth is aligned to the 1080P color sensor, StereoDepth will upscale depth to 1080P as well. Depth scaling can be avoided by configuring :ref:`StereoDepth`'s ``stereo.setOutputSize(width, height)``. +To align depth with **higher resolution color stream** (eg. 12MP), you need to limit the resolution of the depth map. You can +do that with ``stereo.setOutputSize(w,h)``. Code `example here `__. + Demo #### diff --git a/docs/source/samples/StereoDepth/stereo_depth_custom_mesh.rst b/docs/source/samples/StereoDepth/stereo_depth_custom_mesh.rst new file mode 100644 index 000000000..ae51f1eb9 --- /dev/null +++ b/docs/source/samples/StereoDepth/stereo_depth_custom_mesh.rst @@ -0,0 +1,52 @@ +Stereo Depth custom Mesh +======================== + +This example shows how you can load custom mesh to the device and use it for depth calculation. +In this example, mesh files are generated from camera calibration data, but you can also use +your own mesh files. + +By default, :ref:`StereoDepth` will use the same logic as inside the ``def getMesh()`` to calculate +mesh files whenever horizontal FOV is larger than 90°. You could also force calculate the mesh using: + +.. 
code-block:: python + + stereo = pipeline.create(dai.node.StereoDepth) + # Enable mesh calculation to correct distortion: + stereo.enableDistortionCorrection(True) + + +StereoDepth node also allows you to load mesh files directly from a file path: + +.. code-block:: python + + stereo = pipeline.create(dai.node.StereoDepth) + stereo.loadMeshFiles('path/to/left_mesh', 'path/to/right_mesh') + +Demo +#### + +.. image:: https://github.com/luxonis/depthai-python/assets/18037362/f2031bd4-0748-4a06-abb1-b52e9a17134e + +On the image above you can see that the rectified frame isn't as wide FOV as the original one, +that's because the distortion correction is applied (in this case via custom mesh files), so the +disparity matching can be performed correctly. + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Source code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/StereoDepth/stereo_depth_custom_mesh.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst diff --git a/docs/source/samples/ToF/tof_depth.rst b/docs/source/samples/ToF/tof_depth.rst new file mode 100644 index 000000000..ac86656ec --- /dev/null +++ b/docs/source/samples/ToF/tof_depth.rst @@ -0,0 +1,45 @@ +ToF depth +========= + +This is a sample code that showcases how to use the ToF sensor. The :ref:`ToF node ` converts raw data from the ToF sensor into a depth map. + +Demo +#### + +This demo was recorded using the `OAK-D SR PoE `__, that's why we selected CAM_A port +on the ToF sensor. + +.. raw:: html + +
+ +
+ +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Source code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/ToF/tof_depth.py + :language: python + :linenos: + + .. tab:: C++ + + .. + Also `available on GitHub `__ + + .. literalinclude:: ../../../../depthai-core/examples/ToF/tof_depth.cpp + :language: cpp + :linenos: + +.. include:: /includes/footer-short.rst diff --git a/docs/source/samples/Warp/warp_mesh.rst b/docs/source/samples/Warp/warp_mesh.rst new file mode 100644 index 000000000..671e6933c --- /dev/null +++ b/docs/source/samples/Warp/warp_mesh.rst @@ -0,0 +1,40 @@ +Warp Mesh +========= + +This example shows usage of :ref:`Warp` node to warp the input image frame. + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Demo +#### + +.. figure:: https://user-images.githubusercontent.com/18037362/214597821-2f76239a-48fa-4146-ba47-9cad872454ea.png + + Warped images + + +Source code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/Warp/warp_mesh.py + :language: python + :linenos: + + .. tab:: C++ + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../depthai-core/examples/Warp/warp_mesh.cpp + :language: cpp + :linenos: + +.. include:: /includes/footer-short.rst diff --git a/docs/source/samples/Warp/warp_mesh_interactive.rst b/docs/source/samples/Warp/warp_mesh_interactive.rst new file mode 100644 index 000000000..d72e0697d --- /dev/null +++ b/docs/source/samples/Warp/warp_mesh_interactive.rst @@ -0,0 +1,45 @@ +Interactive Warp Mesh +===================== + +This example shows usage of :ref:`Warp` node to warp the input image frame. It let's you interactively change the mesh points to warp the image. After changing the points, +**user has to press** ``r`` to restart the pipeline and apply the changes. + +User-defined arguments: + +- ``--mesh_dims`` - Mesh dimensions (default: ``4x4``). +- ``--resolution`` - Resolution of the input image (default: ``512x512``). Width must be divisible by 16. +- ``--random`` - To generate random mesh points (disabled by default). + +Originally developed by `geaxgx `__. + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Demo +#### + +.. figure:: https://user-images.githubusercontent.com/18037362/214605914-87cf0404-2d89-478f-9062-2dfb4baa6512.png + + Original and warped image + + +Source code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/Warp/warp_mesh_interactive.py + :language: python + :linenos: + + .. tab:: C++ + + WIP + +.. include:: /includes/footer-short.rst diff --git a/docs/source/samples/Yolo/tiny_yolo.rst b/docs/source/samples/Yolo/tiny_yolo.rst index bac9e595a..dd1f23ff2 100644 --- a/docs/source/samples/Yolo/tiny_yolo.rst +++ b/docs/source/samples/Yolo/tiny_yolo.rst @@ -1,7 +1,7 @@ RGB & Tiny YOLO =============== -This example shows how to run Tiny YOLOv4 or YOLOv3 on the RGB input frame, and how to display both the RGB +This example shows how to run YOLO on the RGB input frame, and how to display both the RGB preview and the metadata results from the YOLO model on the preview. Decoding is done on the `RVC `__ instead on the host computer. 
diff --git a/docs/source/samples/calibration/calibration_reader.rst b/docs/source/samples/calibration/calibration_reader.rst index b238cecea..93b363b67 100644 --- a/docs/source/samples/calibration/calibration_reader.rst +++ b/docs/source/samples/calibration/calibration_reader.rst @@ -9,6 +9,43 @@ This example shows how to read calibration data stored on device over XLink. Thi - :ref:`Calibration Flash` - :ref:`Calibration Load` +Camera intrinsics +~~~~~~~~~~~~~~~~~ + +Calibration also contains camera intrinsics and extrinsics parameters. + +.. code-block:: python + + import depthai as dai + + with dai.Device() as device: + calibData = device.readCalibration() + intrinsics = calibData.getCameraIntrinsics(dai.CameraBoardSocket.RIGHT) + print('Right mono camera focal length in pixels:', intrinsics[0][0]) + +Here's theoretical calculation of the focal length in pixels: + +.. math:: + + focalLength = width_px * 0.5 / tan(hfov * 0.5 * pi / 180) + +To get the HFOV you can use `this script `__, which also works for wide-FOV cameras and allows you to +specif alpha parameter. + +With 400P (640x400) camera resolution where HFOV=71.9 degrees: + +.. math:: + + focalLength = 640 * 0.5 / tan(71.9 * 0.5 * PI / 180) = 441.25 + +And for 800P (1280x800) camera resolution where HFOV=71.9 degrees: + +.. math:: + + focalLength = 1280 * 0.5 / tan(71.9 * 0.5 * PI / 180) = 882.5 + + + Setup ##### diff --git a/docs/source/samples/crash_report/crash_report.rst b/docs/source/samples/crash_report/crash_report.rst new file mode 100644 index 000000000..b6bb4fa00 --- /dev/null +++ b/docs/source/samples/crash_report/crash_report.rst @@ -0,0 +1,50 @@ +Crash report +============ + +In case of a firmware crash, OAK cameras will automatically generate a crash report and store it in the device. +The crash report contains information about the crash, such as the stack trace, the device's configuration, +and the device's state at the time of the crash. The crash report can be read from the device and sent to Luxonis for debugging purposes. + + +Demo +#### + +In case a crash report was found on the device, this example will read it and save it to a json file: + +.. code-block:: bash + + > python crash_report.py + Crash dump found on your device! + Saved to crashDump_0_184430102163DB0F00_3575b77f20e796b4e79953bf3d2ba22f0416ee8b.json + Please report to developers! + +Please **send the crash reports** together `with an MRE `__ (DepthAI issue) +to our `Discuss Forum `__. Thank you! + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Source code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/CrashReport/crash_report.py + :language: python + :linenos: + + .. tab:: C++ + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../depthai-core/examples/CrashReport/crash_report.cpp + :language: cpp + :linenos: + +.. include:: /includes/footer-short.rst diff --git a/docs/source/samples/host_side/device_information.rst b/docs/source/samples/host_side/device_information.rst index 265c5c7f7..d19302bef 100644 --- a/docs/source/samples/host_side/device_information.rst +++ b/docs/source/samples/host_side/device_information.rst @@ -24,7 +24,7 @@ Demo Found device '192.168.33.192', MxId: '1844301011F4C51200', State: 'BOOTLOADER' Booting the first available camera (1.3)... 
- Available camera sensors: {: 'OV9282', : 'IMX378', : 'OV9282'} + Available camera sensors: {: 'OV9282', : 'IMX378', : 'OV9282'} Product name: OAK-D Pro AF, board name DM9098 diff --git a/docs/source/tutorials/code_samples.rst b/docs/source/tutorials/code_samples.rst index e817ae708..193a04370 100644 --- a/docs/source/tutorials/code_samples.rst +++ b/docs/source/tutorials/code_samples.rst @@ -8,6 +8,7 @@ Code Samples ../samples/bootloader/* ../samples/calibration/* ../samples/ColorCamera/* + ../samples/crash_report/* ../samples/EdgeDetector/* ../samples/FeatureTracker/* ../samples/host_side/* @@ -22,7 +23,9 @@ Code Samples ../samples/SpatialDetection/* ../samples/StereoDepth/* ../samples/SystemLogger/* + ../samples/ToF/* ../samples/VideoEncoder/* + ../samples/Warp/* ../samples/Yolo/* Code samples are used for automated testing. They are also a great starting point for the DepthAI API, as different node functionalities @@ -50,6 +53,10 @@ are presented with code. - :ref:`RGB scene` - Shows how to select ColorCamera's scene and effect - :ref:`RGB video` - Displays high resolution frames of the RGB camera +.. rubric:: Crash report + +- :ref:`Crash report` - In case of a firmware crash, example reads it from the device and saves it to a json file + .. rubric:: EdgeDetector - :ref:`Edge Detector` - Performs edge detection on all camera streams @@ -97,6 +104,7 @@ are presented with code. - :ref:`Mono Preview` - Displays right/left mono cameras - :ref:`Mono Camera Control` - Demonstrates how to control the mono camera (crop, exposure, sensitivity) from the host +- :ref:`Mono preview - Alternate between dot projector and illumination LED` on OAK Pro devices - :ref:`Mono Full Resolution Saver` - Saves mono (720P) images to the host (:code:`.png`) .. rubric:: NeuralNetwork @@ -121,9 +129,12 @@ are presented with code. - :ref:`Script HTTP server` - Serve still image over HTTP response (only OAK-POE devices) - :ref:`Script MJPEG server` - Serve MJPEG video stream over HTTP response (only OAK-POE devices) - :ref:`Script NNData example` - Constructs :ref:`NNData` in Script node and sends it to the host +- :ref:`Script UART communication` - UART communication with Script node +- :ref:`Script EMMC access` - Access EMMC memory from the Script node .. rubric:: SpatialDetection +- :ref:`Spatial Calculator Multi-ROI` - Selects multiple ROIs and calculates spatial coordinates for each of them - :ref:`Spatial location calculator` - Demonstrates how to use the spatial location calculator - :ref:`RGB & MobilenetSSD with spatial data` - Displays RGB frames with MobileNet detections and spatial coordinates on them - :ref:`Mono & MobilenetSSD with spatial data` - Displays mono frames with MobileNet detections and spatial coordinates on them @@ -134,6 +145,7 @@ are presented with code. - :ref:`Depth Crop Control` - Demonstrates how to control cropping of depth frames from the host - :ref:`Depth Post-Processing` - Depth post-processing filters - :ref:`Depth Preview` - Displays colorized stereo disparity +- :ref:`Stereo Depth custom Mesh` - Calculate and load custom mesh for stereo depth calculation - :ref:`Stereo Depth from host` - Generates stereo depth frame from a set of mono images from the host - :ref:`Stereo Depth Video` - An extended version of **Depth Preview** - :ref:`RGB Depth alignment` - Displays RGB depth aligned frames @@ -142,6 +154,10 @@ are presented with code. - :ref:`System information` - Displays device system information (memory/cpu usage, temperature) +.. 
rubric:: ToF + +- :ref:`ToF depth` - Displays colorized ToF depth frames + .. rubric:: VideoEncoder - :ref:`Disparity encoding` - Encodes stereo disparity into :code:`.mjpeg` @@ -150,6 +166,11 @@ are presented with code. - :ref:`Encoding Max Limit` - Encodes RGB (4k 25FPS) and both mono streams (720P, 25FPS) into :code:`.h265`/:code:`.h264` and saves them on the host - :ref:`RGB Full Resolution Saver` - Saves full resolution RGB images (4k) on the host (:code:`.jpeg`) +.. rubric:: Warp + +- :ref:`Warp Mesh` - Displays an image warped with 2 different meshes +- :ref:`Interactive Warp Mesh` - Interactively change the warp mesh + .. rubric:: Yolo - :ref:`RGB & Tiny YOLO` - Runs Tiny YOLO on RGB frames and displays detections on the frame diff --git a/docs/source/tutorials/configuring-stereo-depth.rst b/docs/source/tutorials/configuring-stereo-depth.rst new file mode 100644 index 000000000..a5847c9bb --- /dev/null +++ b/docs/source/tutorials/configuring-stereo-depth.rst @@ -0,0 +1,518 @@ +Configuring Stereo Depth +######################## + +Our :ref:`StereoDepth node ` is very configurable and with this tutorial we will go over some **configurations and troubleshooting** +you can do to get the best results. + +This documentation is divided into 6 chapters: + +- :ref:`1. Stereo Depth Basics` +- :ref:`2. Fixing noisy depth` +- :ref:`3. Improving depth accuracy` +- :ref:`4. Short range stereo depth` +- :ref:`5. Long range stereo depth` +- :ref:`6. Fixing noisy pointcloud` + +1. Stereo Depth Basics +********************** + +`Stereo depth vision `__ works by calculating the disparity between two images taken from +slightly different points. + +Stereo vision works a lot like our eyes. Our brains (subconsciously) estimate the depth of objects and scenes based on the difference between what our left eye sees +versus what our right eye sees. On OAK-D cameras, it's exactly the same; we have left and right cameras (of the stereo camera pair) +and the OAK does on-device disparity matching to estimate the depth of objects and scenes. + +Disparity refers to the distance between two corresponding points in the left and right image of a stereo pair. + +.. image:: /_static/images/components/disparity_explanation.jpeg + +Depth from disparity +-------------------- + +Let's first look at how the depth is calculated: + +.. math:: + + depth [mm] = focalLength [pix] * baseline [mm] / disparity [pix] + +Examples for calculating the depth value, using the OAK-D (7.5cm baseline OV9282), for 400P resolution and disparity of 50 pixels: + +.. math:: + + depth = 441.25 * 7.5 / 50 = 66.19 cm + +`RVC2 `__-based cameras have a **0..95 disparity search** range, +which limits the minimal depth perception. Baseline is the distance between two cameras of the +stereo camera pair. You can read the camera's focal length (in pixels) from calibration, see the :ref:`tutorial here ` + +Disparity and depth are inversely related. As disparity decreases, the depth increases exponentially depending on the baseline and focal length. +Meaning, if the disparity value is close to zero, then a small change in disparity generates a large change in depth. +Similarly, if the disparity value is big, then some change in disparity doesn't lead to a large change in depth (better accuracy). + +Here's a graph showing disparity vs depth for OAK-D (7.5cm baseline distance) at 800P: + +.. 
figure:: /_static/images/components/disp_to_depth.jpg + + `Full chart here `__ + +Note the value of depth data is stored in *uint16*, where 0 means that the distance is invalid/unknown. + +How baseline distance and focal length affect depth +--------------------------------------------------- + +Looking at the depth formula above, we can see that either a larger baseline distance or a larger focal length will result +in further depth at the same disparity, which means that the depth accuracy will be better. + +Focal length is the distance between the camera lens and the image sensor. The larger the focal length, the narrower the FOV. + +So to get **long-range depth** perception, you can **increase the baseline distance and/or decrease the FOV**. + +.. note:: + + Wider FOV will result in worse depth accuracy, even at shorter ranges (where accuracy drop isn't as noticeable). + +2. Fixing noisy depth +********************* + +A few topics we have noticed that are relevant for stereo depth quality are: + +- :ref:`Scene Texture` +- :ref:`Stereo depth confidence threshold` +- :ref:`Stereo camera pair noise` +- :ref:`Stereo postprocessing filters` + +Scene Texture +------------- + +Due to the way the stereo matching algorithm works, **passive stereo depth requires** to have a **good texture** in the scene, otherwise, the depth will be noisy/invalid. +low-visual-interest surfaces (blank surfaces with little to no texture), such as a wall or floor. + +**Solution:** Our `Pro version `__ of OAK cameras have onboard **IR laser dot projector**, +which projects thousands of little dots on the scene, which helps the stereo matching algorithm as it provides more texture to the scene. + +.. image:: https://user-images.githubusercontent.com/18037362/222730554-a6c8d4d3-cb0b-422e-8474-6a979e73727a.gif + +The technique that we use is called ASV (`Conventional Active Stereo Vision `__) +as stereo matching is performed on the device the same way as on a passive stereo OAK-D. + + +Stereo depth confidence threshold +--------------------------------- + +When calculating the disparity, each pixel in the disparity map gets assigned a confidence value :code:`0..255` by the stereo matching algorithm. +This confidence score is kind of inverted (if, say, comparing with NN confidences): + +- **0** - maximum confidence that it holds a valid value +- **255** - minimum confidence, so there is more chance that the value is incorrect + +For the final disparity map, filtering is applied based on the confidence threshold value: the pixels that have their confidence score larger than +the threshold get invalidated, i.e. their disparity value is set to zero. You can set the confidence threshold via the API below. + +This means that with the confidence threshold, users can prioritize **fill-rate or accuracy**. + +.. tabs:: + + .. tab:: Python + + .. code-block:: python + + # Create the StereoDepth node + stereo_depth = pipeline.create(dai.node.StereoDepth) + stereo_depth.initialConfig.setConfidenceThreshold(threshold) + + # Or, alternatively, set the Stereo Preset Mode: + # Prioritize fill-rate, sets Confidence threshold to 245 + stereo_depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) + # Prioritize accuracy, sets Confidence threshold to 200 + stereo_depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_ACCURACY) + + .. tab:: C++ + + .. 
code-block:: cpp + + // Create the StereoDepth node + auto stereo_depth = pipeline.create(); + stereo_depth->initialConfig.setConfidenceThreshold(threshold); + + // Or, alternatively, set the Stereo Preset Mode: + // Prioritize fill-rate, sets Confidence threshold to 245 + stereo_depth->setDefaultProfilePreset(dai::node::StereoDepth::Preset::HIGH_DENSITY); + // Prioritize accuracy, sets Confidence threshold to 200 + stereo_depth->setDefaultProfilePreset(dai::node::StereoDepth::Preset::HIGH_ACCURACY); + + +.. + .. image:: gif of changing threshold, and how fill-rate/accuracy changes + +Stereo camera pair noise +------------------------ + +If input left/right images are noisy, the disparity map will be noisy as well. So the prerequisite for good depth are high IQ (see :ref:`Image Quality ` docs) +left/right stereo images. Active stereo (`Pro version `__ of OAK cameras) +mostly alleviates this issue, but for passive stereo cameras, there are a few things you can do to improve the quality of the stereo camera pair. + +It is preferred to use mono (grayscale) cameras for the stereo camera pair as they +have better quantum efficiency (QE) as they don't have color (Bayer) filter. Higher QE means more signal will be generated for the same amount of light (photons), +which leads to better SNR (signal-to-noise ratio). + +For better low-light performance, it's advised to use longer exposure times instead of higher gain (ISO) as it will improve SNR. Sometimes this means lowering +camera FPS - at 30 FPS, you can use 1/30s exposure time, at 15 FPS, you can use 1/15s exposure time, etc. For more information, see :ref:`Low-light increased sensitivity`. + +Another potential improvement is to tweak the sensor's ISP settings, like chroma & luma denoise, and sharpness. For more information, see the :ref:`Color camera ISP configuration`. + +Stereo postprocessing filters +----------------------------- + +The :ref:`StereoDepth` node has a few postprocessing filters that **run on-device**, which can be enabled to improve the quality of the disparity map. For **implementation +(API) details**, see :ref:`StereoDepth configurable blocks `. For an example, see the :ref:`Depth Post-Processing` example. + +As these filters run on the device, it has a some **performance cost**, which means that at high-resolution frames (1MP) these might bottleneck the FPS. To improve +the cost, one should consider using lower-resolution frames (eg. 400P) and/or using :ref:`Decimation filter`. Due to additional processing, these filters also introduce +:ref:`additional latency `. + +Median filter +~~~~~~~~~~~~~ + +This is a non-edge preserving Median filter, which can be used to reduce noise and smoothen the depth map. Median filter is implemented in hardware, so it's the +fastest filter. + +Speckle filter +~~~~~~~~~~~~~~ + +Speckle Filter is used to reduce the speckle noise. Speckle noise is a region with huge variance between neighboring disparity/depth pixels, and speckle +filter tries to filter this region. + +Temporal filter +~~~~~~~~~~~~~~~ + +Temporal Filter is intended to improve the depth data persistency by manipulating per-pixel values based on previous frames. The filter performs a single pass on +the data, adjusting the depth values while also updating the tracking history. + +In cases where the pixel data is missing or invalid, the filter uses a user-defined persistency mode to decide whether the missing value should be improved +with stored data. 
Note that due to its reliance on historic data, the filter may introduce +visible motion blurring/smearing artifacts, and therefore is best-suited for **static scenes**. + +Spatial filter +~~~~~~~~~~~~~~ + +Spatial Edge-Preserving Filter will fill invalid depth pixels with valid neighboring depth pixels. It performs a series of 1D horizontal and vertical passes or +iterations, to enhance the smoothness of the reconstructed data. It is based on `this research paper `__. + +Brightness filter +~~~~~~~~~~~~~~~~~ + +Brightness filter will filter out (invalidate, by setting to 0) all depth pixels for which input stereo camera image pixels are outside the configured +min/max brightness threshold values. This filter is useful when you have a high dynamic range scene, like outside on a bright day, or in general whenever +stereo camera pair can directly see a light source: + +.. figure:: https://user-images.githubusercontent.com/18037362/216110871-fe807fc0-858d-4c4d-bbae-3a8eff35645d.png + + Direct light source (ceiling light) - depth pixels are invalid + +It also helps with rectification "artifacts", especially when you have Wide FOV lenses and you apply alpha param. When there's no available pixel, +StereoDepth node will set that area to 0 (black) by default, but can be changed with ``stereoDepth.setRectifyEdgeFillColor(int8)``. This black area can then be +invalidated with brightness filter, as seen below: + +.. figure:: https://user-images.githubusercontent.com/18037362/223171135-734babe6-72b4-4aa1-9741-9fd8b4552555.jpeg + + Invalidating depth where we have rectification "artifacts" + +Threshold filter +~~~~~~~~~~~~~~~~ + +Threshold filter will filter out all depth pixels outside the configured min/max threshold values. In a controlled environment, where you know exactly how far the scene +can be (eg. 30cm - 2m) it's advised to use this filter. + +Decimation filter +~~~~~~~~~~~~~~~~~ + +Decimation Filter will sub-sample the depth map, which means it reduces the depth scene complexity and allows other filters to run faster. Setting +*decimationFactor* to 2 will downscale 1280x800 depth map to 640x400. We can either select pixel skipping, median, or mean decimation mode, and the latter two +modes help with filtering as well. + +It's also very useful :ref:`for pointclouds `. + +3. Improving depth accuracy +*************************** + +The above chapter we focused on noise, which isn't necessarily the only reason for inaccurate depth. + +There are a few ways to improve depth accuracy: + +- (mentioned above) :ref:`Fixing noisy depth <2. Fixing noisy depth>` - depth should be high quality in order to be accurate +- (mentioned above) :ref:`Stereo depth confidence threshold` should be low(er) in order to get the best accuracy +- :ref:`Move the camera closer to the object` for the best depth accuracy +- Enable :ref:`Stereo Subpixel mode`, especially if the object/scene isn't close to MinZ of the camera + +Move the camera closer to the object +------------------------------------ + +Looking at the :ref:`Depth from disparity` section, from the graph it's clear that at the 95 disparity pixels (close distance), +depth change between disparity pixels (eg. 95->94) is the lowest, so the **depth accuracy is the best**. + + +.. image:: /_static/images/components/theoretical_error.jpg + +Depth accuracy decreases exponentially with the distance from the camera. Note that with :ref:`Stereo Subpixel mode` +enabled you can have better depth accuracy (even at a longer distance) but it only works to some extent. 
+ +So to conclude, **object/scene you are measuring** should be **as close as possible to MinZ** (minimal depth perception) of the camera +for **best depth accuracy**. You can find MinZ specification for each device in the `hardware documentation `__. + +Stereo Subpixel mode +-------------------- + +Let's first start with what Stereo Subpixel mode is and how it works. For image subpixel explanation, see `What's subpixel? `__). + +.. note:: + + The stereo depth pipeline is very complex (see :ref:`Internal block diagram of StereoDepth node`), and we will simplify it here for better understanding. It actually doesn't use confidence (eg. ``stereoDepth.confidenceMap`` output), but cost dump, which is what is used to calculate confidence values. + +When calculating disparity depth, stereo matching algorithm assign a "confidence" for each disparity pixel, which means each pixel +of the depth image contains 96 bytes (for confidence). If you are interested in all these cost values, you can use ``stereoDepth.debugDispCostDump`` output, +just note it's a very large output (eg. 1280*800*96 => 98MB for each frame). + +.. image:: /_static/images/components/disparity_confidence.jpg + +Stereo Subpixel mode will calculate subpixel disparity by looking at the confidence values of the 2 neighboring disparity pixels in each direction. +In the above example graph, in normal mode, StereoDepth would just get the max disparity = 34 pixels, but in Subpixel +mode, it will return a bit more, eg. 34.375 pixels, as confidences for pixels 35 and 36 are quite high as well. + +**TL;DR:** Stereo Subpixel mode should always provide more accurate depth, but will consume additional HW resources (see :ref:`Stereo depth FPS` for impact). + +Stereo subpixel effect on layering +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Default stereo depth output has 0..95 disparity pixels, which would produce 96 unique depth values. This can especially be seen when using pointcloud representation +and seeing how there are discrete "layers" of points, instead of a smooth transition: + +.. image:: /_static/images/components/pointcloud_layering.jpg + +This layering can especially be seen at longer distances, where these layers are exponentially further apart. + +But with Stereo Subpixel mode enabled, there are many more unique values possible, which produces more granular depth steps, and thus smoother a pointcloud. + +.. code-block:: python + + # Number of unique values based on subpixel bits setting + unique_values = 94 * 2 ^ subpixel_bits + 2 [min/max value] + +.. math:: + 94 * 2^3 + 2 = 754 + +.. math:: + 94 * 2^4 + 2 = 1506 + +.. math:: + 94 * 2^5 + 2 = 3010 + +One can change the number of subpixel bits by setting ``stereoDepth.setSubpixelFractionalBits(int)`` parameter to 3, 4 or 5 bits. + +4. Short range stereo depth +*************************** + +To get accurate short-range depth, you'd first need to follow :ref:`3. Improving depth accuracy` steps. +For most normal-FOV, OV9282 OAK-D* cameras, you'd want to have the object/scene about 70cm away from the camera, +where you'd get below 2% error (with good :ref:`Scene Texture`), so ± 1.5cm error. + +But how to get an even better depth accuracy, eg. **sub-cm stereo depth accuracy**? +As we have learned at :Ref:`How baseline distance and focal length affect depth`, we would want to +have a closer baseline distance and/or narrower FOV lenses. 
+ +That's why for the short-range depth perception **we suggest using** `OAK-D SR `__, +which has 2 cm baseline distance, 800P resolution, and is ideal for depth sensing of up to 1 meter. + +Going back to :ref:`Depth from disparity`, minimal depth perception (**MinZ**) is defined by the following formula, where the disparity is 95 pixels +(maximum number of pixel for disparity search): + +.. math:: + depth = focalLength * baseline / disparity + +.. math:: + MinZ = focalLength * baseline / 95 + +How to get lower MinZ +--------------------- + +If the depth results for close-in objects look weird, this is likely because they are below MinZ distance of the OAK camera. You can get lower MinZ for OAK cameras by either: + +- :ref:`Lowering resolution ` +- Enabling :ref:`Stereo Extended Disparity mode` +- Using :ref:`Disparity shift` - suggested in a controlled environment, where MaxZ is known + +Both of these last 2 options can be enabled at the same time, which would set the minimum depth to 1/4 of the standard settings, but at such short distances the MinZ +could be limited by the focal length. + +Lowering resolution to decrease MinZ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Above we have a formula for MinZ, and by lowering the resolution, we are lowering focal length (in pixels), so let's look at the formula again: + +.. math:: + MinZ = focalLength * baseline / 95 + +.. math:: + MinZ [800P] = 882.5 * 7.5 / 95 = 70 cm + +.. math:: + MinZ [400P] = 441 * 7.5 / 95 = 35 cm + +As you can see, by lowering resolution by 2, we are also lowering MinZ by 2. Note that because you have fewer pixels, you will also have lower depth accuracy (in cm). + +Stereo Extended Disparity mode +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Very similar to :ref:`Lowering resolution to decrease MinZ`, Extended mode runs stereo depth pipeline twice (thus consuming more HW resources); once with resolution of +the frame that was passed to :ref:`StereoDepth` node, and once with resolution downscaled by 2, then combines the 2 output disparity maps. + +Disparity shift +~~~~~~~~~~~~~~~ + +In a controlled environment, where MaxZ is known in advance, to perceive closer depth range it's advised to use disparity shift, as it doesn't decrease depth accuracy +as the other 2 methods above do. + +Disparity shift will shift the starting point of the disparity search, which will significantly decrease MaxZ, but +it will also decrease the MinZ. Disparity shift can be combined with extended/subpixel/LR-check modes. + +.. image:: /_static/images/components/disparity_shift.png + +The **Left graph** shows min and max disparity and depth for OAK-D (7.5cm baseline, 800P resolution, ~70° HFOV) by default (disparity shift=0). See :ref:`Depth from disparity`. +Since hardware (stereo block) has a fixed 95 pixel disparity search, DepthAI will search from 0 pixels (depth=INF) to 95 pixels (depth=71cm). + +**Limitations**: +The **Right graph** shows the same, but at disparity shift set to 30 pixels. This means that disparity search will be from 30 pixels (depth=2.2m) to 125 pixels (depth=50cm). +This also means that depth will be very accurate at the short range (**theoretically** below 5mm depth error). + + +- Because of the inverse relationship between disparity and depth, MaxZ will decrease much faster than MinZ as the disparity shift is increased. Therefore, it is **advised not to use a larger-than-necessary disparity shift**. +- Teh tradeoff in reducing the MinZ this way is that objects at **distances farther away than MaxZ will not be seen**. 
+
+**Compared to Extended disparity**, disparity shift:
+
+- **(+)** Is faster, as it doesn't require an extra computation, which means there's also no extra latency
+- **(-)** Reduces the MaxZ (significantly), while extended disparity only reduces MinZ.
+
+Disparity shift can be combined with extended disparity.
+
+.. doxygenfunction:: dai::StereoDepthConfig::setDisparityShift
+   :project: depthai-core
+   :no-link:
+
+Close range depth limitations
+-----------------------------
+
+Since depth is calculated from disparity, which requires the pixels to overlap, there is inherently a vertical
+band on the left side of the left mono camera and on the right side of the right mono camera, where depth
+can not be calculated, since it is seen by only 1 stereo camera.
+
+At very close distances, even when enabling :ref:`Stereo Extended Disparity mode` and :ref:`Lowering resolution `,
+you will notice this vertical band of invalid depth pixels.
+
+.. image:: https://user-images.githubusercontent.com/59799831/135310921-67726c28-07e7-4ffa-bc8d-74861049517e.png
+
+Meaning of variables on the picture:
+
+- ``BL [cm]`` - Baseline of stereo cameras.
+- ``Dv [cm]`` - Minimum distance where both cameras see an object (thus where depth can be calculated).
+- ``W [pixels]`` - Width of the mono camera frame in pixels (number of horizontal pixels), also noted as :code:`HPixels` in other formulas.
+- ``D [cm]`` - Distance from the **camera plane** to an object (see image :ref:`here `).
+- ``F [cm]`` - Width of the scene covered by the camera's HFOV at distance :code:`D`.
+- ``B [pixels]`` - Width of the band where depth cannot be calculated.
+
+.. image:: https://user-images.githubusercontent.com/59799831/135310972-c37ba40b-20ad-4967-92a7-c71078bcef99.png
+
+With the use of the :code:`tan` function, the following formulas can be obtained:
+
+.. math::
+   F = 2 * D * tan(HFOV/2)
+
+.. math::
+   Dv = (BL/2) * tan(90 - HFOV/2)
+
+In order to obtain :code:`B`, we can use the :code:`tan` function again (same as for :code:`F`), but this time
+we must also multiply it by the ratio between :code:`W` and :code:`F` in order to convert the units to pixels.
+That gives the following formula:
+
+.. math::
+   B = 2 * Dv * tan(HFOV/2) * W / F
+
+.. math::
+   B = 2 * Dv * tan(HFOV/2) * W / (2 * D * tan(HFOV/2))
+
+.. math::
+   B [pixels] = W * Dv / D
+
+Example: using an OAK-D, which has an HFOV of 72° and a baseline (:code:`BL`) of 7.5 cm, at 640x400 (400P) resolution (so :code:`W = 640`),
+with an object :code:`D = 100` cm away, we can calculate :code:`B` in the following way:
+
+.. math::
+
+   Dv = 7.5 / 2 * tan(90 - 72/2) = 3.75 * tan(54°) = 5.16 cm
+   B = 640 * 5.16 / 100 = 33
+
+Credit for the calculations and images goes to our community member gregflurry, who shared them in
+`this `__
+forum post.
+
+5. Long range stereo depth
+**************************
+
+To get accurate long-range depth, we should first check the :ref:`3. Improving depth accuracy` steps,
+as they are especially applicable to long-range depth.
+
+For long-range depth, we should also consider the following:
+
+- Narrow FOV lenses
+- Wide baseline distance between stereo cameras
+
+That's why for long range **we suggest using** `OAK-D LR `__,
+which has a (larger) baseline distance of 15cm and a default FOV of 60°.
It has `M12 mount lenses `__, +so users can replace these with even narrower (or wider) FOV lenses. + +6. Fixing noisy pointcloud +************************** + +For noisy pointcloud we suggest a few approaches: + +* (mentioned above) Start with the :ref:`Fixing noisy depth <2. Fixing noisy depth>` chapter, as otherwise, noise will produce points all over the pointcloud +* (mentioned above) Continue with the :ref:`Improving depth accuracy <3. Improving depth accuracy>` chapter - depth inaccuracy will be easily visible in pointcloud + + * Enable Stereo subpixel mode, especially due to the :ref:`Stereo subpixel effect on layering` + +* :ref:`Decimation filter for pointcloud` for faster processing (FPS) and additional filtering +* :ref:`Invalidating pixels around the corner` should help to reduce noise around the corners of the depth frame +* :ref:`Host-side pointcloud filtering` for additional filtering + +Decimation filter for pointcloud +-------------------------------- + +:ref:`Decimation filter` is especially useful for pointclouds, you don't really want 1 million points (even though it sounds good for marketing), +as it's too much data to process. Decimation filter helps here, and should be enabled when working with pointclouds. + +When using decimation filter for pointcloud you should enable **median/mean mode decimation**, as it will provide additional filtering (compared to pixel skipping mode). +It also makes other :ref:`Stereo postprocessing filters` faster, since there will be less data to process. + +Invalidating pixels around the corner +------------------------------------- + +There are often invalid/noisy pixels around the corners, and we have seen that some customers preventively invalidate a few pixels (eg. 3) all around the corner of depth +image. We also suggest enabling :ref:`Brightness filter`, especially due to rectification "artifacts". + +Host-side pointcloud filtering +------------------------------ + +Besides device-side :ref:`Stereo postprocessing filters`, we also suggest running host-side pointcloud filtering (with eg. `Open3D `__, or `PCL `__ library). + +We especially suggest using pointcloud voxalization and removing statistical outliers techniques, `example here `__ for both of these. + + +.. + Best practices in certain environments + ************************************** + + - In high dynamic range env (like outside), use brightness filter (img above) + - In more static env, temporal filter + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/docs/source/tutorials/debugging.rst b/docs/source/tutorials/debugging.rst index 4b6a36002..5ca71019b 100644 --- a/docs/source/tutorials/debugging.rst +++ b/docs/source/tutorials/debugging.rst @@ -104,4 +104,62 @@ Code above will print the following values to the user: [Script(0)] [warning] FP16 values: [1.2001953125, 1.2001953125, 3.900390625, 5.5] [Script(0)] [warning] UINT8 values: [6, 9, 4, 2, 0] +Resource Debugging +================== + +By enabling ``info`` log level (or lower), depthai will print usage of `hardware resources `__, +specifically SHAVE core and CMX memory usage: + +.. 
code-block:: bash + + NeuralNetwork allocated resources: shaves: [0-11] cmx slices: [0-11] # 12 SHAVES/CMXs allocated to NN + ColorCamera allocated resources: no shaves; cmx slices: [13-15] # 3 CMXs allocated to Color an Mono cameras (ISP) + MonoCamera allocated resources: no shaves; cmx slices: [13-15] + StereoDepth allocated resources: shaves: [12-12] cmx slices: [12-12] # StereoDepth node consumes 1 CMX and 1 SHAVE core + ImageManip allocated resources: shaves: [15-15] no cmx slices. # ImageManip node(s) consume 1 SHAVE core + SpatialLocationCalculator allocated resources: shaves: [14-14] no cmx slices. # SLC consumes 1 SHAVE core + +In total, this pipeline consumes 15 SHAVE cores and 16 CMX slices. The pipeline is running an object detection model compiled for 6 SHAVE cores. + +CPU usage +========= + +When setting the :ref:`DepthAI debugging level` to debug (or lower), depthai will also print our CPU usage for LeonOS and LeonRT. CPU usage +at 100% (or close to it) can cause many undesirable effects, such as higher frame latency, lower FPS, and in some cases even firmware crash. + +Compared to OAK USB cameras, OAK PoE cameras will have increased CPU consumption, as the networking stack is running on the LeonOS core. Besides +reducing pipeline (doing less processing), a good alternative is to reduce 3A FPS (ISP). This means that 3A algorithms (auto exposure, auto white balance +and auto focus) won't be run every frame, but every N frames. When updating DepthAI SDK's `camera_preview.py `__ +example (code change below), the LeonOS CPU usage decreased from 100% to ~46%: + +.. code-block:: bash + + # Without 3A FPS limit on OAK PoE camera: + Cpu Usage - LeonOS 99.99%, LeonRT: 6.91% + + # Limiting 3A to 15 FPS on OAK PoE camera: + Cpu Usage - LeonOS 46.24%, LeonRT: 3.90% + +Not having 100% CPU usage also drastically decreased frame latency, in the example for the script below it went from ~710 ms to ~110ms: + +.. image:: https://github.com/luxonis/depthai-python/assets/18037362/84ec8de8-58ce-49c7-b882-048141d284e0 + +.. code-block:: diff + + from depthai_sdk import OakCamera + + with OakCamera() as oak: + color = oak.create_camera('color') + left = oak.create_camera('left') + right = oak.create_camera('right') + + + # Limiting 3A to 15 FPS + + for node in [color.node, left.node, right.node]: + + node.setIsp3aFps(15) + + oak.visualize([color, left, right], fps=True, scale=2/3) + oak.start(blocking=True) + +Limiting 3A FPS can be achieved by calling :code:`setIsp3aFps()` function on the camera node (either :ref:`ColorCamera` or :ref:`MonoCamera`). + .. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/docs/source/tutorials/image_quality.rst b/docs/source/tutorials/image_quality.rst index 4216bc442..5542badda 100644 --- a/docs/source/tutorials/image_quality.rst +++ b/docs/source/tutorials/image_quality.rst @@ -6,6 +6,7 @@ There are a few ways to improve Image Quality (IQ) on OAK cameras. A few example #. Changing :ref:`Color camera ISP configuration` #. Try keeping camera sensitivity low - :ref:`Low-light increased sensitivity` #. :ref:`Camera tuning` with custom tuning blobs +#. Ways to reduce :ref:`Motion blur` effects Note that the `Series 3 OAK cameras `__ will also have **temporal noise filter**, which will improve IQ. @@ -72,8 +73,32 @@ To tune your own camera sensors, one would need Intel's software, for which a li - **Mono tuning for low-light environments** `here `__. This allows auto-exposure to go up to 200ms (otherwise limited with default tuning to 33ms). 
For 200ms auto-exposure, you also need to limit the FPS (:code:`monoRight.setFps(5)`) - **Color tuning for low-light environments** `here `__. Comparison below. This allows auto-exposure to go up to 100ms (otherwise limited with default tuning to 33ms). For 200ms auto-exposure, you also need to limit the FPS (:code:`rgbCam.setFps(10)`). *Known limitation*: flicker can be seen with auto-exposure over 33ms, it is caused by auto-focus working in continuous mode. A workaround is to change from CONTINUOUS_VIDEO (default) to AUTO (focusing only once at init, and on further focus trigger commands): :code:`camRgb.initialControl.setAutoFocusMode(dai.CameraControl.AutoFocusMode.AUTO)` - **OV9782 Wide FOV color tuning for sunlight environments** `here `__. Fixes lens color filtering on direct sunglight, see `blog post here `__. It also improves LSC (Lens Shading Correction). Currently doesn't work for OV9282, so when used on eg. Series 2 OAK with Wide FOV cams, mono cameras shouldn't be enabled. +- **Camera exposure limit**: `max 500us `__, `max 8300us `__. These tuning blobs will limit the maximum exposure time, and instead start increasing ISO (sensitivity) after max exposure time is reached. This is a useful approach to reduce the :ref:`Motion blur`. .. image:: https://user-images.githubusercontent.com/18037362/149826169-3b92901d-3367-460b-afbf-c33d8dc9d118.jpeg +Motion blur +########### + +`Motion blur `__ appears when the camera shutter is opened for a longer time, and the object is moving during that time. + +.. image:: https://user-images.githubusercontent.com/18037362/209683640-2a640794-8422-4119-9d78-6c23690418a1.jpg + +In the image above the right foot moved about 50 pixels during the exposure time, which results in a blurry image in that region. +The left foot was on the ground the whole time of the exposure, so it's not blurry. + +In **high-vibration environments** we recommend **using Fixed-Focus** color camera, as otherwise the Auto-Focus lens will be shaking and +cause blurry images (`docs here `__). + +**Potential workarounds:** + +1. Decrease the shutter (exposure) time - this will decrease the motion blur, but will also decrease the light that reaches the sensor, so the image will be darker. You could either use a larger sensor (so more photons hit the sensor) or use a higher ISO (sensitivity) value. One option to limit max exposure time is by using a :reF:`Camera tuning` blob. +2. If the motion blur negatively affects your model's accuracy, you could fine-tune it to be more robust to motion blur by including motion blur images in your training dataset. Example video: + +.. raw:: html + +
+ +
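+
+If building a custom tuning blob is not practical, a rough alternative is to cap the exposure from the host by switching the sensor to manual exposure (a sketch; the 2 ms / ISO 800 values below are placeholders to illustrate the trade-off, not recommendations):
+
+.. code-block:: python
+
+   import depthai as dai
+
+   pipeline = dai.Pipeline()
+   camRgb = pipeline.create(dai.node.ColorCamera)
+
+   # Fixing a short exposure time reduces motion blur; the lost light is
+   # compensated by raising ISO, at the cost of more sensor noise.
+   # Note: this disables auto-exposure for this camera.
+   camRgb.initialControl.setManualExposure(2000, 800)  # exposure [us], ISO
+
+Exposure can also be adjusted at runtime by sending a ``dai.CameraControl`` message with ``setManualExposure()`` through an ``XLinkIn`` queue, as done in the ``rgb_camera_control.py`` example.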
.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/docs/source/tutorials/low-latency.rst b/docs/source/tutorials/low-latency.rst index a4d33aaf0..4fa18f94f 100644 --- a/docs/source/tutorials/low-latency.rst +++ b/docs/source/tutorials/low-latency.rst @@ -43,8 +43,53 @@ disabled for these tests (:code:`pipeline.setXLinkChunkSize(0)`). For an example - 246 Mbps - `link `__ +Below are the same tests, but with **OAK PoE** camera, which uses Gigabit ethernet link. The camera was connected directly to the computer, +without any switches or routers in between. Power was supplied via M8 connector. `oak_bandwidth_test.py `__ results: 797 mbps downlink, 264 mbps uplink. +`oak_latency_test.py `__ results: Average: 5.2 ms, Std: 6.2 + +.. list-table:: + :header-rows: 1 + + * - What + - Resolution + - FPS + - FPS set + - Time-to-Host [ms] + - Bandwidth + * - Color (isp) + - 1080P + - 25 + - 25 + - 51 + - 622 Mbps + * - Color (isp) + - 4K + - 8 + - 8 + - 148 + - 530 Mbps + * - Color (isp) + - 4K + - 8.5 + - 10 + - 530 + - 663 Mbps + * - Mono + - 400P + - 90 + - 90 + - Avrg: 12 (Std: 5.0) + - 184 Mbps + * - Mono + - 400P + - 110 + - 110 + - Avrg: 16 (Std: 9.4) + - 225 Mbps + + - **Time-to-Host** is measured time between frame timestamp (:code:`imgFrame.getTimestamp()`) and host timestamp when the frame is received (:code:`dai.Clock.now()`). -- **Histogram** shows how much Time-to-Host varies frame to frame. Y axis represents number of frame that occured at that time while the X axis represents microseconds. +- **Histogram** shows how much Time-to-Host varies frame to frame. Y axis represents number of frame that occurred at that time while the X axis represents microseconds. - **Bandwidth** is calculated bandwidth required to stream specified frames at specified FPS. Encoded frames @@ -113,6 +158,69 @@ branch of the DepthAI. This will pass pointers (at XLink level) to cv2.Mat inste so performance improvement would depend on the image sizes you are using. (Note: API differs and not all functionality is available as is on the `message_zero_copy` branch) +PoE latency +########### + +On PoE, the latency can vary quite a bit due to a number of factors: + +* **Network** itself. Eg. if you are in a large network with many nodes, the latency will be higher compared to using a direct connection. +* There's a **bottleneck** in **bandwidth**: + + * Perhaps some network link is 10mbps/100mbps instead of full 1gbps (due to switch/network card..). You can test this with `PoE Test script `__ (``speed`` should be 1000). + * Network/computer is saturated with other traffic. You can test the actual bandwidth with `OAK bandwidth test `__ script. With direct link I got ~800mbps downlink and ~210mbps uplink. + +* Computer's **Network Interface Card settings**, `documentation here `__ +* 100% OAK Leon CSS (CPU) usage. The Leon CSS core handles the POE communication (`see docs here `__), and if the CPU is 100% used, it will not be able to handle the communication as fast as it should. **Workaround:** See :ref:`CPU usage` docs. +* Another potential way to improve PoE latency would be to fine-tune network settings, like MTU, TCP window size, etc. (see `here `__ for more info) + +Bandwidth +######### + +With large, unencoded frames, one can quickly saturate the bandwidth even at 30FPS, especially on PoE devices (1gbps link): + +.. 
code-block:: + + 4K NV12/YUV420 frames: 3840 * 2160 * 1.5 * 30fps * 8bits = 3 gbps + 1080P NV12/YUV420 frames: 1920 * 1080 * 1.5 * 30fps * 8bits = 747 mbps + 720P NV12/YUV420 frames: 1280 * 720 * 1.5 * 30fps * 8bits = 331 mbps + + 1080P RGB frames: 1920 * 1080 * 3 * 30fps * 8bits = 1.5 gbps + + 800P depth frames: 1280 * 800 * 2 * 30fps * 8bits = 492 mbps + 400P depth frames: 640 * 400 * 2 * 30fps * 8bits = 123 mbps + + 800P mono frames: 1280 * 800 * 1 * 30fps * 8bits = 246 mbps + 400P mono frames: 640 * 400 * 1 * 30fps * 8bits = 62 mbps + +The third value in the formula is byte/pixel, which is 1.5 for NV12/YUV420, 3 for RGB, and 2 for depth frames, and 1 +for mono (grayscale) frames. It's either 1 (normal) or 2 (subpixel mode) for disparity frames. + +A few options to reduce bandwidth: + +- Encode frames (H.264, H.265, MJPEG) on-device using :ref:`VideoEncoder node ` +- Reduce FPS/resolution/number of streams + +Measuring operation times +######################### + +If user sets depthai level to `trace` (see :ref:`DepthAI debugging level`), depthai will log operation times for each node/process, as shown below. + +.. code-block:: bash + :emphasize-lines: 1,2,5,6,7,8,9,10,13 + + [SpatialDetectionNetwork(1)] [trace] SpatialDetectionNetwork syncing took '70.39142' ms. + [StereoDepth(4)] [trace] Warp node took '2.2945' ms. + [system] [trace] EV:0,S:0,IDS:27,IDD:10,TSS:2,TSN:601935518 + [system] [trace] EV:0,S:1,IDS:27,IDD:10,TSS:2,TSN:602001382 + [StereoDepth(4)] [trace] Stereo took '12.27392' ms. + [StereoDepth(4)] [trace] 'Median+Disparity to depth' pipeline took '0.86295' ms. + [StereoDepth(4)] [trace] Stereo post processing (total) took '0.931422' ms. + [SpatialDetectionNetwork(1)] [trace] NeuralNetwork inference took '62.274784' ms. + [StereoDepth(4)] [trace] Stereo rectification took '2.686294' ms. + [MonoCamera(3)] [trace] Mono ISP took '1.726888' ms. + [system] [trace] EV:0,S:0,IDS:20,IDD:25,TSS:2,TSN:616446812 + [system] [trace] EV:0,S:1,IDS:20,IDD:25,TSS:2,TSN:616489715 + [SpatialDetectionNetwork(1)] [trace] DetectionParser took '3.464118' ms. Reducing latency when running NN ################################ @@ -120,8 +228,65 @@ Reducing latency when running NN In the examples above we were only streaming frames, without doing anything else on the OAK camera. This section will focus on how to reduce latency when also running NN model on the OAK. -Lowering camera FPS to match NN FPS ------------------------------------ +1. Increasing NN resources +-------------------------- + +One option to reduce latency is to increase the NN resources. This can be done by changing the number of allocated NCEs and SHAVES (see HW accelerator `docs here `__). +`Compile Tool `__ can compile a model for more SHAVE cores. To allocate more NCEs, you can use API below: + +.. code-block:: python + + import depthai as dai + + pipeline = dai.Pipeline() + # nn = pipeline.createNeuralNetwork() + # nn = pipeline.create(dai.node.MobileNetDetectionNetwork) + nn = pipeline.create(dai.node.YoloDetectionNetwork) + nn.setNumInferenceThreads(1) # By default 2 threads are used + nn.setNumNCEPerInferenceThread(2) # By default, 1 NCE is used per thread + +Models usually run at **max FPS** when using 2 threads (1 NCE/Thread), and compiling model for ``AVAILABLE_SHAVES / 2``. + +Example of FPS & latency comparison for YoloV7-tiny: + +.. 
list-table:: + :header-rows: 1 + + * - NN resources + - Camera FPS + - Latency + - NN FPS + * - **6 SHAVEs, 2x Threads (1NCE/Thread)** + - 15 + - 155 ms + - 15 + * - 6 SHAVEs, 2x Threads (1NCE/Thread) + - 14 + - 149 ms + - 14 + * - 6 SHAVEs, 2x Threads (1NCE/Thread) + - 13 + - 146 ms + - 13 + * - 6 SHAVEs, 2x Threads (1NCE/Thread) + - 10 + - 141 ms + - 10 + * - **13 SHAVEs, 1x Thread (2NCE/Thread)** + - 30 + - 145 ms + - 11.6 + * - 13 SHAVEs, 1x Thread (2NCE/Thread) + - 12 + - 128 ms + - 12 + * - 13 SHAVEs, 1x Thread (2NCE/Thread) + - 10 + - 118 ms + - 10 + +2. Lowering camera FPS to match NN FPS +-------------------------------------- Lowering FPS to not exceed NN capabilities typically provides the best latency performance, since the NN is able to start the inference as soon as a new frame is available. @@ -139,11 +304,11 @@ This time includes the following: - And finally, eventual extra latency until it reaches the app Note: if the FPS is increased slightly more, towards 19..21 FPS, an extra latency of about 10ms appears, that we believe -is related to firmware. We are activaly looking for improvements for lower latencies. +is related to firmware. We are actively looking for improvements for lower latencies. -NN input queue size and blocking behaviour ------------------------------------------- +3. NN input queue size and blocking behavior +-------------------------------------------- If the app has ``detNetwork.input.setBlocking(False)``, but the queue size doesn't change, the following adjustment may help improve latency performance: diff --git a/docs/source/tutorials/standalone_mode.rst b/docs/source/tutorials/standalone_mode.rst index c4eb4f189..95f4440a5 100644 --- a/docs/source/tutorials/standalone_mode.rst +++ b/docs/source/tutorials/standalone_mode.rst @@ -67,26 +67,49 @@ can flash the pipeline to the device, along with its assests (eg. AI models). Yo After successfully flashing the pipeline, it will get started automatically when you power up the device. If you would like to change the flashed pipeline, simply re-flash it again. -.. - Clear flash - ########### +Alternatively, you can also flash the pipeline with the :ref:`Device Manager`. For this approach, you will need a Depthai Application Package (.dap), which you +can create with the following script: - Since pipeline will start when powering the device, this can lead to unnecesary heating. If you would like to clear - the flashed pipeline, use the code snippet below. - .. warning:: - Code below doesn't work yet. We will be adding "flashClear" helper function to the library. +.. code-block:: python - .. code-block:: python + import depthai as dai - import depthai as dai - (f, bl) = dai.DeviceBootloader.getFirstAvailableDevice() - if not f: - print('No devices found, exiting...') - exit(-1) + pipeline = dai.Pipeline() + + # Define standalone pipeline; add nodes and link them + # cam = pipeline.create(dai.node.ColorCamera) + # script = pipeline.create(dai.node.Script) + # ... + + # Create Depthai Application Package (.dap) + (f, bl) = dai.DeviceBootloader.getFirstAvailableDevice() + bootloader = dai.DeviceBootloader(bl) + bootloader.saveDepthaiApplicationPackage(pipeline=pipeline, path=) + + +Clear flash +########### + +Since pipeline will start when powering the device, this can lead to unnecesary heating. If you would like to clear +the flashed pipeline, use the code snippet below. + + +.. warning:: + Code below doesn't work yet. We will be adding "flashClear" helper function to the library. + + +.. 
code-block:: python + + import depthai as dai + (f, bl) = dai.DeviceBootloader.getFirstAvailableDevice() + if not f: + print('No devices found, exiting...') + exit(-1) + + with dai.DeviceBootloader(bl) as bootloader: + bootloader.flashClear() - with dai.DeviceBootloader(bl) as bootloader: - bootloader.flashClear() Factory reset ############# diff --git a/examples/AprilTag/apriltag.py b/examples/AprilTag/apriltag.py index 2141ca132..bb41d5e70 100755 --- a/examples/AprilTag/apriltag.py +++ b/examples/AprilTag/apriltag.py @@ -19,7 +19,7 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") aprilTag.initialConfig.setFamily(dai.AprilTagConfig.Family.TAG_36H11) diff --git a/examples/AprilTag/apriltag_rgb.py b/examples/AprilTag/apriltag_rgb.py index 5d2858749..fa4f5ad31 100755 --- a/examples/AprilTag/apriltag_rgb.py +++ b/examples/AprilTag/apriltag_rgb.py @@ -20,7 +20,7 @@ # Properties camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) manip.initialConfig.setResize(480, 270) manip.initialConfig.setFrameType(dai.ImgFrame.Type.GRAY8) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index ea38f5bc1..511a59cce 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -109,6 +109,7 @@ add_python_example(rgb_camera_control ColorCamera/rgb_camera_control.py) add_python_example(rgb_preview ColorCamera/rgb_preview.py) add_python_example(rgb_scene ColorCamera/rgb_scene.py) add_python_example(rgb_video ColorCamera/rgb_video.py) +add_python_example(rgb_isp_scale ColorCamera/rgb_isp_scale.py) ## EdgeDetector add_python_example(edge_detector EdgeDetector/edge_detector.py) diff --git a/examples/Camera/camera_isp.py b/examples/Camera/camera_isp.py new file mode 100755 index 000000000..5173d56e9 --- /dev/null +++ b/examples/Camera/camera_isp.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 + +import cv2 +import depthai as dai +import time + +# Connect to device and start pipeline +with dai.Device() as device: + # Device name + print('Device name:', device.getDeviceName()) + # Bootloader version + if device.getBootloaderVersion() is not None: + print('Bootloader version:', device.getBootloaderVersion()) + # Print out usb speed + print('Usb speed:', device.getUsbSpeed().name) + # Connected cameras + print('Connected cameras:', device.getConnectedCameraFeatures()) + + # Create pipeline + pipeline = dai.Pipeline() + cams = device.getConnectedCameraFeatures() + streams = [] + for cam in cams: + print(str(cam), str(cam.socket), cam.socket) + c = pipeline.create(dai.node.Camera) + x = pipeline.create(dai.node.XLinkOut) + c.isp.link(x.input) + c.setBoardSocket(cam.socket) + stream = str(cam.socket) + if cam.name: + stream = f'{cam.name} ({stream})' + x.setStreamName(stream) + streams.append(stream) + + # Start pipeline + device.startPipeline(pipeline) + fpsCounter = {} + lastFpsCount = {} + tfps = time.time() + while not device.isClosed(): + queueNames = device.getQueueEvents(streams) + for stream in queueNames: + messages = device.getOutputQueue(stream).tryGetAll() + fpsCounter[stream] = fpsCounter.get(stream, 0.0) + len(messages) + for message in messages: + # Display arrived frames + if type(message) == dai.ImgFrame: + # render fps + fps = lastFpsCount.get(stream, 0) + frame = message.getCvFrame() + cv2.putText(frame, "Fps: {:.2f}".format(fps), (10, 10), 
cv2.FONT_HERSHEY_TRIPLEX, 0.4, (255,255,255)) + cv2.imshow(stream, frame) + + if time.time() - tfps >= 1.0: + scale = time.time() - tfps + for stream in fpsCounter.keys(): + lastFpsCount[stream] = fpsCounter[stream] / scale + fpsCounter = {} + tfps = time.time() + + if cv2.waitKey(1) == ord('q'): + break diff --git a/examples/Camera/camera_preview.py b/examples/Camera/camera_preview.py new file mode 100755 index 000000000..88c5cc3c3 --- /dev/null +++ b/examples/Camera/camera_preview.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 + +import cv2 +import depthai as dai +import time + +# Connect to device and start pipeline +with dai.Device() as device: + # Device name + print('Device name:', device.getDeviceName()) + # Bootloader version + if device.getBootloaderVersion() is not None: + print('Bootloader version:', device.getBootloaderVersion()) + # Print out usb speed + print('Usb speed:', device.getUsbSpeed().name) + # Connected cameras + print('Connected cameras:', device.getConnectedCameraFeatures()) + + # Create pipeline + pipeline = dai.Pipeline() + cams = device.getConnectedCameraFeatures() + streams = [] + for cam in cams: + print(str(cam), str(cam.socket), cam.socket) + c = pipeline.create(dai.node.Camera) + x = pipeline.create(dai.node.XLinkOut) + c.preview.link(x.input) + c.setBoardSocket(cam.socket) + stream = str(cam.socket) + if cam.name: + stream = f'{cam.name} ({stream})' + x.setStreamName(stream) + streams.append(stream) + + # Start pipeline + device.startPipeline(pipeline) + fpsCounter = {} + lastFpsCount = {} + tfps = time.time() + while not device.isClosed(): + queueNames = device.getQueueEvents(streams) + for stream in queueNames: + messages = device.getOutputQueue(stream).tryGetAll() + fpsCounter[stream] = fpsCounter.get(stream, 0.0) + len(messages) + for message in messages: + # Display arrived frames + if type(message) == dai.ImgFrame: + # render fps + fps = lastFpsCount.get(stream, 0) + frame = message.getCvFrame() + cv2.putText(frame, "Fps: {:.2f}".format(fps), (10, 10), cv2.FONT_HERSHEY_TRIPLEX, 0.4, (255,255,255)) + cv2.imshow(stream, frame) + + if time.time() - tfps >= 1.0: + scale = time.time() - tfps + for stream in fpsCounter.keys(): + lastFpsCount[stream] = fpsCounter[stream] / scale + fpsCounter = {} + tfps = time.time() + + if cv2.waitKey(1) == ord('q'): + break diff --git a/examples/ColorCamera/rgb_isp_scale.py b/examples/ColorCamera/rgb_isp_scale.py new file mode 100755 index 000000000..fd8d725ea --- /dev/null +++ b/examples/ColorCamera/rgb_isp_scale.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 + +import cv2 +import depthai as dai + +# Create pipeline +pipeline = dai.Pipeline() + +# Define source and output +camRgb = pipeline.create(dai.node.ColorCamera) +xoutVideo = pipeline.create(dai.node.XLinkOut) + +xoutVideo.setStreamName("video") + +# Properties +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) +camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) +camRgb.setIspScale(1, 2) +camRgb.setVideoSize(1920, 1080) + +xoutVideo.input.setBlocking(False) +xoutVideo.input.setQueueSize(1) + +# Linking +camRgb.video.link(xoutVideo.input) + +# Connect to device and start pipeline +with dai.Device(pipeline) as device: + + video = device.getOutputQueue(name="video", maxSize=1, blocking=False) + + while True: + videoIn = video.get() + + # Get BGR frame from NV12 encoded video frame to show with opencv + # Visualizing the frame on slower hosts might have overhead + cv2.imshow("video", videoIn.getCvFrame()) + + if cv2.waitKey(1) == ord('q'): + break 
diff --git a/examples/ColorCamera/rgb_undistort.py b/examples/ColorCamera/rgb_undistort.py index 5111bbca6..97e71b184 100755 --- a/examples/ColorCamera/rgb_undistort.py +++ b/examples/ColorCamera/rgb_undistort.py @@ -3,7 +3,7 @@ import numpy as np camRes = dai.ColorCameraProperties.SensorResolution.THE_1080_P -camSocket = dai.CameraBoardSocket.RGB +camSocket = dai.CameraBoardSocket.CAM_A ispScale = (1,2) def getMesh(calibData, ispSize): diff --git a/examples/ColorCamera/rgb_uvc.py b/examples/ColorCamera/rgb_uvc.py new file mode 100755 index 000000000..78a27129b --- /dev/null +++ b/examples/ColorCamera/rgb_uvc.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python3 + +import time +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('-fb', '--flash-bootloader', default=False, action="store_true") +parser.add_argument('-f', '--flash-app', default=False, action="store_true") +parser.add_argument('-l', '--load-and-exit', default=False, action="store_true") +args = parser.parse_args() + +if args.load_and_exit: + import os + # Disabling device watchdog, so it doesn't need the host to ping periodically. + # Note: this is done before importing `depthai` + os.environ["DEPTHAI_WATCHDOG"] = "0" + +import depthai as dai + +def getPipeline(): + enable_4k = True # Will downscale 4K -> 1080p + + pipeline = dai.Pipeline() + + # Define a source - color camera + cam_rgb = pipeline.createColorCamera() + cam_rgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) + cam_rgb.setInterleaved(False) + #cam_rgb.initialControl.setManualFocus(130) + + if enable_4k: + cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) + cam_rgb.setIspScale(1, 2) + else: + cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) + + # Create an UVC (USB Video Class) output node + uvc = pipeline.createUVC() + cam_rgb.video.link(uvc.input) + + # Note: if the pipeline is sent later to device (using startPipeline()), + # it is important to pass the device config separately when creating the device + config = dai.Device.Config() + # config.board.uvc = dai.BoardConfig.UVC() # enable default 1920x1080 NV12 + config.board.uvc = dai.BoardConfig.UVC(1920, 1080) + config.board.uvc.frameType = dai.ImgFrame.Type.NV12 + # config.board.uvc.cameraName = "My Custom Cam" + pipeline.setBoardConfig(config.board) + + return pipeline + +# Will flash the bootloader if no pipeline is provided as argument +def flash(pipeline=None): + (f, bl) = dai.DeviceBootloader.getFirstAvailableDevice() + bootloader = dai.DeviceBootloader(bl, True) + + # Create a progress callback lambda + progress = lambda p : print(f'Flashing progress: {p*100:.1f}%') + + startTime = time.monotonic() + if pipeline is None: + print("Flashing bootloader...") + bootloader.flashBootloader(progress) + else: + print("Flashing application pipeline...") + bootloader.flash(progress, pipeline) + + elapsedTime = round(time.monotonic() - startTime, 2) + print("Done in", elapsedTime, "seconds") + +if args.flash_bootloader or args.flash_app: + if args.flash_bootloader: flash() + if args.flash_app: flash(getPipeline()) + print("Flashing successful. Please power-cycle the device") + quit() + +if args.load_and_exit: + device = dai.Device(getPipeline()) + print("\nDevice started. Attempting to force-terminate this process...") + print("Open an UVC viewer to check the camera stream.") + print("To reconnect with depthai, a device power-cycle may be required in some cases") + # We do not want the device to be closed, so terminate the process uncleanly. 
+ # (TODO add depthai API to be able to cleanly exit without closing device) + import signal + os.kill(os.getpid(), signal.SIGTERM) + +# Standard UVC load with depthai +with dai.Device(getPipeline()) as device: + print("\nDevice started, please keep this process running") + print("and open an UVC viewer to check the camera stream.") + print("\nTo close: Ctrl+C") + + # Doing nothing here, just keeping the host feeding the watchdog + while True: + try: + time.sleep(0.1) + except KeyboardInterrupt: + break diff --git a/examples/ColorCamera/rgb_video.py b/examples/ColorCamera/rgb_video.py index c808e333b..66a32d2ae 100755 --- a/examples/ColorCamera/rgb_video.py +++ b/examples/ColorCamera/rgb_video.py @@ -13,7 +13,7 @@ xoutVideo.setStreamName("video") # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) camRgb.setVideoSize(1920, 1080) diff --git a/examples/CrashReport/crash_report.py b/examples/CrashReport/crash_report.py new file mode 100755 index 000000000..a68b48ade --- /dev/null +++ b/examples/CrashReport/crash_report.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 + +import cv2 +import depthai as dai +from json import dump +from os.path import exists + +# Connect to device and start pipeline +with dai.Device() as device: + + if device.hasCrashDump(): + crashDump = device.getCrashDump() + commitHash = crashDump.depthaiCommitHash + deviceId = crashDump.deviceId + + json = crashDump.serializeToJson() + + i = -1 + while True: + i += 1 + destPath = "crashDump_" + str(i) + "_" + deviceId + "_" + commitHash + ".json" + if exists(destPath): + continue + + with open(destPath, 'w', encoding='utf-8') as f: + dump(json, f, ensure_ascii=False, indent=4) + + print("Crash dump found on your device!") + print(f"Saved to {destPath}") + print("Please report to developers!") + break + else: + print("There was no crash dump found on your device!") + diff --git a/examples/EdgeDetector/edge_detector.py b/examples/EdgeDetector/edge_detector.py index db699e404..b23dfd1a3 100755 --- a/examples/EdgeDetector/edge_detector.py +++ b/examples/EdgeDetector/edge_detector.py @@ -32,13 +32,13 @@ xinEdgeCfg.setStreamName(edgeCfgStr) # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") edgeDetectorRgb.setMaxOutputFrameSize(camRgb.getVideoWidth() * camRgb.getVideoHeight()) diff --git a/examples/FeatureTracker/feature_detector.py b/examples/FeatureTracker/feature_detector.py index c9a5642cc..ebad14411 100755 --- a/examples/FeatureTracker/feature_detector.py +++ b/examples/FeatureTracker/feature_detector.py @@ -27,9 +27,9 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Disable optical flow featureTrackerLeft.initialConfig.setMotionEstimator(False) diff --git 
a/examples/FeatureTracker/feature_tracker.py b/examples/FeatureTracker/feature_tracker.py index 61ddf4ce7..4be337e91 100755 --- a/examples/FeatureTracker/feature_tracker.py +++ b/examples/FeatureTracker/feature_tracker.py @@ -94,9 +94,9 @@ def __init__(self, trackbarName, windowName): # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Linking monoLeft.out.link(featureTrackerLeft.inputImage) diff --git a/examples/ImageManip/image_manip_rotate.py b/examples/ImageManip/image_manip_rotate.py index f986919e4..37b09733d 100755 --- a/examples/ImageManip/image_manip_rotate.py +++ b/examples/ImageManip/image_manip_rotate.py @@ -27,7 +27,7 @@ # Rotate mono frames monoLeft = pipeline.create(dai.node.MonoCamera) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") manipLeft = pipeline.create(dai.node.ImageManip) rr = dai.RotatedRect() diff --git a/examples/ImageManip/image_manip_warp_mesh.py b/examples/ImageManip/image_manip_warp_mesh.py old mode 100644 new mode 100755 index 3eeaa43da..eadff4c77 --- a/examples/ImageManip/image_manip_warp_mesh.py +++ b/examples/ImageManip/image_manip_warp_mesh.py @@ -12,7 +12,7 @@ maxFrameSize = camRgb.getPreviewWidth() * camRgb.getPreviewHeight() * 3 # Warp preview frame 1 -manip1 = pipeline.create(dai.node.Warp) +manip1 = pipeline.create(dai.node.ImageManip) # Create a custom warp mesh tl = dai.Point2f(20, 20) tr = dai.Point2f(460, 20) diff --git a/examples/MobileNet/mono_mobilenet.py b/examples/MobileNet/mono_mobilenet.py index 635dfc6af..49ccae728 100755 --- a/examples/MobileNet/mono_mobilenet.py +++ b/examples/MobileNet/mono_mobilenet.py @@ -33,7 +33,7 @@ nnOut.setStreamName("nn") # Properties -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) # Convert the grayscale frame into the nn-acceptable form diff --git a/examples/MonoCamera/mono_camera_control.py b/examples/MonoCamera/mono_camera_control.py index 059ae8c66..b70c59441 100755 --- a/examples/MonoCamera/mono_camera_control.py +++ b/examples/MonoCamera/mono_camera_control.py @@ -48,8 +48,8 @@ def clamp(num, v0, v1): bottomRight = dai.Point2f(0.8, 0.8) # Properties -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoRight.setCamera("right") +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) manipRight.initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y) diff --git a/examples/MonoCamera/mono_full_resolution_saver.py b/examples/MonoCamera/mono_full_resolution_saver.py index 42a489361..06e263418 100755 --- a/examples/MonoCamera/mono_full_resolution_saver.py +++ b/examples/MonoCamera/mono_full_resolution_saver.py @@ -15,7 +15,7 @@ xoutRight.setStreamName("right") # Properties -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) # Linking diff --git a/examples/MonoCamera/mono_preview.py 
b/examples/MonoCamera/mono_preview.py index cf3668210..eff46654e 100755 --- a/examples/MonoCamera/mono_preview.py +++ b/examples/MonoCamera/mono_preview.py @@ -19,9 +19,9 @@ xoutVertical.setStreamName('vertical') # Properties -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) monoVertical.setBoardSocket(dai.CameraBoardSocket.VERTICAL) monoVertical.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) diff --git a/examples/MonoCamera/mono_preview_alternate_pro.py b/examples/MonoCamera/mono_preview_alternate_pro.py old mode 100644 new mode 100755 index 826d5d927..6f5997150 --- a/examples/MonoCamera/mono_preview_alternate_pro.py +++ b/examples/MonoCamera/mono_preview_alternate_pro.py @@ -19,11 +19,11 @@ monoL = pipeline.create(dai.node.MonoCamera) monoR = pipeline.create(dai.node.MonoCamera) -monoL.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoL.setCamera("left") monoL.setResolution(res) monoL.setFps(fps) monoL.setNumFramesPool(poolSize) -monoR.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoR.setCamera("right") monoR.setResolution(res) monoR.setFps(fps) monoR.setNumFramesPool(poolSize) diff --git a/examples/NeuralNetwork/concat_multi_input.py b/examples/NeuralNetwork/concat_multi_input.py old mode 100644 new mode 100755 index 93e9d03d0..08b65a5f7 --- a/examples/NeuralNetwork/concat_multi_input.py +++ b/examples/NeuralNetwork/concat_multi_input.py @@ -42,8 +42,8 @@ def create_mono(p, socket): nn.setNumInferenceThreads(2) camRgb.preview.link(nn.inputs['img2']) -create_mono(p, dai.CameraBoardSocket.LEFT).link(nn.inputs['img1']) -create_mono(p, dai.CameraBoardSocket.RIGHT).link(nn.inputs['img3']) +create_mono(p, dai.CameraBoardSocket.CAM_B).link(nn.inputs['img1']) +create_mono(p, dai.CameraBoardSocket.CAM_C).link(nn.inputs['img3']) # Send bouding box from the NN to the host via XLink nn_xout = p.createXLinkOut() diff --git a/examples/NeuralNetwork/detection_parser.py b/examples/NeuralNetwork/detection_parser.py index 340fb6040..2630e2c3b 100755 --- a/examples/NeuralNetwork/detection_parser.py +++ b/examples/NeuralNetwork/detection_parser.py @@ -42,11 +42,11 @@ nn.setNumInferenceThreads(2) nn.input.setBlocking(False) -blob = dai.OpenVINO.Blob(args.nnPath); -nn.setBlob(blob); -det.setBlob(blob); -det.setNNFamily(dai.DetectionNetworkType.MOBILENET); -det.setConfidenceThreshold(0.5); +blob = dai.OpenVINO.Blob(args.nnPath) +nn.setBlob(blob) +det.setBlob(blob) +det.setNNFamily(dai.DetectionNetworkType.MOBILENET) +det.setConfidenceThreshold(0.5) # Linking if args.sync: diff --git a/examples/ObjectTracker/spatial_object_tracker.py b/examples/ObjectTracker/spatial_object_tracker.py index a181563cc..09053b09b 100755 --- a/examples/ObjectTracker/spatial_object_tracker.py +++ b/examples/ObjectTracker/spatial_object_tracker.py @@ -43,14 +43,14 @@ camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # setting node configs stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) # Align 
depth map to the perspective of RGB camera, on which inference is done -stereo.setDepthAlign(dai.CameraBoardSocket.RGB) +stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight()) spatialDetectionNetwork.setBlobPath(args.nnPath) diff --git a/examples/Script/script_change_pipeline_flow.py b/examples/Script/script_change_pipeline_flow.py old mode 100644 new mode 100755 index 3774faf50..b528b99d7 --- a/examples/Script/script_change_pipeline_flow.py +++ b/examples/Script/script_change_pipeline_flow.py @@ -10,7 +10,7 @@ pipeline = dai.Pipeline() cam = pipeline.createColorCamera() -cam.setBoardSocket(dai.CameraBoardSocket.RGB) +cam.setBoardSocket(dai.CameraBoardSocket.CAM_A) cam.setInterleaved(False) cam.setIspScale(2,3) cam.setVideoSize(720,720) diff --git a/examples/Script/script_emmc_access.py b/examples/Script/script_emmc_access.py new file mode 100644 index 000000000..a93485976 --- /dev/null +++ b/examples/Script/script_emmc_access.py @@ -0,0 +1,98 @@ +import depthai as dai +import cv2 + +# Start defining a pipeline +pipeline = dai.Pipeline() + +board = dai.BoardConfig() +board.emmc = True +pipeline.setBoardConfig(board) + +# Define source and output +camRgb = pipeline.create(dai.node.ColorCamera) +jpegEncoder = pipeline.create(dai.node.VideoEncoder) + +# Properties +camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) +jpegEncoder.setDefaultProfilePreset(1, dai.VideoEncoderProperties.Profile.MJPEG) + +#Set a write script +script_write = pipeline.createScript() +script_write.setProcessor(dai.ProcessorType.LEON_CSS) +script_write.setScript(""" + + import os + index = 1000 + import time + while True: + # Find an unused file name first + while True: + path = '/media/mmcsd-0-0/' + str(index) + '.jpg' + if not os.path.exists(path): + break + index += 1 + frame = node.io['jpeg'].get() + node.warn(f'Saving to EMMC: {path}') + with open(path, 'wb') as f: + f.write(frame.getData()) + index += 1 + time.sleep(3) + +""") + +#Set a read script +script_read = pipeline.createScript() +script_read.setProcessor(dai.ProcessorType.LEON_CSS) +script_read.setScript(""" + + import http.server + import socketserver + import socket + import fcntl + import struct + import os + + def get_ip_address(ifname): + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + return socket.inet_ntoa(fcntl.ioctl( + s.fileno(), + -1071617759, # SIOCGIFADDR + struct.pack('256s', ifname[:15].encode()) + )[20:24]) + + # Note: `chdir` here will prevent unmount, this should be improved! 
+ os.chdir('/media/mmcsd-0-0') + + PORT = 80 + Handler = http.server.SimpleHTTPRequestHandler + + with socketserver.TCPServer(("", PORT), Handler) as httpd: + ip = get_ip_address('re0') + node.warn(f'===== HTTP file server accessible at: http://{ip}') + httpd.serve_forever() + +""") + +# Linking + +camRgb.video.link(jpegEncoder.input) +jpegEncoder.bitstream.link(script_write.inputs['jpeg']) +script_write.inputs['jpeg'].setBlocking(False) +xout = pipeline.create(dai.node.XLinkOut) +xout.setStreamName("rgb") +script_read.outputs['jpeg'].link(xout.input) + + +# Pipeline defined, now the device is connected to +with dai.Device(pipeline) as device: + # Output queue will be used to get the rgb frames from the output defined above + qRgb = device.getOutputQueue(name="rgb", maxSize=100, blocking=False) + + while True: + inRgb = qRgb.tryGet() + + if inRgb is not None: + cv2.imshow("rgb", inRgb.getCvFrame()) + + if cv2.waitKey(1) == ord('q'): + break \ No newline at end of file diff --git a/examples/Script/script_forward_frames.py b/examples/Script/script_forward_frames.py old mode 100644 new mode 100755 diff --git a/examples/Script/script_uart.py b/examples/Script/script_uart.py new file mode 100644 index 000000000..329a0ed59 --- /dev/null +++ b/examples/Script/script_uart.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +''' +NOTE: This should only be run on OAK-FFC-4P, as other OAK cameras might have different GPIO configuration! +''' +import depthai as dai +import time + +# Start defining a pipeline +pipeline = dai.Pipeline() + +script = pipeline.create(dai.node.Script) +script.setScript(""" + import serial + import time + + ser = serial.Serial("/dev/ttyS0", baudrate=115200) + i = 0 + while True: + i += 1 + time.sleep(0.1) + serString = f'TEST_{i}' + ser.write(serString.encode()) +""") +# Define script for output +script.setProcessor(dai.ProcessorType.LEON_CSS) + + +config = dai.Device.Config() +# Get argument first +GPIO = dai.BoardConfig.GPIO +config.board.gpio[15] = GPIO(GPIO.OUTPUT, GPIO.ALT_MODE_2) +config.board.gpio[16] = GPIO(GPIO.INPUT, GPIO.ALT_MODE_2) +config.board.uart[0] = dai.BoardConfig.UART() + + +with dai.Device(config) as device: + device.startPipeline(pipeline) + print("Pipeline started") + while True: + time.sleep(1) diff --git a/examples/SpatialDetection/spatial_calculator_multi_roi.py b/examples/SpatialDetection/spatial_calculator_multi_roi.py index dbf7c6fce..24d29020f 100755 --- a/examples/SpatialDetection/spatial_calculator_multi_roi.py +++ b/examples/SpatialDetection/spatial_calculator_multi_roi.py @@ -3,6 +3,7 @@ import cv2 import depthai as dai import math +import numpy as np # Create pipeline pipeline = dai.Pipeline() @@ -23,13 +24,13 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) stereo.setLeftRightCheck(True) -stereo.setExtendedDisparity(True) +stereo.setSubpixel(True) spatialLocationCalculator.inputConfig.setWaitForMessage(False) # Create 10 ROIs @@ -65,8 +66,10 @@ depthFrame = inDepth.getFrame() # depthFrame values are in millimeters - depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1) - depthFrameColor = cv2.equalizeHist(depthFrameColor) + depth_downscaled = depthFrame[::4] + 
min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1) + max_depth = np.percentile(depth_downscaled, 99) + depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8) depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT) spatialData = spatialCalcQueue.get().getSpatialLocations() diff --git a/examples/SpatialDetection/spatial_location_calculator.py b/examples/SpatialDetection/spatial_location_calculator.py index fe7a13aa1..2e35e409f 100755 --- a/examples/SpatialDetection/spatial_location_calculator.py +++ b/examples/SpatialDetection/spatial_location_calculator.py @@ -2,7 +2,7 @@ import cv2 import depthai as dai - +import numpy as np stepSize = 0.05 newConfig = False @@ -26,16 +26,13 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) - -lrcheck = False -subpixel = False +monoRight.setCamera("right") stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) -stereo.setLeftRightCheck(lrcheck) -stereo.setSubpixel(subpixel) +stereo.setLeftRightCheck(True) +stereo.setSubpixel(True) # Config topLeft = dai.Point2f(0.4, 0.4) @@ -44,6 +41,7 @@ config = dai.SpatialLocationCalculatorConfigData() config.depthThresholds.lowerThreshold = 100 config.depthThresholds.upperThreshold = 10000 +calculationAlgorithm = dai.SpatialLocationCalculatorAlgorithm.MEDIAN config.roi = dai.Rect(topLeft, bottomRight) spatialLocationCalculator.inputConfig.setWaitForMessage(False) @@ -76,8 +74,10 @@ depthFrame = inDepth.getFrame() # depthFrame values are in millimeters - depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1) - depthFrameColor = cv2.equalizeHist(depthFrameColor) + depth_downscaled = depthFrame[::4] + min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1) + max_depth = np.percentile(depth_downscaled, 99) + depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8) depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT) spatialData = spatialCalcQueue.get().getSpatialLocations() @@ -93,10 +93,10 @@ depthMax = depthData.depthMax fontType = cv2.FONT_HERSHEY_TRIPLEX - cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX) - cv2.putText(depthFrameColor, f"X: {int(depthData.spatialCoordinates.x)} mm", (xmin + 10, ymin + 20), fontType, 0.5, 255) - cv2.putText(depthFrameColor, f"Y: {int(depthData.spatialCoordinates.y)} mm", (xmin + 10, ymin + 35), fontType, 0.5, 255) - cv2.putText(depthFrameColor, f"Z: {int(depthData.spatialCoordinates.z)} mm", (xmin + 10, ymin + 50), fontType, 0.5, 255) + cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, 1) + cv2.putText(depthFrameColor, f"X: {int(depthData.spatialCoordinates.x)} mm", (xmin + 10, ymin + 20), fontType, 0.5, color) + cv2.putText(depthFrameColor, f"Y: {int(depthData.spatialCoordinates.y)} mm", (xmin + 10, ymin + 35), fontType, 0.5, color) + cv2.putText(depthFrameColor, f"Z: {int(depthData.spatialCoordinates.z)} mm", (xmin + 10, ymin + 50), fontType, 0.5, color) # Show the frame cv2.imshow("depth", depthFrameColor) @@ -123,10 +123,30 @@ topLeft.x += stepSize bottomRight.x += stepSize newConfig = True + elif key == ord('1'): + calculationAlgorithm = dai.SpatialLocationCalculatorAlgorithm.MEAN + 
print('Switching calculation algorithm to MEAN!') + newConfig = True + elif key == ord('2'): + calculationAlgorithm = dai.SpatialLocationCalculatorAlgorithm.MIN + print('Switching calculation algorithm to MIN!') + newConfig = True + elif key == ord('3'): + calculationAlgorithm = dai.SpatialLocationCalculatorAlgorithm.MAX + print('Switching calculation algorithm to MAX!') + newConfig = True + elif key == ord('4'): + calculationAlgorithm = dai.SpatialLocationCalculatorAlgorithm.MODE + print('Switching calculation algorithm to MODE!') + newConfig = True + elif key == ord('5'): + calculationAlgorithm = dai.SpatialLocationCalculatorAlgorithm.MEDIAN + print('Switching calculation algorithm to MEDIAN!') + newConfig = True if newConfig: config.roi = dai.Rect(topLeft, bottomRight) - config.calculationAlgorithm = dai.SpatialLocationCalculatorAlgorithm.AVERAGE + config.calculationAlgorithm = calculationAlgorithm cfg = dai.SpatialLocationCalculatorConfig() cfg.addROI(config) spatialCalcConfigInQueue.send(cfg) diff --git a/examples/SpatialDetection/spatial_mobilenet.py b/examples/SpatialDetection/spatial_mobilenet.py index ec2eff715..42643b207 100755 --- a/examples/SpatialDetection/spatial_mobilenet.py +++ b/examples/SpatialDetection/spatial_mobilenet.py @@ -52,14 +52,15 @@ camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Setting node configs stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) # Align depth map to the perspective of RGB camera, on which inference is done -stereo.setDepthAlign(dai.CameraBoardSocket.RGB) +stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) +stereo.setSubpixel(True) stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight()) spatialDetectionNetwork.setBlobPath(nnBlobPath) @@ -113,8 +114,10 @@ depthFrame = depth.getFrame() # depthFrame values are in millimeters - depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1) - depthFrameColor = cv2.equalizeHist(depthFrameColor) + depth_downscaled = depthFrame[::4] + min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1) + max_depth = np.percentile(depth_downscaled, 99) + depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8) depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT) detections = inDet.detections @@ -132,7 +135,7 @@ ymin = int(topLeft.y) xmax = int(bottomRight.x) ymax = int(bottomRight.y) - cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX) + cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, 1) # Denormalize bounding box x1 = int(detection.xmin * width) diff --git a/examples/SpatialDetection/spatial_mobilenet_mono.py b/examples/SpatialDetection/spatial_mobilenet_mono.py index 30a237d2f..8c7634a72 100755 --- a/examples/SpatialDetection/spatial_mobilenet_mono.py +++ b/examples/SpatialDetection/spatial_mobilenet_mono.py @@ -53,12 +53,13 @@ imageManip.initialConfig.setFrameType(dai.ImgFrame.Type.BGR888p) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") 
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # StereoDepth stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) +stereo.setSubpixel(True) # Define a neural network that will make predictions based on the source frames spatialDetectionNetwork.setConfidenceThreshold(0.5) @@ -116,8 +117,10 @@ depthFrame = inDepth.getFrame() # depthFrame values are in millimeters - depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1) - depthFrameColor = cv2.equalizeHist(depthFrameColor) + depth_downscaled = depthFrame[::4] + min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1) + max_depth = np.percentile(depth_downscaled, 99) + depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8) depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT) detections = inDet.detections diff --git a/examples/SpatialDetection/spatial_tiny_yolo.py b/examples/SpatialDetection/spatial_tiny_yolo.py index 5575bccd6..6350ca9f1 100755 --- a/examples/SpatialDetection/spatial_tiny_yolo.py +++ b/examples/SpatialDetection/spatial_tiny_yolo.py @@ -75,15 +75,16 @@ camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # setting node configs stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) # Align depth map to the perspective of RGB camera, on which inference is done -stereo.setDepthAlign(dai.CameraBoardSocket.RGB) +stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight()) +stereo.setSubpixel(True) spatialDetectionNetwork.setBlobPath(nnBlobPath) spatialDetectionNetwork.setConfidenceThreshold(0.5) @@ -146,8 +147,10 @@ frame = inPreview.getCvFrame() depthFrame = depth.getFrame() # depthFrame values are in millimeters - depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1) - depthFrameColor = cv2.equalizeHist(depthFrameColor) + depth_downscaled = depthFrame[::4] + min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1) + max_depth = np.percentile(depth_downscaled, 99) + depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8) depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT) counter+=1 @@ -172,7 +175,7 @@ ymin = int(topLeft.y) xmax = int(bottomRight.x) ymax = int(bottomRight.y) - cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX) + cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, 1) # Denormalize bounding box x1 = int(detection.xmin * width) diff --git a/examples/StereoDepth/depth_crop_control.py b/examples/StereoDepth/depth_crop_control.py index eae2677a4..70dcb5a11 100755 --- a/examples/StereoDepth/depth_crop_control.py +++ b/examples/StereoDepth/depth_crop_control.py @@ -32,14 +32,15 @@ bottomRight = dai.Point2f(0.8, 0.8) # Properties -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoRight.setCamera("right") +monoLeft.setCamera("left") 
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) manip.initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y) manip.setMaxOutputFrameSize(monoRight.getResolutionHeight()*monoRight.getResolutionWidth()*3) stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) +stereo.setSubpixel(True) # Linking configIn.out.link(manip.inputConfig) @@ -62,8 +63,10 @@ depthFrame = inDepth.getFrame() # depthFrame values are in millimeters # Frame is transformed, the color map will be applied to highlight the depth info - depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1) - depthFrameColor = cv2.equalizeHist(depthFrameColor) + depth_downscaled = depthFrame[::4] + min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1) + max_depth = np.percentile(depth_downscaled, 99) + depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8) depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT) # Frame is ready to be shown diff --git a/examples/StereoDepth/depth_post_processing.py b/examples/StereoDepth/depth_post_processing.py index b1844b1ca..732e91232 100755 --- a/examples/StereoDepth/depth_post_processing.py +++ b/examples/StereoDepth/depth_post_processing.py @@ -24,9 +24,9 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way) depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) diff --git a/examples/StereoDepth/depth_preview.py b/examples/StereoDepth/depth_preview.py index 4b8b6b10e..fd7433b06 100755 --- a/examples/StereoDepth/depth_preview.py +++ b/examples/StereoDepth/depth_preview.py @@ -24,9 +24,9 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way) depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) diff --git a/examples/StereoDepth/depth_preview_lr.py b/examples/StereoDepth/depth_preview_lr.py new file mode 100755 index 000000000..8bf6fff6d --- /dev/null +++ b/examples/StereoDepth/depth_preview_lr.py @@ -0,0 +1,124 @@ +#!/usr/bin/env python3 + +import cv2 +import depthai as dai +import numpy as np + +# Closer-in minimum depth, disparity range is doubled (from 95 to 190): +extended_disparity = True +# Better accuracy for longer distance, fractional disparity 32-levels: +subpixel = True +# Better handling for occlusions: +lr_check = True + +enableRectified = False + +# Create pipeline +pipeline = dai.Pipeline() + +# Define sources and outputs +left = pipeline.create(dai.node.ColorCamera) +center = pipeline.create(dai.node.ColorCamera) +right = pipeline.create(dai.node.ColorCamera) +LC_depth = pipeline.create(dai.node.StereoDepth) +LR_depth = 
pipeline.create(dai.node.StereoDepth) +CR_depth = pipeline.create(dai.node.StereoDepth) + +xout_LC = pipeline.create(dai.node.XLinkOut) +xout_LR = pipeline.create(dai.node.XLinkOut) +xout_CR = pipeline.create(dai.node.XLinkOut) + +xout_LC.setStreamName("disparity_LC") +if enableRectified: + xoutl_LC = pipeline.create(dai.node.XLinkOut) + xoutr_LC = pipeline.create(dai.node.XLinkOut) + xoutl_LC.setStreamName("rectifiedLeft_LC") + xoutr_LC.setStreamName("rectifiedRight_LC") + +xout_LR.setStreamName("disparity_LR") +if enableRectified: + xoutl_LR = pipeline.create(dai.node.XLinkOut) + xoutr_LR = pipeline.create(dai.node.XLinkOut) + xoutl_LR.setStreamName("rectifiedLeft_LR") + xoutr_LR.setStreamName("rectifiedRight_LR") + +xout_CR.setStreamName("disparity_CR") +if enableRectified: + xoutl_CR = pipeline.create(dai.node.XLinkOut) + xoutr_CR = pipeline.create(dai.node.XLinkOut) + xoutl_CR.setStreamName("rectifiedLeft_CR") + xoutr_CR.setStreamName("rectifiedRight_CR") + +# Properties +left.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P) +left.setCamera("left") +left.setIspScale(2, 3) + +center.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P) +center.setBoardSocket(dai.CameraBoardSocket.CENTER) +center.setIspScale(2, 3) + +right.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P) +right.setCamera("right") +right.setIspScale(2, 3) + +LC_depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) +LC_depth.initialConfig.setMedianFilter(dai.MedianFilter.MEDIAN_OFF) +LC_depth.setLeftRightCheck(lr_check) +LC_depth.setExtendedDisparity(extended_disparity) +LC_depth.setSubpixel(subpixel) + +LR_depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) +LR_depth.initialConfig.setMedianFilter(dai.MedianFilter.MEDIAN_OFF) +LR_depth.setLeftRightCheck(lr_check) +LR_depth.setExtendedDisparity(extended_disparity) +LR_depth.setSubpixel(subpixel) + +CR_depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) +CR_depth.initialConfig.setMedianFilter(dai.MedianFilter.MEDIAN_OFF) +CR_depth.setLeftRightCheck(lr_check) +CR_depth.setExtendedDisparity(extended_disparity) +CR_depth.setSubpixel(subpixel) + +# Linking +# LC +left.isp.link(LC_depth.left) +center.isp.link(LC_depth.right) +LC_depth.disparity.link(xout_LC.input) +if enableRectified: + LC_depth.rectifiedLeft.link(xoutl_LC.input) + LC_depth.rectifiedRight.link(xoutr_LC.input) +# LR +left.isp.link(LR_depth.left) +right.isp.link(LR_depth.right) +LR_depth.disparity.link(xout_LR.input) +if enableRectified: + LR_depth.rectifiedLeft.link(xoutl_LR.input) + LR_depth.rectifiedRight.link(xoutr_LR.input) +# CR +center.isp.link(CR_depth.left) +right.isp.link(CR_depth.right) +CR_depth.disparity.link(xout_CR.input) +if enableRectified: + CR_depth.rectifiedLeft.link(xoutl_CR.input) + CR_depth.rectifiedRight.link(xoutr_CR.input) + +maxDisp = LC_depth.initialConfig.getMaxDisparity() + +# Connect to device and start pipeline +with dai.Device(pipeline) as device: + while not device.isClosed(): + queueNames = device.getQueueEvents() + for q in queueNames: + message = device.getOutputQueue(q).get() + # Display arrived frames + if type(message) == dai.ImgFrame: + frame = message.getCvFrame() + if 'disparity' in q: + disp = (frame * (255.0 / maxDisp)).astype(np.uint8) + disp = cv2.applyColorMap(disp, cv2.COLORMAP_JET) + cv2.imshow(q, disp) + else: + cv2.imshow(q, frame) + if cv2.waitKey(1) == ord('q'): + break \ No newline at end of file diff --git 
a/examples/StereoDepth/rgb_depth_aligned.py b/examples/StereoDepth/rgb_depth_aligned.py index c76dd7b79..3e72fc023 100755 --- a/examples/StereoDepth/rgb_depth_aligned.py +++ b/examples/StereoDepth/rgb_depth_aligned.py @@ -48,7 +48,7 @@ def updateBlendWeights(percent_rgb): queueNames.append("disp") #Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) camRgb.setFps(fps) if downscaleColor: camRgb.setIspScale(2, 3) @@ -56,22 +56,22 @@ def updateBlendWeights(percent_rgb): # This value was used during calibration try: calibData = device.readCalibration2() - lensPosition = calibData.getLensPosition(dai.CameraBoardSocket.RGB) + lensPosition = calibData.getLensPosition(dai.CameraBoardSocket.CAM_A) if lensPosition: camRgb.initialControl.setManualFocus(lensPosition) except: raise left.setResolution(monoResolution) -left.setBoardSocket(dai.CameraBoardSocket.LEFT) +left.setCamera("left") left.setFps(fps) right.setResolution(monoResolution) -right.setBoardSocket(dai.CameraBoardSocket.RIGHT) +right.setCamera("right") right.setFps(fps) stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) # LR-check is required for depth alignment stereo.setLeftRightCheck(True) -stereo.setDepthAlign(dai.CameraBoardSocket.RGB) +stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) # Linking camRgb.isp.link(rgbOut.input) diff --git a/examples/StereoDepth/rgb_depth_confidence_aligned.py b/examples/StereoDepth/rgb_depth_confidence_aligned.py index e1487c9ac..226707a81 100755 --- a/examples/StereoDepth/rgb_depth_confidence_aligned.py +++ b/examples/StereoDepth/rgb_depth_confidence_aligned.py @@ -68,7 +68,7 @@ def updateConfBlendWeights(percent): queueNames.append("disp") #Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) camRgb.setFps(fps) if downscaleColor: camRgb.setIspScale(2, 3) @@ -76,24 +76,24 @@ def updateConfBlendWeights(percent): # This value was used during calibration try: calibData = device.readCalibration2() - lensPosition = calibData.getLensPosition(dai.CameraBoardSocket.RGB) + lensPosition = calibData.getLensPosition(dai.CameraBoardSocket.CAM_A) if lensPosition: camRgb.initialControl.setManualFocus(lensPosition) except: raise left.setResolution(monoResolution) -left.setBoardSocket(dai.CameraBoardSocket.LEFT) +left.setCamera("left") left.setFps(fps) right.setResolution(monoResolution) -right.setBoardSocket(dai.CameraBoardSocket.RIGHT) +right.setCamera("right") right.setFps(fps) stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) # LR-check is required for depth alignment stereo.setLeftRightCheck(True) if 0: stereo.setSubpixel(True) # TODO enable for test -stereo.setDepthAlign(dai.CameraBoardSocket.RGB) +stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) xoutConfMap = pipeline.create(dai.node.XLinkOut) xoutConfMap.setStreamName('confidence_map') diff --git a/examples/StereoDepth/stereo_depth_custom_mesh.py b/examples/StereoDepth/stereo_depth_custom_mesh.py new file mode 100644 index 000000000..6cd398698 --- /dev/null +++ b/examples/StereoDepth/stereo_depth_custom_mesh.py @@ -0,0 +1,152 @@ +#!/usr/bin/env python3 + +import cv2 +import numpy as np +import depthai as dai +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument("-res", "--resolution", type=str, default="720", + 
help="Sets the resolution on mono cameras. Options: 800 | 720 | 400") +parser.add_argument("-md", "--mesh_dir", type=str, default=None, + help="Output directory for mesh files. If not specified mesh files won't be saved") +parser.add_argument("-lm", "--load_mesh", default=False, action="store_true", + help="Read camera intrinsics, generate mesh files and load them into the stereo node.") +args = parser.parse_args() + +meshDirectory = args.mesh_dir # Output dir for mesh files +generateMesh = args.load_mesh # Load mesh files +RES_MAP = { + '800': {'w': 1280, 'h': 800, 'res': dai.MonoCameraProperties.SensorResolution.THE_800_P }, + '720': {'w': 1280, 'h': 720, 'res': dai.MonoCameraProperties.SensorResolution.THE_720_P }, + '400': {'w': 640, 'h': 400, 'res': dai.MonoCameraProperties.SensorResolution.THE_400_P } +} +if args.resolution not in RES_MAP: + exit("Unsupported resolution!") + +resolution = RES_MAP[args.resolution] + +def getMesh(calibData): + M1 = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_B, resolution['w'], resolution['h'])) + d1 = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_B)) + R1 = np.array(calibData.getStereoLeftRectificationRotation()) + M2 = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_C, resolution['w'], resolution['h'])) + d2 = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_C)) + R2 = np.array(calibData.getStereoRightRectificationRotation()) + mapXL, mapYL = cv2.initUndistortRectifyMap(M1, d1, R1, M2, (resolution['w'], resolution['h']), cv2.CV_32FC1) + mapXR, mapYR = cv2.initUndistortRectifyMap(M2, d2, R2, M2, (resolution['w'], resolution['h']), cv2.CV_32FC1) + + meshCellSize = 16 + meshLeft = [] + meshRight = [] + + for y in range(mapXL.shape[0] + 1): + if y % meshCellSize == 0: + rowLeft = [] + rowRight = [] + for x in range(mapXL.shape[1] + 1): + if x % meshCellSize == 0: + if y == mapXL.shape[0] and x == mapXL.shape[1]: + rowLeft.append(mapYL[y - 1, x - 1]) + rowLeft.append(mapXL[y - 1, x - 1]) + rowRight.append(mapYR[y - 1, x - 1]) + rowRight.append(mapXR[y - 1, x - 1]) + elif y == mapXL.shape[0]: + rowLeft.append(mapYL[y - 1, x]) + rowLeft.append(mapXL[y - 1, x]) + rowRight.append(mapYR[y - 1, x]) + rowRight.append(mapXR[y - 1, x]) + elif x == mapXL.shape[1]: + rowLeft.append(mapYL[y, x - 1]) + rowLeft.append(mapXL[y, x - 1]) + rowRight.append(mapYR[y, x - 1]) + rowRight.append(mapXR[y, x - 1]) + else: + rowLeft.append(mapYL[y, x]) + rowLeft.append(mapXL[y, x]) + rowRight.append(mapYR[y, x]) + rowRight.append(mapXR[y, x]) + if (mapXL.shape[1] % meshCellSize) % 2 != 0: + rowLeft.append(0) + rowLeft.append(0) + rowRight.append(0) + rowRight.append(0) + + meshLeft.append(rowLeft) + meshRight.append(rowRight) + + meshLeft = np.array(meshLeft) + meshRight = np.array(meshRight) + + return meshLeft, meshRight + +def saveMeshFiles(meshLeft, meshRight, outputPath): + print("Saving mesh to:", outputPath) + meshLeft.tofile(outputPath + "/left_mesh.calib") + meshRight.tofile(outputPath + "/right_mesh.calib") + + +def create_pipeline(device: dai.Device) -> dai.Pipeline: + calibData = device.readCalibration() + print("Creating Stereo Depth pipeline") + pipeline = dai.Pipeline() + + camLeft = pipeline.create(dai.node.MonoCamera) + camLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) + + camRight = pipeline.create(dai.node.MonoCamera) + camRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) + + xoutRight = pipeline.create(dai.node.XLinkOut) + xoutRight.setStreamName("right") + 
camRight.out.link(xoutRight.input) + + for monoCam in (camLeft, camRight): # Common config + monoCam.setResolution(resolution['res']) + # monoCam.setFps(20.0) + + stereo = pipeline.create(dai.node.StereoDepth) + camLeft.out.link(stereo.left) + camRight.out.link(stereo.right) + stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) + stereo.setRectifyEdgeFillColor(0) # Black, to better see the cutout + stereo.setLeftRightCheck(True) + stereo.setExtendedDisparity(True) + + + + + xoutDisparity = pipeline.create(dai.node.XLinkOut) + xoutDisparity.setStreamName("disparity") + stereo.disparity.link(xoutDisparity.input) + + xoutRectifRight = pipeline.create(dai.node.XLinkOut) + xoutRectifRight.setStreamName("rectifiedRight") + stereo.rectifiedRight.link(xoutRectifRight.input) + + # Create custom meshes from calibration data. Here you could also + # load your own mesh files, or generate them in any other way. + leftMesh, rightMesh = getMesh(calibData) + if generateMesh: + meshLeft = list(leftMesh.tobytes()) + meshRight = list(rightMesh.tobytes()) + # Load mesh data to the StereoDepth node + stereo.loadMeshData(meshLeft, meshRight) + + if meshDirectory is not None: + saveMeshFiles(leftMesh, rightMesh, meshDirectory) + return pipeline + +with dai.Device() as device: + device.startPipeline(create_pipeline(device)) + + # Create a receive queue for each stream + qList = [device.getOutputQueue(stream, 8, blocking=False) for stream in ['right', 'rectifiedRight', 'disparity']] + + while True: + for q in qList: + name = q.getName() + frame = q.get().getCvFrame() + cv2.imshow(name, frame) + if cv2.waitKey(1) == ord("q"): + break diff --git a/examples/StereoDepth/stereo_depth_from_host.py b/examples/StereoDepth/stereo_depth_from_host.py index 8a48a21dc..56e155d02 100755 --- a/examples/StereoDepth/stereo_depth_from_host.py +++ b/examples/StereoDepth/stereo_depth_from_host.py @@ -156,6 +156,8 @@ def destroyWindow(self): trSpatialNumIterations = list() trDecimationFactor = list() trDisparityShift = list() + trCenterAlignmentShift = list() + trInvalidateEdgePixels = list() def trackbarSigma(value): StereoConfigHandler.config.postProcessing.bilateralSigmaValue = value @@ -279,6 +281,23 @@ def trackbarDisparityShift(value): for tr in StereoConfigHandler.trDisparityShift: tr.set(value) + def trackbarCenterAlignmentShift(value): + if StereoConfigHandler.config.algorithmControl.depthAlign != dai.StereoDepthConfig.AlgorithmControl.DepthAlign.CENTER: + print("Center alignment shift factor requires CENTER alignment enabled!") + return + StereoConfigHandler.config.algorithmControl.centerAlignmentShiftFactor = value / 100. 
+ print(f"centerAlignmentShiftFactor: {StereoConfigHandler.config.algorithmControl.centerAlignmentShiftFactor:.2f}") + StereoConfigHandler.newConfig = True + for tr in StereoConfigHandler.trCenterAlignmentShift: + tr.set(value) + + def trackbarInvalidateEdgePixels(value): + StereoConfigHandler.config.algorithmControl.numInvalidateEdgePixels = value + print(f"numInvalidateEdgePixels: {StereoConfigHandler.config.algorithmControl.numInvalidateEdgePixels:.2f}") + StereoConfigHandler.newConfig = True + for tr in StereoConfigHandler.trInvalidateEdgePixels: + tr.set(value) + def handleKeypress(key, stereoDepthConfigInQueue): if key == ord('m'): StereoConfigHandler.newConfig = True @@ -416,6 +435,8 @@ def registerWindow(stream): StereoConfigHandler.trLrCheck.append(StereoConfigHandler.Trackbar('LR-check threshold', stream, 0, 16, StereoConfigHandler.config.algorithmControl.leftRightCheckThreshold, StereoConfigHandler.trackbarLrCheckThreshold)) StereoConfigHandler.trFractionalBits.append(StereoConfigHandler.Trackbar('Subpixel fractional bits', stream, 3, 5, StereoConfigHandler.config.algorithmControl.subpixelFractionalBits, StereoConfigHandler.trackbarFractionalBits)) StereoConfigHandler.trDisparityShift.append(StereoConfigHandler.Trackbar('Disparity shift', stream, 0, 100, StereoConfigHandler.config.algorithmControl.disparityShift, StereoConfigHandler.trackbarDisparityShift)) + StereoConfigHandler.trCenterAlignmentShift.append(StereoConfigHandler.Trackbar('Center alignment shift factor', stream, 0, 100, StereoConfigHandler.config.algorithmControl.centerAlignmentShiftFactor, StereoConfigHandler.trackbarCenterAlignmentShift)) + StereoConfigHandler.trInvalidateEdgePixels.append(StereoConfigHandler.Trackbar('Invalidate edge pixels', stream, 0, 100, StereoConfigHandler.config.algorithmControl.numInvalidateEdgePixels, StereoConfigHandler.trackbarInvalidateEdgePixels)) StereoConfigHandler.trLineqAlpha.append(StereoConfigHandler.Trackbar('Linear equation alpha', stream, 0, 15, StereoConfigHandler.config.costMatching.linearEquationParameters.alpha, StereoConfigHandler.trackbarLineqAlpha)) StereoConfigHandler.trLineqBeta.append(StereoConfigHandler.Trackbar('Linear equation beta', stream, 0, 15, StereoConfigHandler.config.costMatching.linearEquationParameters.beta, StereoConfigHandler.trackbarLineqBeta)) StereoConfigHandler.trLineqThreshold.append(StereoConfigHandler.Trackbar('Linear equation threshold', stream, 0, 255, StereoConfigHandler.config.costMatching.linearEquationParameters.threshold, StereoConfigHandler.trackbarLineqThreshold)) @@ -569,6 +590,7 @@ def __init__(self, config): if args.dumpdisparitycostvalues: stereo.debugDispCostDump.link(xoutDebugCostDump.input) + StereoConfigHandler(stereo.initialConfig.get()) StereoConfigHandler.registerWindow('Stereo control panel') @@ -619,7 +641,7 @@ def convertToCv2Frame(name, image, config): elif 'disparity' in name: if 1: # Optionally, extend disparity range to better visualize it frame = (frame * 255. 
/ maxDisp).astype(np.uint8) - + return frame # if 1: # Optionally, apply a color map # frame = cv2.applyColorMap(frame, cv2.COLORMAP_HOT) @@ -630,8 +652,8 @@ def convertToCv2Frame(name, image, config): with dai.Device(pipeline) as device: stereoDepthConfigInQueue = device.getInputQueue("stereoDepthConfig") - inStreams = ['in_right', 'in_left'] - inStreamsCameraID = [dai.CameraBoardSocket.RIGHT, dai.CameraBoardSocket.LEFT] + inStreams = ['in_left', 'in_right'] + inStreamsCameraID = [dai.CameraBoardSocket.CAM_B, dai.CameraBoardSocket.CAM_C] in_q_list = [] for s in inStreams: q = device.getInputQueue(s) @@ -670,8 +692,6 @@ def convertToCv2Frame(name, image, config): img.setWidth(width) img.setHeight(height) q.send(img) - if timestamp_ms == 0: # Send twice for first iteration - q.send(img) # print("Sent frame: {:25s}".format(path), 'timestamp_ms:', timestamp_ms) timestamp_ms += frame_interval_ms index = (index + 1) % dataset_size diff --git a/examples/StereoDepth/stereo_depth_video.py b/examples/StereoDepth/stereo_depth_video.py index 8fcc02fec..2c59d65dd 100755 --- a/examples/StereoDepth/stereo_depth_video.py +++ b/examples/StereoDepth/stereo_depth_video.py @@ -69,13 +69,32 @@ action="store_true", help="Display depth frames", ) +parser.add_argument( + "-swlr", + "--swap_left_right", + default=False, + action="store_true", + help="Swap left right frames", +) +parser.add_argument( + "-a", + "--alpha", + type=float, + default=None, + help="Alpha scaling parameter to increase FOV", +) args = parser.parse_args() -resolutionMap = {"800": (1280, 800), "720": (1280, 720), "400": (640, 400)} -if args.resolution not in resolutionMap: +RES_MAP = { + '800': {'w': 1280, 'h': 800, 'res': dai.MonoCameraProperties.SensorResolution.THE_800_P }, + '720': {'w': 1280, 'h': 720, 'res': dai.MonoCameraProperties.SensorResolution.THE_720_P }, + '400': {'w': 640, 'h': 400, 'res': dai.MonoCameraProperties.SensorResolution.THE_400_P } +} +if args.resolution not in RES_MAP: exit("Unsupported resolution!") -resolution = resolutionMap[args.resolution] +resolution = RES_MAP[args.resolution] + meshDirectory = args.mesh_dir # Output dir for mesh files generateMesh = args.load_mesh # Load mesh files @@ -97,7 +116,7 @@ median = medianMap[args.median] print("StereoDepth config options:") -print(" Resolution: ", resolution) +print(f" Resolution: {resolution['w']}x{resolution['h']}") print(" Left-Right check: ", lrcheck) print(" Extended disparity:", extended) print(" Subpixel: ", subpixel) @@ -107,11 +126,11 @@ def getMesh(calibData): - M1 = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.LEFT, resolution[0], resolution[1])) - d1 = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.LEFT)) + M1 = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_B, resolution[0], resolution[1])) + d1 = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_B)) R1 = np.array(calibData.getStereoLeftRectificationRotation()) - M2 = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.RIGHT, resolution[0], resolution[1])) - d2 = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.RIGHT)) + M2 = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_C, resolution[0], resolution[1])) + d2 = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_C)) R2 = np.array(calibData.getStereoRightRectificationRotation()) mapXL, mapYL = cv2.initUndistortRectifyMap(M1, d1, R1, M2, resolution, cv2.CV_32FC1) mapXR, mapYR = cv2.initUndistortRectifyMap(M2, d2, 
R2, M2, resolution, cv2.CV_32FC1) @@ -167,14 +186,15 @@ def saveMeshFiles(meshLeft, meshRight, outputPath): meshRight.tofile(outputPath + "/right_mesh.calib") -def getDisparityFrame(frame): +def getDisparityFrame(frame, cvColorMap): maxDisp = stereo.initialConfig.getMaxDisparity() disp = (frame * (255.0 / maxDisp)).astype(np.uint8) - disp = cv2.applyColorMap(disp, cv2.COLORMAP_JET) + disp = cv2.applyColorMap(disp, cvColorMap) return disp - +device = dai.Device() +calibData = device.readCalibration() print("Creating Stereo Depth pipeline") pipeline = dai.Pipeline() @@ -188,17 +208,15 @@ def getDisparityFrame(frame): xoutRectifLeft = pipeline.create(dai.node.XLinkOut) xoutRectifRight = pipeline.create(dai.node.XLinkOut) -camLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) -camRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) -res = ( - dai.MonoCameraProperties.SensorResolution.THE_800_P - if resolution[1] == 800 - else dai.MonoCameraProperties.SensorResolution.THE_720_P - if resolution[1] == 720 - else dai.MonoCameraProperties.SensorResolution.THE_400_P -) +if args.swap_left_right: + camLeft.setCamera("right") + camRight.setCamera("left") +else: + camLeft.setCamera("left") + camRight.setCamera("right") + for monoCam in (camLeft, camRight): # Common config - monoCam.setResolution(res) + monoCam.setResolution(resolution['res']) # monoCam.setFps(20.0) stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) @@ -207,6 +225,11 @@ def getDisparityFrame(frame): stereo.setLeftRightCheck(lrcheck) stereo.setExtendedDisparity(extended) stereo.setSubpixel(subpixel) +if args.alpha is not None: + stereo.setAlphaScaling(args.alpha) + config = stereo.initialConfig.get() + config.postProcessing.brightnessFilter.minBrightness = 0 + stereo.initialConfig.set(config) xoutLeft.setStreamName("left") xoutRight.setStreamName("right") @@ -233,18 +256,8 @@ def getDisparityFrame(frame): if depth: streams.append("depth") -device = dai.Device() -calibData = device.readCalibration() -leftMesh, rightMesh = getMesh(calibData) -if generateMesh: - meshLeft = list(leftMesh.tobytes()) - meshRight = list(rightMesh.tobytes()) - stereo.loadMeshData(meshLeft, meshRight) - -if meshDirectory is not None: - saveMeshFiles(leftMesh, rightMesh, meshDirectory) - - +cvColorMap = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_JET) +cvColorMap[0] = [0, 0, 0] print("Creating DepthAI device") with device: device.startPipeline(pipeline) @@ -259,7 +272,7 @@ def getDisparityFrame(frame): if name == "depth": frame = frame.astype(np.uint16) elif name == "disparity": - frame = getDisparityFrame(frame) + frame = getDisparityFrame(frame, cvColorMap) cv2.imshow(name, frame) if cv2.waitKey(1) == ord("q"): diff --git a/examples/ToF/tof_depth.py b/examples/ToF/tof_depth.py new file mode 100644 index 000000000..5a08b5d0c --- /dev/null +++ b/examples/ToF/tof_depth.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python3 + +import cv2 +import depthai as dai +import numpy as np + +pipeline = dai.Pipeline() + +cam_a = pipeline.create(dai.node.Camera) +# We assume the ToF camera sensor is on port CAM_A +cam_a.setBoardSocket(dai.CameraBoardSocket.CAM_A) + +tof = pipeline.create(dai.node.ToF) + +# Configure the ToF node +tofConfig = tof.initialConfig.get() +# tofConfig.depthParams.freqModUsed = dai.RawToFConfig.DepthParams.TypeFMod.MIN +tofConfig.depthParams.freqModUsed = dai.RawToFConfig.DepthParams.TypeFMod.MAX +tofConfig.depthParams.avgPhaseShuffle = False +tofConfig.depthParams.minimumAmplitude = 3.0 +tof.initialConfig.set(tofConfig) +# 
Link the ToF sensor to the ToF node +cam_a.raw.link(tof.input) + +xout = pipeline.create(dai.node.XLinkOut) +xout.setStreamName("depth") +tof.depth.link(xout.input) + +# Connect to device and start pipeline +with dai.Device(pipeline) as device: + print('Connected cameras:', device.getConnectedCameraFeatures()) + q = device.getOutputQueue(name="depth") + + while True: + imgFrame = q.get() # blocking call, will wait until a new data has arrived + depth_map = imgFrame.getFrame() + + # Colorize the depth frame to jet colormap + depth_downscaled = depth_map[::4] + non_zero_depth = depth_downscaled[depth_downscaled != 0] # Remove invalid depth values + if len(non_zero_depth) == 0: + min_depth, max_depth = 0, 0 + else: + min_depth = np.percentile(non_zero_depth, 3) + max_depth = np.percentile(non_zero_depth, 97) + depth_colorized = np.interp(depth_map, (min_depth, max_depth), (0, 255)).astype(np.uint8) + depth_colorized = cv2.applyColorMap(depth_colorized, cv2.COLORMAP_JET) + + cv2.imshow("Colorized depth", depth_colorized) + + if cv2.waitKey(1) == ord('q'): + break diff --git a/examples/VideoEncoder/disparity_colormap_encoding.py b/examples/VideoEncoder/disparity_colormap_encoding.py index 09d602b10..c0d2c8ce1 100755 --- a/examples/VideoEncoder/disparity_colormap_encoding.py +++ b/examples/VideoEncoder/disparity_colormap_encoding.py @@ -8,11 +8,11 @@ # Create left/right mono cameras for Stereo depth monoLeft = pipeline.create(dai.node.MonoCamera) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight = pipeline.create(dai.node.MonoCamera) monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Create a node that will produce the depth map depth = pipeline.create(dai.node.StereoDepth) diff --git a/examples/VideoEncoder/disparity_encoding.py b/examples/VideoEncoder/disparity_encoding.py index 951e1c1b8..1e9483d4f 100755 --- a/examples/VideoEncoder/disparity_encoding.py +++ b/examples/VideoEncoder/disparity_encoding.py @@ -10,11 +10,11 @@ # Create left/right mono cameras for Stereo depth monoLeft = pipeline.create(dai.node.MonoCamera) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight = pipeline.create(dai.node.MonoCamera) monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Create a node that will produce the depth map depth = pipeline.create(dai.node.StereoDepth) diff --git a/examples/VideoEncoder/encoding_max_limit.py b/examples/VideoEncoder/encoding_max_limit.py index b01f430ed..94c0b555d 100755 --- a/examples/VideoEncoder/encoding_max_limit.py +++ b/examples/VideoEncoder/encoding_max_limit.py @@ -22,10 +22,10 @@ ve3Out.setStreamName('ve3Out') # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoLeft.setCamera("left") +monoRight.setCamera("right") # Setting to 26fps will trigger error ve1.setDefaultProfilePreset(25, dai.VideoEncoderProperties.Profile.H264_MAIN) diff --git a/examples/VideoEncoder/rgb_encoding.py 
b/examples/VideoEncoder/rgb_encoding.py index 45b51f991..3d5eadcdd 100755 --- a/examples/VideoEncoder/rgb_encoding.py +++ b/examples/VideoEncoder/rgb_encoding.py @@ -13,7 +13,7 @@ xout.setStreamName('h265') # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) videoEnc.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H265_MAIN) diff --git a/examples/VideoEncoder/rgb_full_resolution_saver.py b/examples/VideoEncoder/rgb_full_resolution_saver.py index 381784f46..d2c06640a 100755 --- a/examples/VideoEncoder/rgb_full_resolution_saver.py +++ b/examples/VideoEncoder/rgb_full_resolution_saver.py @@ -9,7 +9,7 @@ pipeline = dai.Pipeline() camRgb = pipeline.create(dai.node.ColorCamera) -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) xoutRgb = pipeline.create(dai.node.XLinkOut) @@ -56,7 +56,7 @@ with open(fName, "wb") as f: f.write(qStill.get().getData()) print('Image saved to', fName) - + key = cv2.waitKey(1) if key == ord('q'): break diff --git a/examples/VideoEncoder/rgb_mono_encoding.py b/examples/VideoEncoder/rgb_mono_encoding.py index 09ec1c036..5aeffb92e 100755 --- a/examples/VideoEncoder/rgb_mono_encoding.py +++ b/examples/VideoEncoder/rgb_mono_encoding.py @@ -22,9 +22,9 @@ ve3Out.setStreamName('ve3Out') # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) +monoLeft.setCamera("left") +monoRight.setCamera("right") # Create encoders, one for each camera, consuming the frames and encoding them using H.264 / H.265 encoding ve1.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H264_MAIN) ve2.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H265_MAIN) diff --git a/examples/Warp/warp_mesh.py b/examples/Warp/warp_mesh.py old mode 100644 new mode 100755 diff --git a/examples/Warp/warp_mesh_interactive.py b/examples/Warp/warp_mesh_interactive.py new file mode 100755 index 000000000..6065dd951 --- /dev/null +++ b/examples/Warp/warp_mesh_interactive.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python3 +import cv2 +import depthai as dai +import numpy as np +import argparse +import re +import sys +from random import randint + +parser = argparse.ArgumentParser() +parser.add_argument("-m", "--mesh_dims", type=str, default="4x4", help="mesh dimensions widthxheight (default=%(default)s)") +parser.add_argument("-r", "--resolution", type=str, default="512x512", help="preview resolution (default=%(default)s)") +parser.add_argument("-rnd", "--random", action="store_true", help="Generate random initial mesh") +args = parser.parse_args() + +# mesh dimensions +match = re.search(r'.*?(\d+)x(\d+).*', args.mesh_dims) +if not match: + raise Exception(f"Mesh dimensions format incorrect '{args.mesh_dims}'!") +mesh_w = int(match.group(1)) +mesh_h = int(match.group(2)) + +# Preview resolution +match = re.search(r'.*?(\d+)x(\d+).*', args.resolution) +if not match: + raise Exception(f"Resolution format incorrect '{args.resolution}'!") +preview_w = int(match.group(1)) +preview_h = int(match.group(2)) +if preview_w % 16 != 0: + raise Exception(f"Preview width must be a multiple of 16!") + +# Create an initial mesh (optionally random) of dimension mesh_w x mesh_h 
+first_point_x = int(preview_w / 10) +between_points_x = int(4 * preview_w / (5 * (mesh_w - 1))) +first_point_y = int(preview_h / 10) +between_points_y = int(4 * preview_h / (5 * (mesh_h - 1))) +if args.random: + max_rnd_x = int(between_points_x / 4) + max_rnd_y = int(between_points_y / 4) +mesh = [] +for i in range(mesh_h): + for j in range(mesh_w): + x = first_point_x + j * between_points_x + y = first_point_y + i * between_points_y + if args.random: + rnd_x = randint(-max_rnd_x, max_rnd_x) + if x + rnd_x > 0 and x + rnd_x < preview_w: + x += rnd_x + rnd_y = randint(-max_rnd_y, max_rnd_y) + if y + rnd_y > 0 and y + rnd_y < preview_h: + y += rnd_y + mesh.append((x, y)) + +def create_pipeline(mesh): + print(mesh) + # Create pipeline + pipeline = dai.Pipeline() + + camRgb = pipeline.create(dai.node.ColorCamera) + camRgb.setPreviewSize(preview_w, preview_h) + camRgb.setInterleaved(False) + width = camRgb.getPreviewWidth() + height = camRgb.getPreviewHeight() + + # Output source + xout_source = pipeline.create(dai.node.XLinkOut) + xout_source.setStreamName('source') + camRgb.preview.link(xout_source.input) + # Warp source frame + warp = pipeline.create(dai.node.Warp) + warp.setWarpMesh(mesh, mesh_w, mesh_h) + warp.setOutputSize(width, height) + warp.setMaxOutputFrameSize(width * height * 3) + camRgb.preview.link(warp.inputImage) + + warp.setHwIds([1]) + warp.setInterpolation(dai.node.Warp.Properties.Interpolation.BYPASS) + # Output warped + xout_warped = pipeline.create(dai.node.XLinkOut) + xout_warped.setStreamName('warped') + warp.out.link(xout_warped.input) + return pipeline + +point_selected = None + +def mouse_callback(event, x, y, flags, param): + global mesh, point_selected, mesh_changed + if event == cv2.EVENT_LBUTTONDOWN: + if point_selected is None: + # Which point is selected ? 
+ min_dist = 100 + + for i in range(len(mesh)): + dist = np.linalg.norm((x - mesh[i][0], y - mesh[i][1])) + if dist < 20 and dist < min_dist: + min_dist = dist + point_selected = i + if point_selected is not None: + mesh[point_selected] = (x, y) + mesh_changed = True + + elif event == cv2.EVENT_LBUTTONUP: + point_selected = None + elif event == cv2.EVENT_MOUSEMOVE: + if point_selected is not None: + mesh[point_selected] = (x, y) + mesh_changed = True + + +cv2.namedWindow("Source") +cv2.setMouseCallback("Source", mouse_callback) + +running = True + +print("Use your mouse to modify the mesh by clicking/moving points of the mesh in the Source window") +print("Then press 'r' key to restart the device/pipeline") +while running: + pipeline = create_pipeline(mesh) + # Connect to device and start pipeline + with dai.Device(pipeline) as device: + print("Starting device") + # Output queue will be used to get the rgb frames from the output defined above + q_source = device.getOutputQueue(name="source", maxSize=4, blocking=False) + q_warped = device.getOutputQueue(name="warped", maxSize=4, blocking=False) + + restart_device = False + mesh_changed = False + while not restart_device: + in0 = q_source.get() + if in0 is not None: + source = in0.getCvFrame() + color = (0, 0,255) if mesh_changed else (0,255,0) + for i in range(len(mesh)): + cv2.circle(source, (mesh[i][0], mesh[i][1]), 4, color, -1) + if i % mesh_w != mesh_w -1: + cv2.line(source, (mesh[i][0], mesh[i][1]), (mesh[i+1][0], mesh[i+1][1]), color, 2) + if i + mesh_w < len(mesh): + cv2.line(source, (mesh[i][0], mesh[i][1]), (mesh[i+mesh_w][0], mesh[i+mesh_w][1]), color, 2) + cv2.imshow("Source", source) + + in1 = q_warped.get() + if in1 is not None: + cv2.imshow("Warped", in1.getCvFrame()) + + key = cv2.waitKey(1) + if key == ord('r'): # Restart the device if mesh has changed + if mesh_changed: + print("Restart requested...") + mesh_changed = False + restart_device = True + elif key == 27 or key == ord('q'): # Exit + running = False + break \ No newline at end of file diff --git a/examples/bootloader/poe_set_ip.py b/examples/bootloader/poe_set_ip.py old mode 100644 new mode 100755 diff --git a/examples/bootloader/read_flash_memory.py b/examples/bootloader/read_flash_memory.py old mode 100644 new mode 100755 diff --git a/examples/calibration/calibration_flash.py b/examples/calibration/calibration_flash.py index 34a552d50..325902921 100755 --- a/examples/calibration/calibration_flash.py +++ b/examples/calibration/calibration_flash.py @@ -1,7 +1,6 @@ #!/usr/bin/env python3 from pathlib import Path -import cv2 import depthai as dai import argparse @@ -13,7 +12,7 @@ args = parser.parse_args() # Connect device -with dai.Device(dai.OpenVINO.VERSION_2021_4, dai.UsbSpeed.HIGH) as device: +with dai.Device(dai.OpenVINO.VERSION_UNIVERSAL, dai.UsbSpeed.HIGH) as device: deviceCalib = device.readCalibration() deviceCalib.eepromToJsonFile(calibBackUpFile) diff --git a/examples/calibration/calibration_load.py b/examples/calibration/calibration_load.py index 9a2995a22..44e8996dc 100755 --- a/examples/calibration/calibration_load.py +++ b/examples/calibration/calibration_load.py @@ -28,10 +28,10 @@ # MonoCamera monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") # monoLeft.setFps(5.0) monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # 
monoRight.setFps(5.0) # Linking diff --git a/examples/calibration/calibration_reader.py b/examples/calibration/calibration_reader.py index c49b93b59..9f3d82564 100755 --- a/examples/calibration/calibration_reader.py +++ b/examples/calibration/calibration_reader.py @@ -14,66 +14,66 @@ calibData = device.readCalibration() calibData.eepromToJsonFile(calibFile) - M_rgb, width, height = calibData.getDefaultIntrinsics(dai.CameraBoardSocket.RGB) + M_rgb, width, height = calibData.getDefaultIntrinsics(dai.CameraBoardSocket.CAM_A) print("RGB Camera Default intrinsics...") print(M_rgb) print(width) print(height) if "OAK-1" in calibData.getEepromData().boardName or "BW1093OAK" in calibData.getEepromData().boardName: - M_rgb = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.RGB, 1280, 720)) + M_rgb = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_A, 1280, 720)) print("RGB Camera resized intrinsics...") print(M_rgb) - D_rgb = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.RGB)) + D_rgb = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_A)) print("RGB Distortion Coefficients...") [print(name + ": " + value) for (name, value) in zip(["k1", "k2", "p1", "p2", "k3", "k4", "k5", "k6", "s1", "s2", "s3", "s4", "τx", "τy"], [str(data) for data in D_rgb])] - print(f'RGB FOV {calibData.getFov(dai.CameraBoardSocket.RGB)}') + print(f'RGB FOV {calibData.getFov(dai.CameraBoardSocket.CAM_A)}') else: - M_rgb, width, height = calibData.getDefaultIntrinsics(dai.CameraBoardSocket.RGB) + M_rgb, width, height = calibData.getDefaultIntrinsics(dai.CameraBoardSocket.CAM_A) print("RGB Camera Default intrinsics...") print(M_rgb) print(width) print(height) - M_rgb = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.RGB, 3840, 2160)) + M_rgb = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_A, 3840, 2160)) print("RGB Camera resized intrinsics... 3840 x 2160 ") print(M_rgb) - M_rgb = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.RGB, 4056, 3040 )) + M_rgb = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_A, 4056, 3040 )) print("RGB Camera resized intrinsics... 4056 x 3040 ") print(M_rgb) - M_left, width, height = calibData.getDefaultIntrinsics(dai.CameraBoardSocket.LEFT) + M_left, width, height = calibData.getDefaultIntrinsics(dai.CameraBoardSocket.CAM_B) print("LEFT Camera Default intrinsics...") print(M_left) print(width) print(height) - M_left = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.LEFT, 1280, 720)) + M_left = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_B, 1280, 720)) print("LEFT Camera resized intrinsics... 1280 x 720") print(M_left) - M_right = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.RIGHT, 1280, 720)) + M_right = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_C, 1280, 720)) print("RIGHT Camera resized intrinsics... 
1280 x 720") print(M_right) - D_left = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.LEFT)) + D_left = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_B)) print("LEFT Distortion Coefficients...") [print(name+": "+value) for (name, value) in zip(["k1","k2","p1","p2","k3","k4","k5","k6","s1","s2","s3","s4","τx","τy"],[str(data) for data in D_left])] - D_right = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.RIGHT)) + D_right = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_C)) print("RIGHT Distortion Coefficients...") [print(name+": "+value) for (name, value) in zip(["k1","k2","p1","p2","k3","k4","k5","k6","s1","s2","s3","s4","τx","τy"],[str(data) for data in D_right])] - print(f"RGB FOV {calibData.getFov(dai.CameraBoardSocket.RGB)}, Mono FOV {calibData.getFov(dai.CameraBoardSocket.LEFT)}") + print(f"RGB FOV {calibData.getFov(dai.CameraBoardSocket.CAM_A)}, Mono FOV {calibData.getFov(dai.CameraBoardSocket.CAM_B)}") R1 = np.array(calibData.getStereoLeftRectificationRotation()) R2 = np.array(calibData.getStereoRightRectificationRotation()) @@ -87,10 +87,10 @@ print("RIGHT Camera stereo rectification matrix...") print(H_right) - lr_extrinsics = np.array(calibData.getCameraExtrinsics(dai.CameraBoardSocket.LEFT, dai.CameraBoardSocket.RIGHT)) + lr_extrinsics = np.array(calibData.getCameraExtrinsics(dai.CameraBoardSocket.CAM_B, dai.CameraBoardSocket.CAM_C)) print("Transformation matrix of where left Camera is W.R.T right Camera's optical center") print(lr_extrinsics) - l_rgb_extrinsics = np.array(calibData.getCameraExtrinsics(dai.CameraBoardSocket.LEFT, dai.CameraBoardSocket.RGB)) + l_rgb_extrinsics = np.array(calibData.getCameraExtrinsics(dai.CameraBoardSocket.CAM_B, dai.CameraBoardSocket.CAM_A)) print("Transformation matrix of where left Camera is W.R.T RGB Camera's optical center") print(l_rgb_extrinsics) diff --git a/examples/device/device_all_boot_bootloader.py b/examples/device/device_all_boot_bootloader.py new file mode 100755 index 000000000..4c2e9a56b --- /dev/null +++ b/examples/device/device_all_boot_bootloader.py @@ -0,0 +1,6 @@ +import depthai as dai + +devices = dai.Device.getAllConnectedDevices() + +for device in devices: + dai.XLinkConnection.bootBootloader(device) diff --git a/examples/device/device_boot_non_exclusive.py b/examples/device/device_boot_non_exclusive.py new file mode 100755 index 000000000..898b292f6 --- /dev/null +++ b/examples/device/device_boot_non_exclusive.py @@ -0,0 +1,10 @@ +import depthai as dai +import time + +cfg = dai.Device.Config() +cfg.nonExclusiveMode = True + +with dai.Device(cfg) as device: + while not device.isClosed(): + print('CPU usage:',device.getLeonCssCpuUsage().average) + time.sleep(1) \ No newline at end of file diff --git a/examples/host_side/device_information.py b/examples/host_side/device_information.py old mode 100644 new mode 100755 diff --git a/examples/host_side/latency_measurement.py b/examples/host_side/latency_measurement.py old mode 100644 new mode 100755 diff --git a/examples/host_side/opencv_support.py b/examples/host_side/opencv_support.py index f439c3fe3..ae98a9b02 100755 --- a/examples/host_side/opencv_support.py +++ b/examples/host_side/opencv_support.py @@ -16,7 +16,7 @@ # Properties camRgb.setPreviewSize(300, 300) -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) camRgb.setInterleaved(True) 
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR) diff --git a/examples/host_side/queue_add_callback.py b/examples/host_side/queue_add_callback.py index e9c21d0f3..6feb194d0 100755 --- a/examples/host_side/queue_add_callback.py +++ b/examples/host_side/queue_add_callback.py @@ -17,9 +17,9 @@ # Properties camRgb.setPreviewSize(300, 300) -left.setBoardSocket(dai.CameraBoardSocket.LEFT) +left.setCamera("left") left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -right.setBoardSocket(dai.CameraBoardSocket.RIGHT) +right.setCamera("right") right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) # Stream all the camera streams through the same XLink node diff --git a/examples/install_requirements.py b/examples/install_requirements.py index 1bcd555fa..d4a2b7acc 100755 --- a/examples/install_requirements.py +++ b/examples/install_requirements.py @@ -57,7 +57,7 @@ def hasWhitespace(string): if sys.version_info[0] == 3 and sys.version_info[1] == 9: DEPENDENCIES.append('opencv-python!=4.5.4.58') else: - DEPENDENCIES.append('opencv-python') + DEPENDENCIES.append('opencv-contrib-python==4.5.5.62') # same as in depthai requirementx.txt diff --git a/examples/mixed/frame_sync.py b/examples/mixed/frame_sync.py old mode 100644 new mode 100755 index 2fff31edc..29226e285 --- a/examples/mixed/frame_sync.py +++ b/examples/mixed/frame_sync.py @@ -15,12 +15,12 @@ left = pipeline.create(dai.node.MonoCamera) left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -left.setBoardSocket(dai.CameraBoardSocket.LEFT) +left.setCamera("left") left.setFps(FPS) right = pipeline.create(dai.node.MonoCamera) right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -right.setBoardSocket(dai.CameraBoardSocket.RIGHT) +right.setCamera("right") right.setFps(FPS) stereo = pipeline.createStereoDepth() diff --git a/examples/mixed/mono_depth_mobilenetssd.py b/examples/mixed/mono_depth_mobilenetssd.py index 145b1f915..d7bc31127 100755 --- a/examples/mixed/mono_depth_mobilenetssd.py +++ b/examples/mixed/mono_depth_mobilenetssd.py @@ -38,9 +38,9 @@ nnOut.setStreamName("nn") # Properties -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) # Produce the depth map (using disparity output as it's easier to visualize depth this way) diff --git a/examples/mixed/multiple_devices.py b/examples/mixed/multiple_devices.py old mode 100644 new mode 100755 index 7c3252823..789694d63 --- a/examples/mixed/multiple_devices.py +++ b/examples/mixed/multiple_devices.py @@ -11,7 +11,7 @@ def createPipeline(): camRgb = pipeline.create(dai.node.ColorCamera) camRgb.setPreviewSize(300, 300) - camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) + camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) camRgb.setInterleaved(False) @@ -47,7 +47,7 @@ def createPipeline(): print(" >>> Board name:", eepromData.boardName) if eepromData.productName != "": print(" >>> Product name:", eepromData.productName) - + pipeline = createPipeline() device.startPipeline(pipeline) @@ -55,7 +55,7 @@ def createPipeline(): q_rgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False) stream_name = "rgb-" + mxId + "-" + eepromData.productName qRgbMap.append((q_rgb, 
stream_name)) - + while True: for q_rgb, stream_name in qRgbMap: if q_rgb.has(): diff --git a/examples/mixed/report_camera_settings.py b/examples/mixed/report_camera_settings.py old mode 100644 new mode 100755 index 25b1780fc..78283f09d --- a/examples/mixed/report_camera_settings.py +++ b/examples/mixed/report_camera_settings.py @@ -16,7 +16,7 @@ camLeft = pipeline.create(dai.node.MonoCamera) camLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -camLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +camLeft.setCamera("left") xoutLeft = pipeline.create(dai.node.XLinkOut) xoutLeft.setStreamName("left") diff --git a/examples/mixed/rgb_encoding_mobilenet.py b/examples/mixed/rgb_encoding_mobilenet.py index fbcb10192..bf00c8bb8 100755 --- a/examples/mixed/rgb_encoding_mobilenet.py +++ b/examples/mixed/rgb_encoding_mobilenet.py @@ -36,7 +36,7 @@ nnOut.setStreamName("nn") # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) camRgb.setPreviewSize(300, 300) camRgb.setInterleaved(False) diff --git a/examples/mixed/rgb_encoding_mono_mobilenet.py b/examples/mixed/rgb_encoding_mono_mobilenet.py index f4822ab0e..850635b21 100755 --- a/examples/mixed/rgb_encoding_mono_mobilenet.py +++ b/examples/mixed/rgb_encoding_mono_mobilenet.py @@ -40,9 +40,9 @@ nnOut.setStreamName("nn") # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) videoEncoder.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H265_MAIN) diff --git a/examples/mixed/rgb_encoding_mono_mobilenet_depth.py b/examples/mixed/rgb_encoding_mono_mobilenet_depth.py index 5b08ce42f..f6a0f627d 100755 --- a/examples/mixed/rgb_encoding_mono_mobilenet_depth.py +++ b/examples/mixed/rgb_encoding_mono_mobilenet_depth.py @@ -44,11 +44,11 @@ nnOut.setStreamName('nn') # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) videoEncoder.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H265_MAIN) diff --git a/examples/mixed/rotated_spatial_detections.py b/examples/mixed/rotated_spatial_detections.py index 3b386a48b..c22dd057b 100755 --- a/examples/mixed/rotated_spatial_detections.py +++ b/examples/mixed/rotated_spatial_detections.py @@ -4,7 +4,7 @@ import sys import cv2 import depthai as dai - +import numpy as np ''' Spatial object detections demo for 180° rotated OAK camera. 
''' @@ -50,14 +50,15 @@ camRgb.setImageOrientation(dai.CameraImageOrientation.ROTATE_180_DEG) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Setting node configs stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) # Align depth map to the perspective of RGB camera, on which inference is done -stereo.setDepthAlign(dai.CameraBoardSocket.RGB) +stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) +stereo.setSubpixel(True) stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight()) rotate_stereo_manip = pipeline.createImageManip() @@ -104,8 +105,10 @@ frame = inPreview.getCvFrame() depthFrame = depth.getFrame() # depthFrame values are in millimeters - depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1) - depthFrameColor = cv2.equalizeHist(depthFrameColor) + depth_downscaled = depthFrame[::4] + min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1) + max_depth = np.percentile(depth_downscaled, 99) + depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8) depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT) detections = inDet.detections diff --git a/pyproject.toml b/pyproject.toml index 154fc9828..fc198666c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,2 +1,2 @@ [build-system] -requires = ["setuptools", "wheel", "mypy", "numpy"] # Must be preinstalled "cmake>=3.2.0" +requires = ["setuptools", "wheel", "mypy<=1.3.0", "numpy", "cmake==3.25"] diff --git a/src/CalibrationHandlerBindings.cpp b/src/CalibrationHandlerBindings.cpp index d2cdf050c..ad3461936 100644 --- a/src/CalibrationHandlerBindings.cpp +++ b/src/CalibrationHandlerBindings.cpp @@ -48,7 +48,7 @@ void CalibrationHandlerBindings::bind(pybind11::module& m, void* pCallstack){ .def("getCameraTranslationVector", &CalibrationHandler::getCameraTranslationVector, py::arg("srcCamera"), py::arg("dstCamera"), py::arg("useSpecTranslation") = true, DOC(dai, CalibrationHandler, getCameraTranslationVector)) .def("getCameraRotationMatrix", &CalibrationHandler::getCameraRotationMatrix, py::arg("srcCamera"), py::arg("dstCamera"), DOC(dai, CalibrationHandler, getCameraRotationMatrix)) - .def("getBaselineDistance", &CalibrationHandler::getBaselineDistance, py::arg("cam1") = dai::CameraBoardSocket::RIGHT, py::arg("cam2") = dai::CameraBoardSocket::LEFT, py::arg("useSpecTranslation") = true, DOC(dai, CalibrationHandler, getBaselineDistance)) + .def("getBaselineDistance", &CalibrationHandler::getBaselineDistance, py::arg("cam1") = dai::CameraBoardSocket::CAM_C, py::arg("cam2") = dai::CameraBoardSocket::CAM_B, py::arg("useSpecTranslation") = true, DOC(dai, CalibrationHandler, getBaselineDistance)) .def("getCameraToImuExtrinsics", &CalibrationHandler::getCameraToImuExtrinsics, py::arg("cameraId"), py::arg("useSpecTranslation") = false, DOC(dai, CalibrationHandler, getCameraToImuExtrinsics)) .def("getImuToCameraExtrinsics", &CalibrationHandler::getImuToCameraExtrinsics, py::arg("cameraId"), py::arg("useSpecTranslation") = false, DOC(dai, CalibrationHandler, getImuToCameraExtrinsics)) diff --git a/src/DeviceBindings.cpp b/src/DeviceBindings.cpp index d90a95f01..afd3c99eb 100644 --- a/src/DeviceBindings.cpp +++ b/src/DeviceBindings.cpp @@ -6,6 
+6,7 @@ #include "depthai/pipeline/Pipeline.hpp" #include "depthai/utility/Clock.hpp" #include "depthai/xlink/XLinkConnection.hpp" +#include "depthai-shared/device/CrashDump.hpp" // std::chrono bindings #include @@ -218,6 +219,7 @@ static void bindConstructors(ARG& arg){ return std::make_unique(pipeline, dev); }), py::arg("pipeline"), DOC(dai, DeviceBase, DeviceBase)) .def(py::init([](const Pipeline& pipeline, bool usb2Mode){ + PyErr_WarnEx(PyExc_DeprecationWarning, "Use constructor taking 'UsbSpeed' instead", 1); auto dev = deviceSearchHelper(); py::gil_scoped_release release; return std::make_unique(pipeline, dev, usb2Mode); @@ -233,6 +235,7 @@ static void bindConstructors(ARG& arg){ return std::make_unique(pipeline, dev, pathToCmd); }), py::arg("pipeline"), py::arg("pathToCmd"), DOC(dai, DeviceBase, DeviceBase, 4)) .def(py::init([](const Pipeline& pipeline, const DeviceInfo& deviceInfo, bool usb2Mode){ + PyErr_WarnEx(PyExc_DeprecationWarning, "Use constructor taking 'UsbSpeed' instead", 1); py::gil_scoped_release release; return std::make_unique(pipeline, deviceInfo, usb2Mode); }), py::arg("pipeline"), py::arg("devInfo"), py::arg("usb2Mode") = false, DOC(dai, DeviceBase, DeviceBase, 6)) @@ -250,8 +253,9 @@ static void bindConstructors(ARG& arg){ auto dev = deviceSearchHelper(); py::gil_scoped_release release; return std::make_unique(version, dev); - }), py::arg("version") = OpenVINO::DEFAULT_VERSION, DOC(dai, DeviceBase, DeviceBase, 10)) + }), py::arg("version") = OpenVINO::VERSION_UNIVERSAL, DOC(dai, DeviceBase, DeviceBase, 10)) .def(py::init([](OpenVINO::Version version, bool usb2Mode){ + PyErr_WarnEx(PyExc_DeprecationWarning, "Use constructor taking 'UsbSpeed' instead", 1); auto dev = deviceSearchHelper(); py::gil_scoped_release release; return std::make_unique(version, dev, usb2Mode); @@ -267,6 +271,7 @@ static void bindConstructors(ARG& arg){ return std::make_unique(version, dev, pathToCmd); }), py::arg("version"), py::arg("pathToCmd"), DOC(dai, DeviceBase, DeviceBase, 13)) .def(py::init([](OpenVINO::Version version, const DeviceInfo& deviceInfo, bool usb2Mode){ + PyErr_WarnEx(PyExc_DeprecationWarning, "Use constructor taking 'UsbSpeed' instead", 1); py::gil_scoped_release release; return std::make_unique(version, deviceInfo, usb2Mode); }), py::arg("version"), py::arg("deviceInfo"), py::arg("usb2Mode") = false, DOC(dai, DeviceBase, DeviceBase, 15)) @@ -289,14 +294,24 @@ static void bindConstructors(ARG& arg){ }), py::arg("config"), py::arg("deviceInfo"), DOC(dai, DeviceBase, DeviceBase, 19)) // DeviceInfo version - .def(py::init([](const DeviceInfo& deviceInfo){ + .def(py::init([](const DeviceInfo& deviceInfo){ py::gil_scoped_release release; return std::make_unique(deviceInfo); }), py::arg("deviceInfo"), DOC(dai, DeviceBase, DeviceBase, 20)) - .def(py::init([](const DeviceInfo& deviceInfo, UsbSpeed maxUsbSpeed){ + .def(py::init([](const DeviceInfo& deviceInfo, UsbSpeed maxUsbSpeed){ py::gil_scoped_release release; return std::make_unique(deviceInfo, maxUsbSpeed); }), py::arg("deviceInfo"), py::arg("maxUsbSpeed"), DOC(dai, DeviceBase, DeviceBase, 21)) + + // name or device id version + .def(py::init([](std::string nameOrDeviceId){ + py::gil_scoped_release release; + return std::make_unique(std::move(nameOrDeviceId)); + }), py::arg("nameOrDeviceId"), DOC(dai, DeviceBase, DeviceBase, 22)) + .def(py::init([](std::string nameOrDeviceId, UsbSpeed maxUsbSpeed){ + py::gil_scoped_release release; + return std::make_unique(std::move(nameOrDeviceId), maxUsbSpeed); + }), 
py::arg("nameOrDeviceId"), py::arg("maxUsbSpeed"), DOC(dai, DeviceBase, DeviceBase, 23)) ; } @@ -311,6 +326,13 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ py::class_ deviceBase(m, "DeviceBase", DOC(dai, DeviceBase)); py::class_ device(m, "Device", DOC(dai, Device)); py::class_ deviceConfig(device, "Config", DOC(dai, DeviceBase, Config)); + py::class_ crashDump(m, "CrashDump", DOC(dai, CrashDump)); + py::class_ crashReport(crashDump, "CrashReport", DOC(dai, CrashDump, CrashReport)); + py::class_ errorSourceInfo(crashReport, "ErrorSourceInfo", DOC(dai, CrashDump, CrashReport, ErrorSourceInfo)); + py::class_ assertContext(errorSourceInfo, "AssertContext", DOC(dai, CrashDump, CrashReport, ErrorSourceInfo, AssertContext)); + py::class_ trapContext(errorSourceInfo, "TrapContext", DOC(dai, CrashDump, CrashReport, ErrorSourceInfo, TrapContext)); + py::class_ threadCallstack(crashReport, "ThreadCallstack", DOC(dai, CrashDump, CrashReport, ThreadCallstack)); + py::class_ callstackContext(threadCallstack, "CallstackContext", DOC(dai, CrashDump, CrashReport, ThreadCallstack, CallstackContext)); py::class_ boardConfig(m, "BoardConfig", DOC(dai, BoardConfig)); py::class_ boardConfigUsb(boardConfig, "USB", DOC(dai, BoardConfig, USB)); py::class_ boardConfigNetwork(boardConfig, "Network", DOC(dai, BoardConfig, Network)); @@ -321,6 +343,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ py::enum_ boardConfigGpioPull(boardConfigGpio, "Pull", DOC(dai, BoardConfig, GPIO, Pull)); py::enum_ boardConfigGpioDrive(boardConfigGpio, "Drive", DOC(dai, BoardConfig, GPIO, Drive)); py::class_ boardConfigUart(boardConfig, "UART", DOC(dai, BoardConfig, UART)); + py::class_ boardConfigUvc(boardConfig, "UVC", DOC(dai, BoardConfig, UVC)); struct PyClock{}; py::class_ clock(m, "Clock"); @@ -357,6 +380,8 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("flashBootedVid", &BoardConfig::USB::flashBootedVid) .def_readwrite("flashBootedPid", &BoardConfig::USB::flashBootedPid) .def_readwrite("maxSpeed", &BoardConfig::USB::maxSpeed) + .def_readwrite("productName", &BoardConfig::USB::productName) + .def_readwrite("manufacturer", &BoardConfig::USB::manufacturer) ; // Bind BoardConfig::Network @@ -434,24 +459,36 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("tmp", &BoardConfig::UART::tmp) ; + // Bind BoardConfig::UVC + boardConfigUvc + .def(py::init<>()) + .def(py::init()) + .def_readwrite("cameraName", &BoardConfig::UVC::cameraName) + .def_readwrite("width", &BoardConfig::UVC::width) + .def_readwrite("height", &BoardConfig::UVC::height) + .def_readwrite("frameType", &BoardConfig::UVC::frameType) + .def_readwrite("enable", &BoardConfig::UVC::enable) + ; + // Bind BoardConfig boardConfig .def(py::init<>()) - .def_readwrite("usb", &BoardConfig::usb) - .def_readwrite("network", &BoardConfig::network) - .def_readwrite("sysctl", &BoardConfig::sysctl) - .def_readwrite("watchdogTimeoutMs", &BoardConfig::watchdogTimeoutMs) - .def_readwrite("watchdogInitialDelayMs", &BoardConfig::watchdogInitialDelayMs) - .def_readwrite("gpio", &BoardConfig::gpio) - .def_readwrite("uart", &BoardConfig::uart) - .def_readwrite("pcieInternalClock", &BoardConfig::pcieInternalClock) - .def_readwrite("usb3PhyInternalClock", &BoardConfig::usb3PhyInternalClock) - .def_readwrite("mipi4LaneRgb", &BoardConfig::mipi4LaneRgb) - .def_readwrite("emmc", &BoardConfig::emmc) - .def_readwrite("logPath", &BoardConfig::logPath) - .def_readwrite("logSizeMax", 
&BoardConfig::logSizeMax) - .def_readwrite("logVerbosity", &BoardConfig::logVerbosity) - .def_readwrite("logDevicePrints", &BoardConfig::logDevicePrints) + .def_readwrite("usb", &BoardConfig::usb, DOC(dai, BoardConfig, usb)) + .def_readwrite("network", &BoardConfig::network, DOC(dai, BoardConfig, network)) + .def_readwrite("sysctl", &BoardConfig::sysctl, DOC(dai, BoardConfig, sysctl)) + .def_readwrite("watchdogTimeoutMs", &BoardConfig::watchdogTimeoutMs, DOC(dai, BoardConfig, watchdogTimeoutMs)) + .def_readwrite("watchdogInitialDelayMs", &BoardConfig::watchdogInitialDelayMs, DOC(dai, BoardConfig, watchdogInitialDelayMs)) + .def_readwrite("gpio", &BoardConfig::gpio, DOC(dai, BoardConfig, gpio)) + .def_readwrite("uart", &BoardConfig::uart, DOC(dai, BoardConfig, uart)) + .def_readwrite("pcieInternalClock", &BoardConfig::pcieInternalClock, DOC(dai, BoardConfig, pcieInternalClock)) + .def_readwrite("usb3PhyInternalClock", &BoardConfig::usb3PhyInternalClock, DOC(dai, BoardConfig, usb3PhyInternalClock)) + .def_readwrite("mipi4LaneRgb", &BoardConfig::mipi4LaneRgb, DOC(dai, BoardConfig, mipi4LaneRgb)) + .def_readwrite("emmc", &BoardConfig::emmc, DOC(dai, BoardConfig, emmc)) + .def_readwrite("logPath", &BoardConfig::logPath, DOC(dai, BoardConfig, logPath)) + .def_readwrite("logSizeMax", &BoardConfig::logSizeMax, DOC(dai, BoardConfig, logSizeMax)) + .def_readwrite("logVerbosity", &BoardConfig::logVerbosity, DOC(dai, BoardConfig, logVerbosity)) + .def_readwrite("logDevicePrints", &BoardConfig::logDevicePrints, DOC(dai, BoardConfig, logDevicePrints)) + .def_readwrite("uvc", &BoardConfig::uvc, DOC(dai, BoardConfig, uvc)) ; // Bind Device::Config @@ -459,6 +496,68 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def(py::init<>()) .def_readwrite("version", &Device::Config::version) .def_readwrite("board", &Device::Config::board) + .def_readwrite("nonExclusiveMode", &Device::Config::nonExclusiveMode) + .def_readwrite("outputLogLevel", &Device::Config::outputLogLevel) + .def_readwrite("logLevel", &Device::Config::logLevel) + ; + + // Bind CrashDump + crashDump + .def(py::init<>()) + .def("serializeToJson", &CrashDump::serializeToJson, DOC(dai, CrashDump, serializeToJson)) + + .def_readwrite("crashReports", &CrashDump::crashReports, DOC(dai, CrashDump, crashReports)) + .def_readwrite("depthaiCommitHash", &CrashDump::depthaiCommitHash, DOC(dai, CrashDump, depthaiCommitHash)) + .def_readwrite("deviceId", &CrashDump::deviceId, DOC(dai, CrashDump, deviceId)) + ; + + crashReport + .def(py::init<>()) + .def_readwrite("processor", &CrashDump::CrashReport::processor, DOC(dai, CrashDump, CrashReport, processor)) + .def_readwrite("errorSource", &CrashDump::CrashReport::errorSource, DOC(dai, CrashDump, CrashReport, errorSource)) + .def_readwrite("crashedThreadId", &CrashDump::CrashReport::crashedThreadId, DOC(dai, CrashDump, CrashReport, crashedThreadId)) + .def_readwrite("threadCallstack", &CrashDump::CrashReport::threadCallstack, DOC(dai, CrashDump, CrashReport, threadCallstack)) + ; + + errorSourceInfo + .def(py::init<>()) + .def_readwrite("assertContext", &CrashDump::CrashReport::ErrorSourceInfo::assertContext, DOC(dai, CrashDump, CrashReport, ErrorSourceInfo, assertContext)) + .def_readwrite("trapContext", &CrashDump::CrashReport::ErrorSourceInfo::trapContext, DOC(dai, CrashDump, CrashReport, ErrorSourceInfo, trapContext)) + .def_readwrite("errorId", &CrashDump::CrashReport::ErrorSourceInfo::errorId, DOC(dai, CrashDump, CrashReport, ErrorSourceInfo, errorId)) + ; + + assertContext + 
.def(py::init<>()) + .def_readwrite("fileName", &CrashDump::CrashReport::ErrorSourceInfo::AssertContext::fileName, DOC(dai, CrashDump, CrashReport, ErrorSourceInfo, AssertContext, fileName)) + .def_readwrite("functionName", &CrashDump::CrashReport::ErrorSourceInfo::AssertContext::functionName, DOC(dai, CrashDump, CrashReport, ErrorSourceInfo, AssertContext, functionName)) + .def_readwrite("line", &CrashDump::CrashReport::ErrorSourceInfo::AssertContext::line, DOC(dai, CrashDump, CrashReport, ErrorSourceInfo, AssertContext, line)) + ; + + trapContext + .def(py::init<>()) + .def_readwrite("trapNumber", &CrashDump::CrashReport::ErrorSourceInfo::TrapContext::trapNumber, DOC(dai, CrashDump, CrashReport, ErrorSourceInfo, TrapContext, trapNumber)) + .def_readwrite("trapAddress", &CrashDump::CrashReport::ErrorSourceInfo::TrapContext::trapAddress, DOC(dai, CrashDump, CrashReport, ErrorSourceInfo, TrapContext, trapAddress)) + .def_readwrite("trapName", &CrashDump::CrashReport::ErrorSourceInfo::TrapContext::trapName, DOC(dai, CrashDump, CrashReport, ErrorSourceInfo, TrapContext, trapName)) + ; + + threadCallstack + .def(py::init<>()) + .def_readwrite("threadId", &CrashDump::CrashReport::ThreadCallstack::threadId, DOC(dai, CrashDump, CrashReport, ThreadCallstack, threadId)) + .def_readwrite("threadName", &CrashDump::CrashReport::ThreadCallstack::threadName, DOC(dai, CrashDump, CrashReport, ThreadCallstack, threadName)) + .def_readwrite("stackBottom", &CrashDump::CrashReport::ThreadCallstack::stackBottom, DOC(dai, CrashDump, CrashReport, ThreadCallstack, stackBottom)) + .def_readwrite("stackTop", &CrashDump::CrashReport::ThreadCallstack::stackTop, DOC(dai, CrashDump, CrashReport, ThreadCallstack, stackTop)) + .def_readwrite("stackPointer", &CrashDump::CrashReport::ThreadCallstack::stackPointer, DOC(dai, CrashDump, CrashReport, ThreadCallstack, stackPointer)) + .def_readwrite("instructionPointer", &CrashDump::CrashReport::ThreadCallstack::instructionPointer, DOC(dai, CrashDump, CrashReport, ThreadCallstack, instructionPointer)) + .def_readwrite("threadStatus", &CrashDump::CrashReport::ThreadCallstack::threadStatus, DOC(dai, CrashDump, CrashReport, ThreadCallstack, threadStatus)) + .def_readwrite("callStack", &CrashDump::CrashReport::ThreadCallstack::callStack, DOC(dai, CrashDump, CrashReport, ThreadCallstack, callStack)) + ; + + callstackContext + .def(py::init<>()) + .def_readwrite("callSite", &CrashDump::CrashReport::ThreadCallstack::CallstackContext::callSite, DOC(dai, CrashDump, CrashReport, ThreadCallstack, CallstackContext, callSite)) + .def_readwrite("calledTarget", &CrashDump::CrashReport::ThreadCallstack::CallstackContext::calledTarget, DOC(dai, CrashDump, CrashReport, ThreadCallstack, CallstackContext, calledTarget)) + .def_readwrite("framePointer", &CrashDump::CrashReport::ThreadCallstack::CallstackContext::framePointer, DOC(dai, CrashDump, CrashReport, ThreadCallstack, CallstackContext, framePointer)) + .def_readwrite("context", &CrashDump::CrashReport::ThreadCallstack::CallstackContext::context, DOC(dai, CrashDump, CrashReport, ThreadCallstack, CallstackContext, context)) ; // Bind constructors @@ -480,10 +579,11 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def_static("getAnyAvailableDevice", [](){ return DeviceBase::getAnyAvailableDevice(); }, DOC(dai, DeviceBase, getAnyAvailableDevice, 2)) .def_static("getFirstAvailableDevice", &DeviceBase::getFirstAvailableDevice, py::arg("skipInvalidDevices") = true, DOC(dai, DeviceBase, getFirstAvailableDevice)) 
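A short sketch of the constructor and crash-dump surface added above. The device string is a placeholder, and getCrashDump() is an assumed accessor for the CrashDump type; only the CrashDump fields themselves appear in this hunk.

import depthai as dai

# New overloads: construct a device directly from a name or MxID string.
device = dai.Device("placeholder-name-or-mxid")
print(device.getDeviceName())

# The usb2Mode constructors now emit DeprecationWarning; prefer a UsbSpeed argument:
#   dai.Device("placeholder-name-or-mxid", maxUsbSpeed=dai.UsbSpeed.HIGH)

# CrashDump fields bound above; the retrieval call is an assumption, not part of this hunk.
# dump = device.getCrashDump()
# print(dump.deviceId, dump.depthaiCommitHash)
# print(dump.serializeToJson())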
.def_static("getAllAvailableDevices", &DeviceBase::getAllAvailableDevices, DOC(dai, DeviceBase, getAllAvailableDevices)) - .def_static("getEmbeddedDeviceBinary", py::overload_cast(&DeviceBase::getEmbeddedDeviceBinary), py::arg("usb2Mode"), py::arg("version") = OpenVINO::DEFAULT_VERSION, DOC(dai, DeviceBase, getEmbeddedDeviceBinary)) + .def_static("getEmbeddedDeviceBinary", py::overload_cast(&DeviceBase::getEmbeddedDeviceBinary), py::arg("usb2Mode"), py::arg("version") = OpenVINO::VERSION_UNIVERSAL, DOC(dai, DeviceBase, getEmbeddedDeviceBinary)) .def_static("getEmbeddedDeviceBinary", py::overload_cast(&DeviceBase::getEmbeddedDeviceBinary), py::arg("config"), DOC(dai, DeviceBase, getEmbeddedDeviceBinary, 2)) .def_static("getDeviceByMxId", &DeviceBase::getDeviceByMxId, py::arg("mxId"), DOC(dai, DeviceBase, getDeviceByMxId)) .def_static("getAllConnectedDevices", &DeviceBase::getAllConnectedDevices, DOC(dai, DeviceBase, getAllConnectedDevices)) + .def_static("getGlobalProfilingData", &DeviceBase::getGlobalProfilingData, DOC(dai, DeviceBase, getGlobalProfilingData)) // methods .def("getBootloaderVersion", &DeviceBase::getBootloaderVersion, DOC(dai, DeviceBase, getBootloaderVersion)) @@ -530,6 +630,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def("getUsbSpeed", [](DeviceBase& d) { py::gil_scoped_release release; return d.getUsbSpeed(); }, DOC(dai, DeviceBase, getUsbSpeed)) .def("getDeviceInfo", [](DeviceBase& d) { py::gil_scoped_release release; return d.getDeviceInfo(); }, DOC(dai, DeviceBase, getDeviceInfo)) .def("getMxId", [](DeviceBase& d) { py::gil_scoped_release release; return d.getMxId(); }, DOC(dai, DeviceBase, getMxId)) + .def("getProfilingData", [](DeviceBase& d) { py::gil_scoped_release release; return d.getProfilingData(); }, DOC(dai, DeviceBase, getProfilingData)) .def("readCalibration", [](DeviceBase& d) { py::gil_scoped_release release; return d.readCalibration(); }, DOC(dai, DeviceBase, readCalibration)) .def("flashCalibration", [](DeviceBase& d, CalibrationHandler calibrationDataHandler) { py::gil_scoped_release release; return d.flashCalibration(calibrationDataHandler); }, py::arg("calibrationDataHandler"), DOC(dai, DeviceBase, flashCalibration)) .def("setXLinkChunkSize", [](DeviceBase& d, int s) { py::gil_scoped_release release; d.setXLinkChunkSize(s); }, py::arg("sizeBytes"), DOC(dai, DeviceBase, setXLinkChunkSize)) @@ -551,7 +652,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def("flashFactoryEepromClear", [](DeviceBase& d) { py::gil_scoped_release release; d.flashFactoryEepromClear(); }, DOC(dai, DeviceBase, flashFactoryEepromClear)) .def("setTimesync", [](DeviceBase& d, std::chrono::milliseconds p, int s, bool r) { py::gil_scoped_release release; return d.setTimesync(p,s,r); }, DOC(dai, DeviceBase, setTimesync)) .def("setTimesync", [](DeviceBase& d, bool e) { py::gil_scoped_release release; return d.setTimesync(e); }, py::arg("enable"), DOC(dai, DeviceBase, setTimesync, 2)) - .def("getDeviceName", [](DeviceBase& d) { py::gil_scoped_release release; return d.getDeviceName(); }, DOC(dai, DeviceBase, getDeviceName)) + .def("getDeviceName", [](DeviceBase& d) { std::string name; { py::gil_scoped_release release; name = d.getDeviceName(); } return py::bytes(name).attr("decode")("utf-8", "replace"); }, DOC(dai, DeviceBase, getDeviceName)) ; diff --git a/src/DeviceBootloaderBindings.cpp b/src/DeviceBootloaderBindings.cpp index c91d4365e..b862c6f80 100644 --- a/src/DeviceBootloaderBindings.cpp +++ 
b/src/DeviceBootloaderBindings.cpp @@ -131,8 +131,10 @@ void DeviceBootloaderBindings::bind(pybind11::module& m, void* pCallstack){ .def_static("getEmbeddedBootloaderVersion", &DeviceBootloader::getEmbeddedBootloaderVersion, DOC(dai, DeviceBootloader, getEmbeddedBootloaderVersion)) .def_static("getEmbeddedBootloaderBinary", &DeviceBootloader::getEmbeddedBootloaderBinary, DOC(dai, DeviceBootloader, getEmbeddedBootloaderBinary)) - .def(py::init(), py::arg("devInfo"), py::arg("allowFlashingBootloader") = false, DOC(dai, DeviceBootloader, DeviceBootloader)) - .def(py::init(), py::arg("devInfo"), py::arg("pathToCmd"), py::arg("allowFlashingBootloader") = false, DOC(dai, DeviceBootloader, DeviceBootloader, 2)) + .def(py::init(), py::arg("devInfo"), py::arg("allowFlashingBootloader") = false, DOC(dai, DeviceBootloader, DeviceBootloader, 4)) + .def(py::init(), py::arg("devInfo"), py::arg("pathToCmd"), py::arg("allowFlashingBootloader") = false, DOC(dai, DeviceBootloader, DeviceBootloader, 5)) + .def(py::init(), py::arg("nameOrDeviceId"), py::arg("allowFlashingBootloader") = false, DOC(dai, DeviceBootloader, DeviceBootloader, 6)) + .def("flash", [](DeviceBootloader& db, std::function progressCallback, const Pipeline& pipeline, bool compress, std::string applicationName, DeviceBootloader::Memory memory, bool checkChecksum) { py::gil_scoped_release release; return db.flash(progressCallback, pipeline, compress, applicationName, memory, checkChecksum); }, py::arg("progressCallback"), py::arg("pipeline"), py::arg("compress") = false, py::arg("applicationName") = "", py::arg("memory") = DeviceBootloader::Memory::AUTO, py::arg("checkChecksum") = false, DOC(dai, DeviceBootloader, flash)) .def("flash", [](DeviceBootloader& db, const Pipeline& pipeline, bool compress, std::string applicationName, DeviceBootloader::Memory memory, bool checkChecksum) { py::gil_scoped_release release; return db.flash(pipeline, compress, applicationName, memory, checkChecksum); }, py::arg("pipeline"), py::arg("compress") = false, py::arg("applicationName") = "", py::arg("memory") = DeviceBootloader::Memory::AUTO, py::arg("checkChecksum") = false, DOC(dai, DeviceBootloader, flash, 2)) diff --git a/src/XLinkBindings.cpp b/src/XLinkBindings.cpp index d6bb4a4c2..b693a49f9 100644 --- a/src/XLinkBindings.cpp +++ b/src/XLinkBindings.cpp @@ -141,6 +141,7 @@ void XLinkBindings::bind(pybind11::module &m, void *pCallstack) .def_static("getFirstDevice", &XLinkConnection::getFirstDevice, py::arg("state") = X_LINK_ANY_STATE, py::arg("skipInvalidDevice") = true) .def_static("getDeviceByMxId", &XLinkConnection::getDeviceByMxId, py::arg("mxId"), py::arg("state") = X_LINK_ANY_STATE, py::arg("skipInvalidDevice") = true) .def_static("bootBootloader", &XLinkConnection::bootBootloader, py::arg("devInfo")) + .def_static("getGlobalProfilingData", &XLinkConnection::getGlobalProfilingData, DOC(dai, XLinkConnection, getGlobalProfilingData)) ; xLinkError diff --git a/src/device_bindings.cpp b/src/device_bindings.cpp deleted file mode 100644 index 50a855399..000000000 --- a/src/device_bindings.cpp +++ /dev/null @@ -1,133 +0,0 @@ -#include "device_bindings.hpp" - -//std -#include - -//depthai-core -#include "depthai/device.hpp" -//#include "depthai/host_capture_command.hpp" - -//depthai-shared -//#include "depthai-shared/metadata/capture_metadata.hpp" - -//project -#include "pybind11_common.hpp" - - - - -// Binding for HostDataPacket -namespace py = pybind11; - -void init_binding_device(pybind11::module& m){ - - using namespace dai; - - py::class_(m, 
"Device") - .def(py::init<>()) - .def(py::init()) - .def(py::init()) - .def( - "create_pipeline", - [](Device& device, py::dict config) - { - - - // str(dict) for string representation uses ['] , but JSON requires ["] - // fast & dirty solution: - std::string str = py::str(config); - boost::replace_all(str, "\'", "\""); - boost::replace_all(str, "None", "null"); - boost::replace_all(str, "True", "true"); - boost::replace_all(str, "False", "false"); - // TODO: make better json serialization - - return device.create_pipeline(str); - }, - "Function for pipeline creation", - py::arg("config") = py::dict() - ) - .def( - "get_available_streams", - &Device::get_available_streams, - "Returns available streams, that possible to retreive from the device." - ) - .def( - "request_jpeg", - &Device::request_jpeg, - "Function to request a still JPEG encoded image ('jpeg' stream must be enabled)" - ) - .def( - "request_af_trigger", - &Device::request_af_trigger, - "Function to request autofocus trigger" - ) - .def( - "request_af_mode", - &Device::request_af_mode, - "Function to request a certain autofocus mode (Check 'AutofocusMode.__members__')" - ) - .def( - "send_disparity_confidence_threshold", - &Device::send_disparity_confidence_threshold, - "Function to send disparity confidence threshold for SGBM" - ) - - .def( - "get_nn_to_depth_bbox_mapping", - &Device::get_nn_to_depth_bbox_mapping, - "Returns NN bounding-box to depth mapping as a dict of coords: off_x, off_y, max_w, max_h." - ) - - // calibration data bindings - .def( - "get_left_intrinsic", - &Device::get_left_intrinsic, - "Returns 3x3 matrix defining the intrinsic parameters of the left camera of the stereo setup." - ) - - .def( - "get_left_homography", - &Device::get_left_homography, - "Returns 3x3 matrix defining the homography to rectify the left camera of the stereo setup." - ) - - .def( - "get_right_intrinsic", - &Device::get_right_intrinsic, - "Returns 3x3 matrix defining the intrinsic parameters of the right camera of the stereo setup." - ) - - .def( - "get_right_homography", - &Device::get_right_homography, - "Returns 3x3 matrix defining the homography to rectify the right camera of the stereo setup." - ) - - .def( - "get_rotation", - &Device::get_rotation, - "Returns 3x3 matrix defining how much the right camera is rotated w.r.t left camera." - ) - - .def( - "get_translation", - &Device::get_translation, - "Returns a vector defining how much the right camera is translated w.r.t left camera." 
- ) - - - - ; - - - py::enum_(m, "AutofocusMode") - .value("AF_MODE_AUTO", CaptureMetadata::AutofocusMode::AF_MODE_AUTO) - .value("AF_MODE_MACRO", CaptureMetadata::AutofocusMode::AF_MODE_MACRO) - .value("AF_MODE_CONTINUOUS_VIDEO", CaptureMetadata::AutofocusMode::AF_MODE_CONTINUOUS_VIDEO) - .value("AF_MODE_CONTINUOUS_PICTURE", CaptureMetadata::AutofocusMode::AF_MODE_CONTINUOUS_PICTURE) - .value("AF_MODE_EDOF", CaptureMetadata::AutofocusMode::AF_MODE_EDOF) - ; - -} - diff --git a/src/device_bindings.hpp b/src/device_bindings.hpp deleted file mode 100644 index 78a7d43f0..000000000 --- a/src/device_bindings.hpp +++ /dev/null @@ -1,9 +0,0 @@ -#pragma once - -//pybind11 -#include "pybind11_common.hpp" - -// depthai-api -#include "depthai/device.hpp" - -void init_binding_device(pybind11::module& m); \ No newline at end of file diff --git a/src/openvino/OpenVINOBindings.cpp b/src/openvino/OpenVINOBindings.cpp index 693eda679..a49947b16 100644 --- a/src/openvino/OpenVINOBindings.cpp +++ b/src/openvino/OpenVINOBindings.cpp @@ -53,6 +53,7 @@ void OpenVINOBindings::bind(pybind11::module& m, void* pCallstack){ .value("VERSION_2021_3", OpenVINO::Version::VERSION_2021_3) .value("VERSION_2021_4", OpenVINO::Version::VERSION_2021_4) .value("VERSION_2022_1", OpenVINO::Version::VERSION_2022_1) + .value("VERSION_UNIVERSAL", OpenVINO::Version::VERSION_UNIVERSAL) .export_values() ; // DEFAULT_VERSION binding diff --git a/src/pipeline/CommonBindings.cpp b/src/pipeline/CommonBindings.cpp index 817c3fd59..82444a6a3 100644 --- a/src/pipeline/CommonBindings.cpp +++ b/src/pipeline/CommonBindings.cpp @@ -1,5 +1,8 @@ #include "CommonBindings.hpp" +// Libraries +#include "hedley/hedley.h" + // depthai-shared #include "depthai-shared/common/CameraBoardSocket.hpp" #include "depthai-shared/common/EepromData.hpp" @@ -21,9 +24,12 @@ #include "depthai-shared/common/RotatedRect.hpp" #include "depthai-shared/common/Rect.hpp" #include "depthai-shared/common/Colormap.hpp" +#include "depthai-shared/common/FrameEvent.hpp" // depthai #include "depthai/common/CameraFeatures.hpp" +#include "depthai/common/CameraExposureOffset.hpp" +#include "depthai/utility/ProfilingData.hpp" void CommonBindings::bind(pybind11::module& m, void* pCallstack){ @@ -36,6 +42,7 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ py::enum_ cameraBoardSocket(m, "CameraBoardSocket", DOC(dai, CameraBoardSocket)); py::enum_ cameraSensorType(m, "CameraSensorType", DOC(dai, CameraSensorType)); py::enum_ cameraImageOrientation(m, "CameraImageOrientation", DOC(dai, CameraImageOrientation)); + py::class_ cameraSensorConfig(m, "CameraSensorConfig", DOC(dai, CameraSensorConfig)); py::class_ cameraFeatures(m, "CameraFeatures", DOC(dai, CameraFeatures)); py::class_ memoryInfo(m, "MemoryInfo", DOC(dai, MemoryInfo)); py::class_ chipTemperature(m, "ChipTemperature", DOC(dai, ChipTemperature)); @@ -53,7 +60,10 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ py::class_ detectionParserOptions(m, "DetectionParserOptions", DOC(dai, DetectionParserOptions)); py::class_ rotatedRect(m, "RotatedRect", DOC(dai, RotatedRect)); py::class_ rect(m, "Rect", DOC(dai, Rect)); + py::enum_ cameraExposureOffset(m, "CameraExposureOffset"); py::enum_ colormap(m, "Colormap", DOC(dai, Colormap)); + py::enum_ frameEvent(m, "FrameEvent", DOC(dai, FrameEvent)); + py::class_ profilingData(m, "ProfilingData", DOC(dai, ProfilingData)); /////////////////////////////////////////////////////////////////////// 
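The deleted device_bindings.cpp/.hpp above carried the last gen1 entry points (Device.create_pipeline with a config dict, AutofocusMode, stream-name requests). The replacement is the explicit pipeline/queue flow already used by the examples in this diff; a minimal sketch, with the stream name chosen arbitrarily.

import depthai as dai

pipeline = dai.Pipeline()

camRgb = pipeline.create(dai.node.ColorCamera)
camRgb.setPreviewSize(300, 300)

xout = pipeline.create(dai.node.XLinkOut)
xout.setStreamName("preview")            # arbitrary stream name
camRgb.preview.link(xout.input)

with dai.Device(pipeline) as device:
    q = device.getOutputQueue("preview", maxSize=4, blocking=False)
    frame = q.get().getCvFrame()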
/////////////////////////////////////////////////////////////////////// @@ -126,12 +136,12 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ ; // CameraBoardSocket enum bindings + + // Deprecated + HEDLEY_DIAGNOSTIC_PUSH + HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED cameraBoardSocket .value("AUTO", CameraBoardSocket::AUTO) - .value("RGB", CameraBoardSocket::RGB) - .value("LEFT", CameraBoardSocket::LEFT) - .value("RIGHT", CameraBoardSocket::RIGHT) - .value("CENTER", CameraBoardSocket::CENTER) .value("CAM_A", CameraBoardSocket::CAM_A) .value("CAM_B", CameraBoardSocket::CAM_B) .value("CAM_C", CameraBoardSocket::CAM_C) @@ -141,7 +151,31 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ .value("CAM_F", CameraBoardSocket::CAM_F) .value("CAM_G", CameraBoardSocket::CAM_G) .value("CAM_H", CameraBoardSocket::CAM_H) + + .value("RGB", CameraBoardSocket::RGB, "**Deprecated:** Use CAM_A or address camera by name instead") + .value("LEFT", CameraBoardSocket::LEFT, "**Deprecated:** Use CAM_B or address camera by name instead") + .value("RIGHT", CameraBoardSocket::RIGHT, "**Deprecated:** Use CAM_C or address camera by name instead") + .value("CENTER", CameraBoardSocket::CENTER, "**Deprecated:** Use CAM_A or address camera by name instead") + + // Deprecated overriden + .def_property_readonly_static("RGB", [](py::object){ + PyErr_WarnEx(PyExc_DeprecationWarning, "RGB is deprecated, use CAM_A or address camera by name instead.", 1); + return CameraBoardSocket::CAM_A; + }) + .def_property_readonly_static("CENTER", [](py::object){ + PyErr_WarnEx(PyExc_DeprecationWarning, "CENTER is deprecated, use CAM_A or address camera by name instead.", 1); + return CameraBoardSocket::CAM_A; + }) + .def_property_readonly_static("LEFT", [](py::object){ + PyErr_WarnEx(PyExc_DeprecationWarning, "LEFT is deprecated, use CAM_B or address camera by name instead.", 1); + return CameraBoardSocket::CAM_B; + }) + .def_property_readonly_static("RIGHT", [](py::object){ + PyErr_WarnEx(PyExc_DeprecationWarning, "RIGHT is deprecated, use CAM_C or address camera by name instead.", 1); + return CameraBoardSocket::CAM_C; + }) ; + HEDLEY_DIAGNOSTIC_POP // CameraSensorType enum bindings cameraSensorType @@ -171,6 +205,7 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("supportedTypes", &CameraFeatures::supportedTypes) .def_readwrite("hasAutofocus", &CameraFeatures::hasAutofocus) .def_readwrite("name", &CameraFeatures::name) + .def_readwrite("configs", &CameraFeatures::configs) .def("__repr__", [](CameraFeatures& camera) { std::stringstream stream; stream << camera; @@ -178,6 +213,16 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ }); ; + // CameraSensorConfig + cameraSensorConfig + .def(py::init<>()) + .def_readwrite("width", &CameraSensorConfig::width) + .def_readwrite("height", &CameraSensorConfig::height) + .def_readwrite("minFps", &CameraSensorConfig::minFps) + .def_readwrite("maxFps", &CameraSensorConfig::maxFps) + .def_readwrite("type", &CameraSensorConfig::type) + ; + // MemoryInfo memoryInfo .def(py::init<>()) @@ -304,6 +349,12 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("iouThreshold", &DetectionParserOptions::iouThreshold) ; + cameraExposureOffset + .value("START", CameraExposureOffset::START) + .value("MIDDLE", CameraExposureOffset::MIDDLE) + .value("END", CameraExposureOffset::END) + ; + colormap .value("NONE", Colormap::NONE) .value("JET", Colormap::JET) @@ -332,4 +383,15 @@ void 
CommonBindings::bind(pybind11::module& m, void* pCallstack){ // .value("DEEPGREEN", Colormap::DEEPGREEN) ; + frameEvent + .value("NONE", FrameEvent::NONE) + .value("READOUT_START", FrameEvent::READOUT_START) + .value("READOUT_END", FrameEvent::READOUT_END) + ; + + profilingData + .def_readwrite("numBytesWritten", &ProfilingData::numBytesWritten, DOC(dai, ProfilingData, numBytesWritten)) + .def_readwrite("numBytesRead", &ProfilingData::numBytesRead, DOC(dai, ProfilingData, numBytesRead)) + ; + } diff --git a/src/pipeline/PipelineBindings.cpp b/src/pipeline/PipelineBindings.cpp index 56abf17e2..1571ae0da 100644 --- a/src/pipeline/PipelineBindings.cpp +++ b/src/pipeline/PipelineBindings.cpp @@ -12,6 +12,7 @@ #include "depthai/pipeline/node/BenchmarkIn.hpp" #include "depthai/pipeline/node/NeuralNetwork.hpp" #include "depthai/pipeline/node/ColorCamera.hpp" +#include "depthai/pipeline/node/Camera.hpp" #include "depthai/pipeline/node/VideoEncoder.hpp" #include "depthai/pipeline/node/SPIOut.hpp" #include "depthai/pipeline/node/SPIIn.hpp" @@ -29,6 +30,7 @@ #include "depthai/pipeline/node/FeatureTracker.hpp" #include "depthai/pipeline/node/AprilTag.hpp" #include "depthai/pipeline/node/DetectionParser.hpp" +#include "depthai/pipeline/node/UVC.hpp" // depthai-shared #include "depthai-shared/properties/GlobalProperties.hpp" @@ -75,6 +77,8 @@ void PipelineBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("cameraTuningBlobSize", &GlobalProperties::cameraTuningBlobSize, DOC(dai, GlobalProperties, cameraTuningBlobSize)) .def_readwrite("cameraTuningBlobUri", &GlobalProperties::cameraTuningBlobUri, DOC(dai, GlobalProperties, cameraTuningBlobUri)) .def_readwrite("xlinkChunkSize", &GlobalProperties::xlinkChunkSize, DOC(dai, GlobalProperties, xlinkChunkSize)) + .def_readwrite("sippBufferSize", &GlobalProperties::sippBufferSize, DOC(dai, GlobalProperties, sippBufferSize)) + .def_readwrite("sippDmaBufferSize", &GlobalProperties::sippDmaBufferSize, DOC(dai, GlobalProperties, sippDmaBufferSize)) ; // bind pipeline @@ -96,11 +100,13 @@ void PipelineBindings::bind(pybind11::module& m, void* pCallstack){ // .def("unlink", &Pipeline::unlink, DOC(dai, Pipeline, unlink), DOC(dai, Pipeline, unlink)) .def("getAssetManager", static_cast(&Pipeline::getAssetManager), py::return_value_policy::reference_internal, DOC(dai, Pipeline, getAssetManager)) .def("getAssetManager", static_cast(&Pipeline::getAssetManager), py::return_value_policy::reference_internal, DOC(dai, Pipeline, getAssetManager)) - .def("setOpenVINOVersion", &Pipeline::setOpenVINOVersion, py::arg("version") = OpenVINO::DEFAULT_VERSION, DOC(dai, Pipeline, setOpenVINOVersion)) + .def("setOpenVINOVersion", &Pipeline::setOpenVINOVersion, py::arg("version"), DOC(dai, Pipeline, setOpenVINOVersion)) .def("getOpenVINOVersion", &Pipeline::getOpenVINOVersion, DOC(dai, Pipeline, getOpenVINOVersion)) .def("getRequiredOpenVINOVersion", &Pipeline::getRequiredOpenVINOVersion, DOC(dai, Pipeline, getRequiredOpenVINOVersion)) .def("setCameraTuningBlobPath", &Pipeline::setCameraTuningBlobPath, py::arg("path"), DOC(dai, Pipeline, setCameraTuningBlobPath)) .def("setXLinkChunkSize", &Pipeline::setXLinkChunkSize, py::arg("sizeBytes"), DOC(dai, Pipeline, setXLinkChunkSize)) + .def("setSippBufferSize", &Pipeline::setSippBufferSize, py::arg("sizeBytes"), DOC(dai, Pipeline, setSippBufferSize)) + .def("setSippDmaBufferSize", &Pipeline::setSippDmaBufferSize, py::arg("sizeBytes"), DOC(dai, Pipeline, setSippDmaBufferSize)) .def("setCalibrationData", 
&Pipeline::setCalibrationData, py::arg("calibrationDataHandler"), DOC(dai, Pipeline, setCalibrationData)) .def("getCalibrationData", &Pipeline::getCalibrationData, DOC(dai, Pipeline, getCalibrationData)) .def("getDeviceConfig", &Pipeline::getDeviceConfig, DOC(dai, Pipeline, getDeviceConfig)) @@ -140,6 +146,7 @@ void PipelineBindings::bind(pybind11::module& m, void* pCallstack){ .def("createFeatureTracker", &Pipeline::create) .def("createAprilTag", &Pipeline::create) .def("createDetectionParser", &Pipeline::create) + .def("createUVC", &Pipeline::create) ; diff --git a/src/pipeline/datatype/CameraControlBindings.cpp b/src/pipeline/datatype/CameraControlBindings.cpp index a3d458366..f6337bb6b 100644 --- a/src/pipeline/datatype/CameraControlBindings.cpp +++ b/src/pipeline/datatype/CameraControlBindings.cpp @@ -218,12 +218,14 @@ std::vector camCtrlAttr; .def("setMisc", py::overload_cast(&CameraControl::setMisc), py::arg("control"), py::arg("value"), DOC(dai, CameraControl, setMisc, 2)) .def("setMisc", py::overload_cast(&CameraControl::setMisc), py::arg("control"), py::arg("value"), DOC(dai, CameraControl, setMisc, 3)) .def("clearMiscControls", &CameraControl::clearMiscControls, DOC(dai, CameraControl, clearMiscControls)) + .def("set", &CameraControl::set, py::arg("config"), DOC(dai, CameraControl, set)) // getters .def("getMiscControls", &CameraControl::getMiscControls, DOC(dai, CameraControl, getMiscControls)) .def("getCaptureStill", &CameraControl::getCaptureStill, DOC(dai, CameraControl, getCaptureStill)) .def("getExposureTime", &CameraControl::getExposureTime, DOC(dai, CameraControl, getExposureTime)) .def("getSensitivity", &CameraControl::getSensitivity, DOC(dai, CameraControl, getSensitivity)) .def("getLensPosition", &CameraControl::getLensPosition, DOC(dai, CameraControl, getLensPosition)) + .def("get", &CameraControl::get, DOC(dai, CameraControl, get)) ; // Add also enum attributes from RawCameraControl for (const auto& a : camCtrlAttr) { diff --git a/src/pipeline/datatype/EdgeDetectorConfigBindings.cpp b/src/pipeline/datatype/EdgeDetectorConfigBindings.cpp index fd5e39a11..eaefb6825 100644 --- a/src/pipeline/datatype/EdgeDetectorConfigBindings.cpp +++ b/src/pipeline/datatype/EdgeDetectorConfigBindings.cpp @@ -50,6 +50,8 @@ void bind_edgedetectorconfig(pybind11::module& m, void* pCallstack){ .def(py::init<>()) .def("setSobelFilterKernels", &EdgeDetectorConfig::setSobelFilterKernels, py::arg("horizontalKernel"), py::arg("verticalKernel"), DOC(dai, EdgeDetectorConfig, setSobelFilterKernels)) .def("getConfigData", &EdgeDetectorConfig::getConfigData, DOC(dai, EdgeDetectorConfig, getConfigData)) + .def("get", &EdgeDetectorConfig::get, DOC(dai, EdgeDetectorConfig, get)) + .def("set", &EdgeDetectorConfig::set, py::arg("config"), DOC(dai, EdgeDetectorConfig, set)) ; } diff --git a/src/pipeline/datatype/IMUDataBindings.cpp b/src/pipeline/datatype/IMUDataBindings.cpp index 91bcbc3d2..4aedeafe6 100644 --- a/src/pipeline/datatype/IMUDataBindings.cpp +++ b/src/pipeline/datatype/IMUDataBindings.cpp @@ -48,6 +48,7 @@ void bind_imudata(pybind11::module& m, void* pCallstack){ .def_readwrite("tsDevice", &IMUReport::tsDevice) .def("getTimestamp", &IMUReport::getTimestamp, DOC(dai, IMUReport, getTimestamp)) .def("getTimestampDevice", &IMUReport::getTimestampDevice, DOC(dai, IMUReport, getTimestampDevice)) + .def("getSequenceNum", &IMUReport::getSequenceNum, DOC(dai, IMUReport, getSequenceNum)) ; imuReportAccuracy diff --git a/src/pipeline/datatype/ImageManipConfigBindings.cpp 
b/src/pipeline/datatype/ImageManipConfigBindings.cpp index 847d69d5a..694912999 100644 --- a/src/pipeline/datatype/ImageManipConfigBindings.cpp +++ b/src/pipeline/datatype/ImageManipConfigBindings.cpp @@ -97,7 +97,7 @@ void bind_imagemanipconfig(pybind11::module& m, void* pCallstack){ imageManipConfig .def(py::init<>()) // setters - .def("setCropRect", static_cast(&ImageManipConfig::setCropRect), py::arg("xmin"), py::arg("ymin"), py::arg("xmax"), py::arg("xmax"), DOC(dai, ImageManipConfig, setCropRect)) + .def("setCropRect", static_cast(&ImageManipConfig::setCropRect), py::arg("xmin"), py::arg("ymin"), py::arg("xmax"), py::arg("ymax"), DOC(dai, ImageManipConfig, setCropRect)) .def("setCropRect", static_cast)>(&ImageManipConfig::setCropRect), py::arg("coordinates"), DOC(dai, ImageManipConfig, setCropRect, 2)) .def("setCropRotatedRect", &ImageManipConfig::setCropRotatedRect, py::arg("rr"), py::arg("normalizedCoords") = true, DOC(dai, ImageManipConfig, setCropRotatedRect)) .def("setCenterCrop", &ImageManipConfig::setCenterCrop, py::arg("ratio"), py::arg("whRatio")=1.0f, DOC(dai, ImageManipConfig, setCenterCrop)) @@ -120,6 +120,7 @@ void bind_imagemanipconfig(pybind11::module& m, void* pCallstack){ .def("setReusePreviousImage", &ImageManipConfig::setReusePreviousImage, py::arg("reuse"), DOC(dai, ImageManipConfig, setReusePreviousImage)) .def("setSkipCurrentImage", &ImageManipConfig::setSkipCurrentImage, py::arg("skip"), DOC(dai, ImageManipConfig, setSkipCurrentImage)) .def("setKeepAspectRatio", &ImageManipConfig::setKeepAspectRatio, py::arg("keep"), DOC(dai, ImageManipConfig, setKeepAspectRatio)) + .def("set", &ImageManipConfig::set, py::arg("config"), DOC(dai, ImageManipConfig, set)) // getters .def("getCropXMin", &ImageManipConfig::getCropXMin, DOC(dai, ImageManipConfig, getCropXMin)) @@ -133,6 +134,7 @@ void bind_imagemanipconfig(pybind11::module& m, void* pCallstack){ .def("getFormatConfig", &ImageManipConfig::getFormatConfig, DOC(dai, ImageManipConfig, getFormatConfig)) .def("isResizeThumbnail", &ImageManipConfig::isResizeThumbnail, DOC(dai, ImageManipConfig, isResizeThumbnail)) .def("getColormap", &ImageManipConfig::getColormap, DOC(dai, ImageManipConfig, getColormap)) + .def("get", &ImageManipConfig::get, DOC(dai, ImageManipConfig, get)) ; diff --git a/src/pipeline/datatype/ImgFrameBindings.cpp b/src/pipeline/datatype/ImgFrameBindings.cpp index 763e3ae9c..6f57ce67d 100644 --- a/src/pipeline/datatype/ImgFrameBindings.cpp +++ b/src/pipeline/datatype/ImgFrameBindings.cpp @@ -119,8 +119,10 @@ void bind_imgframe(pybind11::module& m, void* pCallstack){ .def(py::init<>()) .def(py::init()) // getters - .def("getTimestamp", &ImgFrame::getTimestamp, DOC(dai, ImgFrame, getTimestamp)) - .def("getTimestampDevice", &ImgFrame::getTimestampDevice, DOC(dai, ImgFrame, getTimestampDevice)) + .def("getTimestamp", py::overload_cast<>(&ImgFrame::getTimestamp, py::const_), DOC(dai, ImgFrame, getTimestamp)) + .def("getTimestampDevice", py::overload_cast<>(&ImgFrame::getTimestampDevice, py::const_), DOC(dai, ImgFrame, getTimestampDevice)) + .def("getTimestamp", py::overload_cast(&ImgFrame::getTimestamp, py::const_), py::arg("offset"), DOC(dai, ImgFrame, getTimestamp)) + .def("getTimestampDevice", py::overload_cast(&ImgFrame::getTimestampDevice, py::const_), py::arg("offset"), DOC(dai, ImgFrame, getTimestampDevice)) .def("getInstanceNum", &ImgFrame::getInstanceNum, DOC(dai, ImgFrame, getInstanceNum)) .def("getCategory", &ImgFrame::getCategory, DOC(dai, ImgFrame, getCategory)) .def("getSequenceNum", 
&ImgFrame::getSequenceNum, DOC(dai, ImgFrame, getSequenceNum)) @@ -245,7 +247,7 @@ void bind_imgframe(pybind11::module& m, void* pCallstack){ + ", actual " + std::to_string(actualSize) + ". Maybe metadataOnly transfer was made?"); } else if(actualSize > requiredSize) { // FIXME check build on Windows - // spdlog::warn("ImgFrame has excess data: actual {}, expected {}", actualSize, requiredSize); + // logger::warn("ImgFrame has excess data: actual {}, expected {}", actualSize, requiredSize); } if(img.getWidth() <= 0 || img.getHeight() <= 0){ throw std::runtime_error("ImgFrame size invalid (width: " + std::to_string(img.getWidth()) + ", height: " + std::to_string(img.getHeight()) + ")"); diff --git a/src/pipeline/datatype/SpatialLocationCalculatorConfigBindings.cpp b/src/pipeline/datatype/SpatialLocationCalculatorConfigBindings.cpp index 9d9b9a61f..7d56a7d5e 100644 --- a/src/pipeline/datatype/SpatialLocationCalculatorConfigBindings.cpp +++ b/src/pipeline/datatype/SpatialLocationCalculatorConfigBindings.cpp @@ -43,8 +43,11 @@ void bind_spatiallocationcalculatorconfig(pybind11::module& m, void* pCallstack) spatialLocationCalculatorAlgorithm .value("AVERAGE", SpatialLocationCalculatorAlgorithm::AVERAGE) + .value("MEAN", SpatialLocationCalculatorAlgorithm::MEAN) .value("MIN", SpatialLocationCalculatorAlgorithm::MIN) .value("MAX", SpatialLocationCalculatorAlgorithm::MAX) + .value("MODE", SpatialLocationCalculatorAlgorithm::MODE) + .value("MEDIAN", SpatialLocationCalculatorAlgorithm::MEDIAN) ; spatialLocationCalculatorConfigData @@ -61,6 +64,8 @@ void bind_spatiallocationcalculatorconfig(pybind11::module& m, void* pCallstack) .def("setROIs", &SpatialLocationCalculatorConfig::setROIs, py::arg("ROIs"), DOC(dai, SpatialLocationCalculatorConfig, setROIs)) .def("addROI", &SpatialLocationCalculatorConfig::addROI, py::arg("ROI"), DOC(dai, SpatialLocationCalculatorConfig, addROI)) .def("getConfigData", &SpatialLocationCalculatorConfig::getConfigData, DOC(dai, SpatialLocationCalculatorConfig, getConfigData)) + .def("set", &SpatialLocationCalculatorConfig::set, py::arg("config"), DOC(dai, SpatialLocationCalculatorConfig, set)) + .def("get", &SpatialLocationCalculatorConfig::get, DOC(dai, SpatialLocationCalculatorConfig, get)) ; } diff --git a/src/pipeline/datatype/SpatialLocationCalculatorDataBindings.cpp b/src/pipeline/datatype/SpatialLocationCalculatorDataBindings.cpp index 52c6a4853..98ff81754 100644 --- a/src/pipeline/datatype/SpatialLocationCalculatorDataBindings.cpp +++ b/src/pipeline/datatype/SpatialLocationCalculatorDataBindings.cpp @@ -37,6 +37,8 @@ void bind_spatiallocationcalculatordata(pybind11::module& m, void* pCallstack){ .def(py::init<>()) .def_readwrite("config", &SpatialLocations::config, DOC(dai, SpatialLocations, config)) .def_readwrite("depthAverage", &SpatialLocations::depthAverage, DOC(dai, SpatialLocations, depthAverage)) + .def_readwrite("depthMode", &SpatialLocations::depthMode, DOC(dai, SpatialLocations, depthMode)) + .def_readwrite("depthMedian", &SpatialLocations::depthMedian, DOC(dai, SpatialLocations, depthMedian)) .def_readwrite("depthMin", &SpatialLocations::depthMin, DOC(dai, SpatialLocations, depthMin)) .def_readwrite("depthMax", &SpatialLocations::depthMax, DOC(dai, SpatialLocations, depthMax)) .def_readwrite("depthAveragePixelCount", &SpatialLocations::depthAveragePixelCount, DOC(dai, SpatialLocations, depthAveragePixelCount)) diff --git a/src/pipeline/datatype/StereoDepthConfigBindings.cpp b/src/pipeline/datatype/StereoDepthConfigBindings.cpp index f91b86eea..da9920303 
100644 --- a/src/pipeline/datatype/StereoDepthConfigBindings.cpp +++ b/src/pipeline/datatype/StereoDepthConfigBindings.cpp @@ -26,6 +26,7 @@ void bind_stereodepthconfig(pybind11::module& m, void* pCallstack){ py::class_ temporalFilter(postProcessing, "TemporalFilter", DOC(dai, RawStereoDepthConfig, PostProcessing, TemporalFilter)); py::enum_ persistencyMode(temporalFilter, "PersistencyMode", DOC(dai, RawStereoDepthConfig, PostProcessing, TemporalFilter, PersistencyMode)); py::class_ thresholdFilter(postProcessing, "ThresholdFilter", DOC(dai, RawStereoDepthConfig, PostProcessing, ThresholdFilter)); + py::class_ brightnessFilter(postProcessing, "BrightnessFilter", DOC(dai, RawStereoDepthConfig, PostProcessing, BrightnessFilter)); py::class_ speckleFilter(postProcessing, "SpeckleFilter", DOC(dai, RawStereoDepthConfig, PostProcessing, SpeckleFilter)); py::class_ decimationFilter(postProcessing, "DecimationFilter", DOC(dai, RawStereoDepthConfig, PostProcessing, DecimationFilter)); py::enum_ decimationMode(decimationFilter, "DecimationMode", DOC(dai, RawStereoDepthConfig, PostProcessing, DecimationFilter, DecimationMode)); @@ -92,6 +93,8 @@ void bind_stereodepthconfig(pybind11::module& m, void* pCallstack){ .def_readwrite("outlierRemoveThreshold", &RawStereoDepthConfig::AlgorithmControl::outlierRemoveThreshold, DOC(dai, RawStereoDepthConfig, AlgorithmControl, outlierRemoveThreshold)) .def_readwrite("outlierCensusThreshold", &RawStereoDepthConfig::AlgorithmControl::outlierCensusThreshold, DOC(dai, RawStereoDepthConfig, AlgorithmControl, outlierCensusThreshold)) .def_readwrite("outlierDiffThreshold", &RawStereoDepthConfig::AlgorithmControl::outlierDiffThreshold, DOC(dai, RawStereoDepthConfig, AlgorithmControl, outlierDiffThreshold)) + .def_readwrite("centerAlignmentShiftFactor", &RawStereoDepthConfig::AlgorithmControl::centerAlignmentShiftFactor, DOC(dai, RawStereoDepthConfig, AlgorithmControl, centerAlignmentShiftFactor)) + .def_readwrite("numInvalidateEdgePixels", &RawStereoDepthConfig::AlgorithmControl::numInvalidateEdgePixels, DOC(dai, RawStereoDepthConfig, AlgorithmControl, numInvalidateEdgePixels)) ; spatialFilter @@ -129,6 +132,12 @@ void bind_stereodepthconfig(pybind11::module& m, void* pCallstack){ .def_readwrite("maxRange", &RawStereoDepthConfig::PostProcessing::ThresholdFilter::maxRange, DOC(dai, RawStereoDepthConfig, PostProcessing, ThresholdFilter, maxRange)) ; + brightnessFilter + .def(py::init<>()) + .def_readwrite("minBrightness", &RawStereoDepthConfig::PostProcessing::BrightnessFilter::minBrightness, DOC(dai, RawStereoDepthConfig, PostProcessing, BrightnessFilter, minBrightness)) + .def_readwrite("maxBrightness", &RawStereoDepthConfig::PostProcessing::BrightnessFilter::maxBrightness, DOC(dai, RawStereoDepthConfig, PostProcessing, BrightnessFilter, maxBrightness)) + ; + speckleFilter .def(py::init<>()) .def_readwrite("enable", &RawStereoDepthConfig::PostProcessing::SpeckleFilter::enable, DOC(dai, RawStereoDepthConfig, PostProcessing, SpeckleFilter, enable)) @@ -154,6 +163,7 @@ void bind_stereodepthconfig(pybind11::module& m, void* pCallstack){ .def_readwrite("spatialFilter", &RawStereoDepthConfig::PostProcessing::spatialFilter, DOC(dai, RawStereoDepthConfig, PostProcessing, spatialFilter)) .def_readwrite("temporalFilter", &RawStereoDepthConfig::PostProcessing::temporalFilter, DOC(dai, RawStereoDepthConfig, PostProcessing, temporalFilter)) .def_readwrite("thresholdFilter", &RawStereoDepthConfig::PostProcessing::thresholdFilter, DOC(dai, RawStereoDepthConfig, PostProcessing, 
thresholdFilter)) + .def_readwrite("brightnessFilter", &RawStereoDepthConfig::PostProcessing::brightnessFilter, DOC(dai, RawStereoDepthConfig, PostProcessing, brightnessFilter)) .def_readwrite("speckleFilter", &RawStereoDepthConfig::PostProcessing::speckleFilter, DOC(dai, RawStereoDepthConfig, PostProcessing, speckleFilter)) .def_readwrite("decimationFilter", &RawStereoDepthConfig::PostProcessing::decimationFilter, DOC(dai, RawStereoDepthConfig, PostProcessing, decimationFilter)) ; @@ -245,6 +255,7 @@ void bind_stereodepthconfig(pybind11::module& m, void* pCallstack){ .def("setDepthUnit", &StereoDepthConfig::setDepthUnit, DOC(dai, StereoDepthConfig, setDepthUnit)) .def("getDepthUnit", &StereoDepthConfig::getDepthUnit, DOC(dai, StereoDepthConfig, getDepthUnit)) .def("setDisparityShift", &StereoDepthConfig::setDisparityShift, DOC(dai, StereoDepthConfig, setDisparityShift)) + .def("setNumInvalidateEdgePixels", &StereoDepthConfig::setNumInvalidateEdgePixels, DOC(dai, StereoDepthConfig, setNumInvalidateEdgePixels)) .def("set", &StereoDepthConfig::set, py::arg("config"), DOC(dai, StereoDepthConfig, set)) .def("get", &StereoDepthConfig::get, DOC(dai, StereoDepthConfig, get)) ; diff --git a/src/pipeline/datatype/ToFConfigBindings.cpp b/src/pipeline/datatype/ToFConfigBindings.cpp index 177d50f4e..a51fae8ac 100644 --- a/src/pipeline/datatype/ToFConfigBindings.cpp +++ b/src/pipeline/datatype/ToFConfigBindings.cpp @@ -17,7 +17,9 @@ void bind_tofconfig(pybind11::module& m, void* pCallstack){ using namespace dai; py::class_> rawToFConfig(m, "RawToFConfig", DOC(dai, RawToFConfig)); - py::class_> tofConfig(m, "ToFConfig", DOC(dai, ToFConfig)); + py::class_ depthParams(rawToFConfig, "DepthParams", DOC(dai, RawToFConfig, DepthParams)); + py::enum_ depthParamsTypeFMod(depthParams, "TypeFMod", DOC(dai, RawToFConfig, DepthParams, TypeFMod)); + py::class_> toFConfig(m, "ToFConfig", DOC(dai, ToFConfig)); /////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////// @@ -35,16 +37,38 @@ void bind_tofconfig(pybind11::module& m, void* pCallstack){ // Metadata / raw rawToFConfig .def(py::init<>()) - .def_readwrite("dummy", &RawToFConfig::dummy, DOC(dai, RawToFConfig, dummy)) + .def_readwrite("depthParams", &RawToFConfig::depthParams, DOC(dai, RawToFConfig, depthParams)) + ; + + depthParamsTypeFMod + .value("ALL", RawToFConfig::DepthParams::TypeFMod::F_MOD_ALL) + .value("MIN", RawToFConfig::DepthParams::TypeFMod::F_MOD_MIN) + .value("MAX", RawToFConfig::DepthParams::TypeFMod::F_MOD_MAX) + ; + + depthParams + .def(py::init<>()) + .def_readwrite("enable", &RawToFConfig::DepthParams::enable, DOC(dai, RawToFConfig, DepthParams, enable)) + .def_readwrite("freqModUsed", &RawToFConfig::DepthParams::freqModUsed, DOC(dai, RawToFConfig, DepthParams, freqModUsed)) + .def_readwrite("avgPhaseShuffle", &RawToFConfig::DepthParams::avgPhaseShuffle, DOC(dai, RawToFConfig, DepthParams, avgPhaseShuffle)) + .def_readwrite("minimumAmplitude", &RawToFConfig::DepthParams::minimumAmplitude, DOC(dai, RawToFConfig, DepthParams, minimumAmplitude)) ; // Message - tofConfig + toFConfig .def(py::init<>()) .def(py::init>()) + + .def("setDepthParams", static_cast(&ToFConfig::setDepthParams), py::arg("config"), DOC(dai, ToFConfig, setDepthParams)) + .def("setFreqModUsed", static_cast(&ToFConfig::setFreqModUsed), DOC(dai, ToFConfig, setFreqModUsed)) + .def("setAvgPhaseShuffle", &ToFConfig::setAvgPhaseShuffle, DOC(dai, ToFConfig, setAvgPhaseShuffle)) + 
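Stepping back to the StereoDepthConfig hunks a few files above: the new BrightnessFilter and numInvalidateEdgePixels fields slot into the usual initialConfig get/modify/set flow. A minimal sketch; the threshold values are illustrative only.

import depthai as dai

pipeline = dai.Pipeline()
stereo = pipeline.create(dai.node.StereoDepth)

cfg = stereo.initialConfig.get()
cfg.postProcessing.brightnessFilter.minBrightness = 0      # illustrative
cfg.postProcessing.brightnessFilter.maxBrightness = 240    # illustrative
cfg.algorithmControl.numInvalidateEdgePixels = 20          # illustrative
stereo.initialConfig.set(cfg)

# Or, via the dedicated setter added above:
#   stereo.initialConfig.setNumInvalidateEdgePixels(20)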
.def("setMinAmplitude", &ToFConfig::setMinAmplitude, DOC(dai, ToFConfig, setMinAmplitude)) .def("set", &ToFConfig::set, py::arg("config"), DOC(dai, ToFConfig, set)) .def("get", &ToFConfig::get, DOC(dai, ToFConfig, get)) ; + // add aliases + m.attr("ToFConfig").attr("DepthParams") = m.attr("RawToFConfig").attr("DepthParams"); + } diff --git a/src/pipeline/node/CameraBindings.cpp b/src/pipeline/node/CameraBindings.cpp new file mode 100644 index 000000000..2c52bb282 --- /dev/null +++ b/src/pipeline/node/CameraBindings.cpp @@ -0,0 +1,167 @@ +#include "NodeBindings.hpp" +#include "Common.hpp" + +#include "depthai/pipeline/Pipeline.hpp" +#include "depthai/pipeline/Node.hpp" +#include "depthai/pipeline/node/Camera.hpp" + +void bind_camera(pybind11::module& m, void* pCallstack){ + + using namespace dai; + using namespace dai::node; + + // Node and Properties declare upfront + py::class_ cameraProperties(m, "CameraProperties", DOC(dai, CameraProperties)); + py::enum_ cameraPropertiesWarpMeshSource(cameraProperties, "WarpMeshSource", DOC(dai, CameraProperties, WarpMeshSource)); + py::enum_ cameraPropertiesColorOrder(cameraProperties, "ColorOrder", DOC(dai, CameraProperties, ColorOrder)); + auto camera = ADD_NODE(Camera); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + // // Camera Properties + // cameraPropertiesSensorResolution + // .value("THE_1080_P", CameraProperties::SensorResolution::THE_1080_P) + // .value("THE_1200_P", CameraProperties::SensorResolution::THE_1200_P) + // .value("THE_4_K", CameraProperties::SensorResolution::THE_4_K) + // .value("THE_5_MP", CameraProperties::SensorResolution::THE_5_MP) + // .value("THE_12_MP", CameraProperties::SensorResolution::THE_12_MP) + // .value("THE_4000X3000", CameraProperties::SensorResolution::THE_4000X3000) + // .value("THE_13_MP", CameraProperties::SensorResolution::THE_13_MP) + // .value("THE_5312X6000", CameraProperties::SensorResolution::THE_5312X6000) + // .value("THE_48_MP", CameraProperties::SensorResolution::THE_48_MP) + // .value("THE_720_P", CameraProperties::SensorResolution::THE_720_P) + // .value("THE_800_P", CameraProperties::SensorResolution::THE_800_P) + // ; + + // Camera Properties - WarpMeshSource + cameraPropertiesWarpMeshSource + .value("AUTO", CameraProperties::WarpMeshSource::AUTO) + .value("NONE", CameraProperties::WarpMeshSource::NONE) + .value("CALIBRATION", CameraProperties::WarpMeshSource::CALIBRATION) + .value("URI", CameraProperties::WarpMeshSource::URI) + ; + + cameraPropertiesColorOrder + .value("BGR", CameraProperties::ColorOrder::BGR) + .value("RGB", CameraProperties::ColorOrder::RGB) + ; + + cameraProperties + .def_readwrite("initialControl", &CameraProperties::initialControl) + .def_readwrite("boardSocket", &CameraProperties::boardSocket) + .def_readwrite("imageOrientation", &CameraProperties::imageOrientation) + .def_readwrite("colorOrder", &CameraProperties::colorOrder) + .def_readwrite("interleaved", 
&CameraProperties::interleaved) + .def_readwrite("fp16", &CameraProperties::fp16) + .def_readwrite("previewHeight", &CameraProperties::previewHeight) + .def_readwrite("previewWidth", &CameraProperties::previewWidth) + .def_readwrite("videoHeight", &CameraProperties::videoHeight) + .def_readwrite("videoWidth", &CameraProperties::videoWidth) + .def_readwrite("stillHeight", &CameraProperties::stillHeight) + .def_readwrite("stillWidth", &CameraProperties::stillWidth) + // .def_readwrite("resolution", &CameraProperties::resolution) + .def_readwrite("fps", &CameraProperties::fps) + .def_readwrite("isp3aFps", &CameraProperties::isp3aFps) + .def_readwrite("sensorCropX", &CameraProperties::sensorCropX) + .def_readwrite("sensorCropY", &CameraProperties::sensorCropY) + .def_readwrite("previewKeepAspectRatio", &CameraProperties::previewKeepAspectRatio) + .def_readwrite("ispScale", &CameraProperties::ispScale) + + .def_readwrite("numFramesPoolRaw", &CameraProperties::numFramesPoolRaw) + .def_readwrite("numFramesPoolIsp", &CameraProperties::numFramesPoolIsp) + .def_readwrite("numFramesPoolVideo", &CameraProperties::numFramesPoolVideo) + .def_readwrite("numFramesPoolPreview", &CameraProperties::numFramesPoolPreview) + .def_readwrite("numFramesPoolStill", &CameraProperties::numFramesPoolStill) + + .def_readwrite("warpMeshSource", &CameraProperties::warpMeshSource) + .def_readwrite("warpMeshUri", &CameraProperties::warpMeshUri) + .def_readwrite("warpMeshWidth", &CameraProperties::warpMeshWidth) + .def_readwrite("warpMeshHeight", &CameraProperties::warpMeshHeight) + .def_readwrite("calibAlpha", &CameraProperties::calibAlpha) + .def_readwrite("warpMeshStepWidth", &CameraProperties::warpMeshStepWidth) + .def_readwrite("warpMeshStepHeight", &CameraProperties::warpMeshStepHeight) + ; + + // Camera node + camera + .def_readonly("inputConfig", &Camera::inputConfig, DOC(dai, node, Camera, inputConfig)) + .def_readonly("inputControl", &Camera::inputControl, DOC(dai, node, Camera, inputControl)) + .def_readonly("initialControl", &Camera::initialControl, DOC(dai, node, Camera, initialControl)) + .def_readonly("video", &Camera::video, DOC(dai, node, Camera, video)) + .def_readonly("preview", &Camera::preview, DOC(dai, node, Camera, preview)) + .def_readonly("still", &Camera::still, DOC(dai, node, Camera, still)) + .def_readonly("isp", &Camera::isp, DOC(dai, node, Camera, isp)) + .def_readonly("raw", &Camera::raw, DOC(dai, node, Camera, raw)) + .def_readonly("frameEvent", &Camera::frameEvent, DOC(dai, node, Camera, frameEvent)) + // .def_readonly("mockIsp", &Camera::mockIsp, DOC(dai, node, Camera, mockIsp)) + .def("setBoardSocket", &Camera::setBoardSocket, py::arg("boardSocket"), DOC(dai, node, Camera, setBoardSocket)) + .def("getBoardSocket", &Camera::getBoardSocket, DOC(dai, node, Camera, getBoardSocket)) + .def("setImageOrientation", &Camera::setImageOrientation, py::arg("imageOrientation"), DOC(dai, node, Camera, setImageOrientation)) + .def("getImageOrientation", &Camera::getImageOrientation, DOC(dai, node, Camera, getImageOrientation)) + .def("setPreviewSize", static_cast(&Camera::setPreviewSize), py::arg("width"), py::arg("height"), DOC(dai, node, Camera, setPreviewSize)) + .def("setPreviewSize", static_cast)>(&Camera::setPreviewSize), py::arg("size"), DOC(dai, node, Camera, setPreviewSize, 2)) + .def("setVideoSize", static_cast(&Camera::setVideoSize), py::arg("width"), py::arg("height"), DOC(dai, node, Camera, setVideoSize)) + .def("setVideoSize", static_cast)>(&Camera::setVideoSize), py::arg("size"), DOC(dai, 
node, Camera, setVideoSize, 2)) + .def("setStillSize", static_cast(&Camera::setStillSize), py::arg("width"), py::arg("height"), DOC(dai, node, Camera, setStillSize)) + .def("setStillSize", static_cast)>(&Camera::setStillSize), py::arg("size"), DOC(dai, node, Camera, setStillSize, 2)) + // .def("setResolution", &Camera::setResolution, py::arg("resolution"), DOC(dai, node, Camera, setResolution)) + // .def("getResolution", &Camera::getResolution, DOC(dai, node, Camera, getResolution)) + .def("setFps", &Camera::setFps, py::arg("fps"), DOC(dai, node, Camera, setFps)) + .def("setIsp3aFps", &Camera::setIsp3aFps, DOC(dai, node, Camera, setIsp3aFps)) + .def("getFps", &Camera::getFps, DOC(dai, node, Camera, getFps)) + .def("getPreviewSize", &Camera::getPreviewSize, DOC(dai, node, Camera, getPreviewSize)) + .def("getPreviewWidth", &Camera::getPreviewWidth, DOC(dai, node, Camera, getPreviewWidth)) + .def("getPreviewHeight", &Camera::getPreviewHeight, DOC(dai, node, Camera, getPreviewHeight)) + .def("getVideoSize", &Camera::getVideoSize, DOC(dai, node, Camera, getVideoSize)) + .def("getVideoWidth", &Camera::getVideoWidth, DOC(dai, node, Camera, getVideoWidth)) + .def("getVideoHeight", &Camera::getVideoHeight, DOC(dai, node, Camera, getVideoHeight)) + .def("getStillSize", &Camera::getStillSize, DOC(dai, node, Camera, getStillSize)) + .def("getStillWidth", &Camera::getStillWidth, DOC(dai, node, Camera, getStillWidth)) + .def("getStillHeight", &Camera::getStillHeight, DOC(dai, node, Camera, getStillHeight)) + .def("getSize", &Camera::getSize, DOC(dai, node, Camera, getSize)) + .def("getWidth", &Camera::getWidth, DOC(dai, node, Camera, getWidth)) + .def("getHeight", &Camera::getHeight, DOC(dai, node, Camera, getHeight)) + // .def("sensorCenterCrop", &Camera::sensorCenterCrop, DOC(dai, node, Camera, sensorCenterCrop)) + // .def("setSensorCrop", &Camera::setSensorCrop, py::arg("x"), py::arg("y"), DOC(dai, node, Camera, setSensorCrop)) + // .def("getSensorCrop", &Camera::getSensorCrop, DOC(dai, node, Camera, getSensorCrop)) + // .def("getSensorCropX", &Camera::getSensorCropX, DOC(dai, node, Camera, getSensorCropX)) + // .def("getSensorCropY", &Camera::getSensorCropY, DOC(dai, node, Camera, getSensorCropY)) + + // .def("setIspScale", static_cast(&Camera::setIspScale), py::arg("numerator"), py::arg("denominator"), DOC(dai, node, Camera, setIspScale)) + // .def("setIspScale", static_cast)>(&Camera::setIspScale), py::arg("scale"), DOC(dai, node, Camera, setIspScale, 2)) + // .def("setIspScale", static_cast(&Camera::setIspScale), py::arg("horizNum"), py::arg("horizDenom"), py::arg("vertNum"), py::arg("vertDenom"), DOC(dai, node, Camera, setIspScale, 3)) + // .def("setIspScale", static_cast,std::tuple)>(&Camera::setIspScale), py::arg("horizScale"), py::arg("vertScale"), DOC(dai, node, Camera, setIspScale, 4)) + + .def("setCamera", &Camera::setCamera, py::arg("name"), DOC(dai, node, Camera, setCamera)) + .def("getCamera", &Camera::getCamera, DOC(dai, node, Camera, getCamera)) + + .def("setSize", static_cast(&Camera::setSize), py::arg("width"), py::arg("height"), DOC(dai, node, Camera, setSize)) + .def("setSize", static_cast)>(&Camera::setSize), py::arg("size"), DOC(dai, node, Camera, setSize, 2)) + + + .def("setMeshSource", &Camera::setMeshSource, py::arg("source"), DOC(dai, node, Camera, setMeshSource)) + .def("getMeshSource", &Camera::getMeshSource, DOC(dai, node, Camera, getMeshSource)) + .def("loadMeshFile", &Camera::loadMeshFile, py::arg("warpMesh"), DOC(dai, node, Camera, loadMeshFile)) + .def("loadMeshData", 
&Camera::loadMeshData, py::arg("warpMesh"), DOC(dai, node, Camera, loadMeshData)) + .def("setMeshStep", &Camera::setMeshStep, py::arg("width"), py::arg("height"), DOC(dai, node, Camera, setMeshStep)) + .def("getMeshStep", &Camera::getMeshStep, DOC(dai, node, Camera, getMeshStep)) + .def("setCalibrationAlpha", &Camera::setCalibrationAlpha, py::arg("alpha"), DOC(dai, node, Camera, setCalibrationAlpha)) + .def("getCalibrationAlpha", &Camera::getCalibrationAlpha, DOC(dai, node, Camera, getCalibrationAlpha)) + + .def("setRawOutputPacked", &Camera::setRawOutputPacked, py::arg("packed"), DOC(dai, node, Camera, setRawOutputPacked)) + ; + // ALIAS + daiNodeModule.attr("Camera").attr("Properties") = cameraProperties; + +} diff --git a/src/pipeline/node/ColorCameraBindings.cpp b/src/pipeline/node/ColorCameraBindings.cpp index fa4d7a0e3..138809341 100644 --- a/src/pipeline/node/ColorCameraBindings.cpp +++ b/src/pipeline/node/ColorCameraBindings.cpp @@ -48,6 +48,7 @@ void bind_colorcamera(pybind11::module& m, void* pCallstack){ .value("THE_2000X1500", ColorCameraProperties::SensorResolution::THE_2000X1500) .value("THE_2028X1520", ColorCameraProperties::SensorResolution::THE_2028X1520) .value("THE_2104X1560", ColorCameraProperties::SensorResolution::THE_2104X1560) + .value("THE_1440X1080", ColorCameraProperties::SensorResolution::THE_1440X1080) ; colorCameraPropertiesColorOrder @@ -77,6 +78,7 @@ void bind_colorcamera(pybind11::module& m, void* pCallstack){ .def_readwrite("stillWidth", &ColorCameraProperties::stillWidth) .def_readwrite("resolution", &ColorCameraProperties::resolution) .def_readwrite("fps", &ColorCameraProperties::fps) + .def_readwrite("isp3aFps", &ColorCameraProperties::isp3aFps) .def_readwrite("sensorCropX", &ColorCameraProperties::sensorCropX) .def_readwrite("sensorCropY", &ColorCameraProperties::sensorCropY) .def_readwrite("previewKeepAspectRatio", &ColorCameraProperties::previewKeepAspectRatio) @@ -93,6 +95,7 @@ void bind_colorcamera(pybind11::module& m, void* pCallstack){ .def_readwrite("calibAlpha", &ColorCameraProperties::calibAlpha) .def_readwrite("warpMeshStepWidth", &ColorCameraProperties::warpMeshStepWidth) .def_readwrite("warpMeshStepHeight", &ColorCameraProperties::warpMeshStepHeight) + .def_readwrite("eventFilter", &ColorCameraProperties::eventFilter) ; // ColorCamera node @@ -141,7 +144,10 @@ void bind_colorcamera(pybind11::module& m, void* pCallstack){ .def("setResolution", &ColorCamera::setResolution, py::arg("resolution"), DOC(dai, node, ColorCamera, setResolution)) .def("getResolution", &ColorCamera::getResolution, DOC(dai, node, ColorCamera, getResolution)) .def("setFps", &ColorCamera::setFps, py::arg("fps"), DOC(dai, node, ColorCamera, setFps)) + .def("setIsp3aFps", &ColorCamera::setIsp3aFps, DOC(dai, node, ColorCamera, setIsp3aFps)) .def("getFps", &ColorCamera::getFps, DOC(dai, node, ColorCamera, getFps)) + .def("setFrameEventFilter", &ColorCamera::setFrameEventFilter, py::arg("events"), DOC(dai, node, ColorCamera, setFrameEventFilter)) + .def("getFrameEventFilter", &ColorCamera::getFrameEventFilter, DOC(dai, node, ColorCamera, getFrameEventFilter)) .def("getPreviewSize", &ColorCamera::getPreviewSize, DOC(dai, node, ColorCamera, getPreviewSize)) .def("getPreviewWidth", &ColorCamera::getPreviewWidth, DOC(dai, node, ColorCamera, getPreviewWidth)) .def("getPreviewHeight", &ColorCamera::getPreviewHeight, DOC(dai, node, ColorCamera, getPreviewHeight)) @@ -214,6 +220,8 @@ void bind_colorcamera(pybind11::module& m, void* pCallstack){ .def("setCalibrationAlpha", 
&ColorCamera::setCalibrationAlpha, py::arg("alpha"), DOC(dai, node, ColorCamera, setCalibrationAlpha)) .def("getCalibrationAlpha", &ColorCamera::getCalibrationAlpha, DOC(dai, node, ColorCamera, getCalibrationAlpha)) + + .def("setRawOutputPacked", &ColorCamera::setRawOutputPacked, py::arg("packed"), DOC(dai, node, ColorCamera, setRawOutputPacked)) ; // ALIAS daiNodeModule.attr("ColorCamera").attr("Properties") = colorCameraProperties; diff --git a/src/pipeline/node/DetectionParserBindings.cpp b/src/pipeline/node/DetectionParserBindings.cpp index 8d6dfb0b4..95e377495 100644 --- a/src/pipeline/node/DetectionParserBindings.cpp +++ b/src/pipeline/node/DetectionParserBindings.cpp @@ -36,9 +36,11 @@ void bind_detectionparser(pybind11::module& m, void* pCallstack){ detectionParser .def_readonly("input", &DetectionParser::input, DOC(dai, node, DetectionParser, input)) .def_readonly("out", &DetectionParser::out, DOC(dai, node, DetectionParser, out)) + .def("setBlobPath", &DetectionParser::setBlobPath, py::arg("path"), DOC(dai, node, DetectionParser, setBlobPath)) .def("setNumFramesPool", &DetectionParser::setNumFramesPool, py::arg("numFramesPool"), DOC(dai, node, DetectionParser, setNumFramesPool)) .def("getNumFramesPool", &DetectionParser::getNumFramesPool, DOC(dai, node, DetectionParser, getNumFramesPool)) - .def("setBlob", &DetectionParser::setBlob, py::arg("blob"), DOC(dai, node, DetectionParser, setBlob)) + .def("setBlob", py::overload_cast(&DetectionParser::setBlob), py::arg("blob"), DOC(dai, node, DetectionParser, setBlob)) + .def("setBlob", py::overload_cast(&DetectionParser::setBlob), py::arg("path"), DOC(dai, node, DetectionParser, setBlob, 2)) .def("setInputImageSize", static_cast(&DetectionParser::setInputImageSize), py::arg("width"), py::arg("height"), DOC(dai, node, DetectionParser, setInputImageSize)) .def("setInputImageSize", static_cast)>(&DetectionParser::setInputImageSize), py::arg("size"), DOC(dai, node, DetectionParser, setInputImageSize, 2)) .def("setNNFamily", &DetectionParser::setNNFamily, py::arg("type"), DOC(dai, node, DetectionParser, setNNFamily)) diff --git a/src/pipeline/node/MonoCameraBindings.cpp b/src/pipeline/node/MonoCameraBindings.cpp index b25acaf6d..4e6765eb4 100644 --- a/src/pipeline/node/MonoCameraBindings.cpp +++ b/src/pipeline/node/MonoCameraBindings.cpp @@ -36,6 +36,8 @@ void bind_monocamera(pybind11::module& m, void* pCallstack){ .value("THE_400_P", MonoCameraProperties::SensorResolution::THE_400_P) .value("THE_480_P", MonoCameraProperties::SensorResolution::THE_480_P) .value("THE_1200_P", MonoCameraProperties::SensorResolution::THE_1200_P) + .value("THE_4000X3000", MonoCameraProperties::SensorResolution::THE_4000X3000) + .value("THE_4224X3136", MonoCameraProperties::SensorResolution::THE_4224X3136) ; monoCameraProperties @@ -43,8 +45,10 @@ void bind_monocamera(pybind11::module& m, void* pCallstack){ .def_readwrite("boardSocket", &MonoCameraProperties::boardSocket) .def_readwrite("resolution", &MonoCameraProperties::resolution) .def_readwrite("fps", &MonoCameraProperties::fps) + .def_readwrite("isp3aFps", &MonoCameraProperties::isp3aFps) .def_readwrite("numFramesPool", &MonoCameraProperties::numFramesPool) .def_readwrite("numFramesPoolRaw", &MonoCameraProperties::numFramesPoolRaw) + .def_readwrite("eventFilter", &MonoCameraProperties::eventFilter) ; // Node @@ -76,7 +80,10 @@ void bind_monocamera(pybind11::module& m, void* pCallstack){ .def("getImageOrientation", &MonoCamera::getImageOrientation, DOC(dai, node, MonoCamera, getImageOrientation)) 
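The ColorCamera and DetectionParser hunks above add a separate 3A rate control (`setIsp3aFps`), the `THE_1440X1080` sensor resolution, and a path-based `setBlob` overload alongside the existing blob-object one. A minimal sketch of how the resulting Python API is expected to be called (the blob path and the rates below are illustrative, not taken from the diff):

```python
import depthai as dai

pipeline = dai.Pipeline()

cam = pipeline.create(dai.node.ColorCamera)
cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1440X1080)
cam.setFps(30)
cam.setIsp3aFps(15)  # run auto-exposure/AWB/AF at a lower rate than the stream FPS

parser = pipeline.create(dai.node.DetectionParser)
parser.setBlob("model.blob")  # new overload accepting a path
# parser.setBlob(dai.OpenVINO.Blob("model.blob"))  # the existing blob-object overload remains
```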
.def("setResolution", &MonoCamera::setResolution, py::arg("resolution"), DOC(dai, node, MonoCamera, setResolution)) .def("getResolution", &MonoCamera::getResolution, DOC(dai, node, MonoCamera, getResolution)) + .def("setFrameEventFilter", &MonoCamera::setFrameEventFilter, py::arg("events"), DOC(dai, node, MonoCamera, setFrameEventFilter)) + .def("getFrameEventFilter", &MonoCamera::getFrameEventFilter, DOC(dai, node, MonoCamera, getFrameEventFilter)) .def("setFps", &MonoCamera::setFps, py::arg("fps"), DOC(dai, node, MonoCamera, setFps)) + .def("setIsp3aFps", &MonoCamera::setIsp3aFps, DOC(dai, node, MonoCamera, setIsp3aFps)) .def("getFps", &MonoCamera::getFps, DOC(dai, node, MonoCamera, getFps)) .def("getResolutionSize", &MonoCamera::getResolutionSize, DOC(dai, node, MonoCamera, getResolutionSize)) .def("getResolutionWidth", &MonoCamera::getResolutionWidth, DOC(dai, node, MonoCamera, getResolutionWidth)) @@ -87,6 +94,8 @@ void bind_monocamera(pybind11::module& m, void* pCallstack){ .def("getRawNumFramesPool", &MonoCamera::getRawNumFramesPool, DOC(dai, node, MonoCamera, getRawNumFramesPool)) .def("setCamera", &MonoCamera::setCamera, py::arg("name"), DOC(dai, node, MonoCamera, setCamera)) .def("getCamera", &MonoCamera::getCamera, DOC(dai, node, MonoCamera, getCamera)) + + .def("setRawOutputPacked", &MonoCamera::setRawOutputPacked, py::arg("packed"), DOC(dai, node, MonoCamera, setRawOutputPacked)) ; // ALIAS daiNodeModule.attr("MonoCamera").attr("Properties") = monoCameraProperties; diff --git a/src/pipeline/node/NodeBindings.cpp b/src/pipeline/node/NodeBindings.cpp index ee3840230..3f46a25a3 100644 --- a/src/pipeline/node/NodeBindings.cpp +++ b/src/pipeline/node/NodeBindings.cpp @@ -124,6 +124,7 @@ void bind_xlinkin(pybind11::module& m, void* pCallstack); void bind_xlinkout(pybind11::module& m, void* pCallstack); void bind_benchmark(pybind11::module& m, void* pCallstack); void bind_colorcamera(pybind11::module& m, void* pCallstack); +void bind_camera(pybind11::module& m, void* pCallstack); void bind_monocamera(pybind11::module& m, void* pCallstack); void bind_stereodepth(pybind11::module& m, void* pCallstack); void bind_neuralnetwork(pybind11::module& m, void* pCallstack); @@ -145,6 +146,7 @@ void bind_tof(pybind11::module& m, void* pCallstack); void bind_apriltag(pybind11::module& m, void* pCallstack); void bind_detectionparser(pybind11::module& m, void* pCallstack); void bind_sync(pybind11::module& m, void* pCallstack); +void bind_uvc(pybind11::module& m, void* pCallstack); void NodeBindings::addToCallstack(std::deque& callstack) { // Bind Node et al @@ -155,6 +157,7 @@ void NodeBindings::addToCallstack(std::deque& callstack) { callstack.push_front(bind_xlinkout); callstack.push_front(bind_benchmark); callstack.push_front(bind_colorcamera); + callstack.push_front(bind_camera); callstack.push_front(bind_monocamera); callstack.push_front(bind_stereodepth); callstack.push_front(bind_neuralnetwork); @@ -176,6 +179,7 @@ void NodeBindings::addToCallstack(std::deque& callstack) { callstack.push_front(bind_apriltag); callstack.push_front(bind_detectionparser); callstack.push_front(bind_sync); + callstack.push_front(bind_uvc); } void NodeBindings::bind(pybind11::module& m, void* pCallstack){ @@ -248,6 +252,9 @@ void NodeBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("group", &Node::Input::group, DOC(dai, Node, Input, group)) .def_readwrite("name", &Node::Input::name, DOC(dai, Node, Input, name)) .def_readwrite("type", &Node::Input::type, DOC(dai, Node, Input, type)) + 
.def_readwrite("possibleDatatypes", &Node::Input::possibleDatatypes, DOC(dai, Node, Input, possibleDatatypes)) + .def("getParent", static_cast(&Node::Input::getParent), py::return_value_policy::reference_internal, DOC(dai, Node, Input, getParent)) + .def("getParent", static_cast(&Node::Input::getParent), py::return_value_policy::reference_internal, DOC(dai, Node, Input, getParent)) .def("setBlocking", &Node::Input::setBlocking, py::arg("blocking"), DOC(dai, Node, Input, setBlocking)) .def("getBlocking", &Node::Input::getBlocking, DOC(dai, Node, Input, getBlocking)) .def("setQueueSize", &Node::Input::setQueueSize, py::arg("size"), DOC(dai, Node, Input, setQueueSize)) @@ -317,6 +324,9 @@ void NodeBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("name", &Node::Output::name, DOC(dai, Node, Output, name)) .def_readwrite("type", &Node::Output::type, DOC(dai, Node, Output, type)) .def_readwrite("possibleDatatypes", &Node::Output::possibleDatatypes, DOC(dai, Node, Output, possibleDatatypes)) + .def("getParent", static_cast(&Node::Output::getParent), py::return_value_policy::reference_internal, DOC(dai, Node, Output, getParent)) + .def("getParent", static_cast(&Node::Output::getParent), py::return_value_policy::reference_internal, DOC(dai, Node, Output, getParent)) + .def("isSamePipeline", &Node::Output::isSamePipeline, py::arg("input"), DOC(dai, Node, Output, isSamePipeline)) .def("canConnect", &Node::Output::canConnect, py::arg("input"), DOC(dai, Node, Output, canConnect)) .def("link", &Node::Output::link, py::arg("input"), DOC(dai, Node, Output, link)) .def("unlink", &Node::Output::unlink, py::arg("input"), DOC(dai, Node, Output, unlink)) diff --git a/src/pipeline/node/ObjectTrackerBindings.cpp b/src/pipeline/node/ObjectTrackerBindings.cpp index 75f1362fb..b2b2b76a3 100644 --- a/src/pipeline/node/ObjectTrackerBindings.cpp +++ b/src/pipeline/node/ObjectTrackerBindings.cpp @@ -65,6 +65,7 @@ void bind_objecttracker(pybind11::module& m, void* pCallstack){ .def("setDetectionLabelsToTrack", &ObjectTracker::setDetectionLabelsToTrack, py::arg("labels"), DOC(dai, node, ObjectTracker, setDetectionLabelsToTrack)) .def("setTrackerType", &ObjectTracker::setTrackerType, py::arg("type"), DOC(dai, node, ObjectTracker, setTrackerType)) .def("setTrackerIdAssignmentPolicy", &ObjectTracker::setTrackerIdAssignmentPolicy, py::arg("type"), DOC(dai, node, ObjectTracker, setTrackerIdAssignmentPolicy)) + .def("setTrackingPerClass", &ObjectTracker::setTrackingPerClass, py::arg("trackingPerClass"), DOC(dai, node, ObjectTracker, setTrackingPerClass)) ; daiNodeModule.attr("ObjectTracker").attr("Properties") = objectTrackerProperties; diff --git a/src/pipeline/node/StereoDepthBindings.cpp b/src/pipeline/node/StereoDepthBindings.cpp index 7f02ccf75..092d4c9f7 100644 --- a/src/pipeline/node/StereoDepthBindings.cpp +++ b/src/pipeline/node/StereoDepthBindings.cpp @@ -53,6 +53,14 @@ void bind_stereodepth(pybind11::module& m, void* pCallstack){ .def_readwrite("numFramesPool", &StereoDepthProperties::numFramesPool, DOC(dai, StereoDepthProperties, numFramesPool)) .def_readwrite("numPostProcessingShaves", &StereoDepthProperties::numPostProcessingShaves, DOC(dai, StereoDepthProperties, numPostProcessingShaves)) .def_readwrite("numPostProcessingMemorySlices", &StereoDepthProperties::numPostProcessingMemorySlices, DOC(dai, StereoDepthProperties, numPostProcessingMemorySlices)) + .def_readwrite("focalLengthFromCalibration", &StereoDepthProperties::focalLengthFromCalibration, DOC(dai, StereoDepthProperties, 
focalLengthFromCalibration)) + .def_readwrite("useHomographyRectification", &StereoDepthProperties::useHomographyRectification, DOC(dai, StereoDepthProperties, useHomographyRectification)) + .def_readwrite("baseline", &StereoDepthProperties::baseline, DOC(dai, StereoDepthProperties, baseline)) + .def_readwrite("focalLength", &StereoDepthProperties::focalLength, DOC(dai, StereoDepthProperties, focalLength)) + .def_readwrite("disparityToDepthUseSpecTranslation", &StereoDepthProperties::disparityToDepthUseSpecTranslation, DOC(dai, StereoDepthProperties, disparityToDepthUseSpecTranslation)) + .def_readwrite("rectificationUseSpecTranslation", &StereoDepthProperties::rectificationUseSpecTranslation, DOC(dai, StereoDepthProperties, rectificationUseSpecTranslation)) + .def_readwrite("depthAlignmentUseSpecTranslation", &StereoDepthProperties::depthAlignmentUseSpecTranslation, DOC(dai, StereoDepthProperties, depthAlignmentUseSpecTranslation)) + .def_readwrite("alphaScaling", &StereoDepthProperties::alphaScaling, DOC(dai, StereoDepthProperties, alphaScaling)) ; stereoDepthPresetMode @@ -172,6 +180,12 @@ void bind_stereodepth(pybind11::module& m, void* pCallstack){ .def("enableDistortionCorrection", &StereoDepth::enableDistortionCorrection, DOC(dai, node, StereoDepth, enableDistortionCorrection)) .def("setVerticalStereo", &StereoDepth::setVerticalStereo, DOC(dai, node, StereoDepth, setVerticalStereo)) .def("setCustomPixelDescriptors", &StereoDepth::setCustomPixelDescriptors, DOC(dai, node, StereoDepth, setCustomPixelDescriptors)) + .def("setBaseline", &StereoDepth::setBaseline, DOC(dai, node, StereoDepth, setBaseline)) + .def("setFocalLength", &StereoDepth::setFocalLength, DOC(dai, node, StereoDepth, setFocalLength)) + .def("setDisparityToDepthUseSpecTranslation", &StereoDepth::setDisparityToDepthUseSpecTranslation, DOC(dai, node, StereoDepth, setDisparityToDepthUseSpecTranslation)) + .def("setRectificationUseSpecTranslation", &StereoDepth::setRectificationUseSpecTranslation, DOC(dai, node, StereoDepth, setRectificationUseSpecTranslation)) + .def("setDepthAlignmentUseSpecTranslation", &StereoDepth::setDepthAlignmentUseSpecTranslation, DOC(dai, node, StereoDepth, setDepthAlignmentUseSpecTranslation)) + .def("setAlphaScaling", &StereoDepth::setAlphaScaling, DOC(dai, node, StereoDepth, setAlphaScaling)) ; // ALIAS daiNodeModule.attr("StereoDepth").attr("Properties") = stereoDepthProperties; diff --git a/src/pipeline/node/ToFBindings.cpp b/src/pipeline/node/ToFBindings.cpp index 94497bcde..e641f9e98 100644 --- a/src/pipeline/node/ToFBindings.cpp +++ b/src/pipeline/node/ToFBindings.cpp @@ -28,19 +28,21 @@ void bind_tof(pybind11::module& m, void* pCallstack){ /////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////// - // ToF properties + // Properties tofProperties .def_readwrite("initialConfig", &ToFProperties::initialConfig, DOC(dai, ToFProperties, initialConfig)) ; - // ToF Node + // Node tof - .def_readonly("inputConfig", &ToF::inputConfig, DOC(dai, node, ToF, inputConfig)) - .def_readonly("inputRaw", &ToF::inputRaw, DOC(dai, node, ToF, inputRaw)) - .def_readonly("depth", &ToF::depth, DOC(dai, node, ToF, depth)) - .def_readonly("passthroughInputRaw", &ToF::passthroughInputRaw, DOC(dai, node, ToF, passthroughInputRaw)) - .def_readonly("initialConfig", &ToF::initialConfig, DOC(dai, node, ToF, initialConfig)) - ; + .def_readonly("inputConfig", &ToF::inputConfig, DOC(dai, node, ToF, inputConfig), DOC(dai, node, ToF, 
inputConfig)) + .def_readonly("input", &ToF::input, DOC(dai, node, ToF, input), DOC(dai, node, ToF, input)) + .def_readonly("depth", &ToF::depth, DOC(dai, node, ToF, depth), DOC(dai, node, ToF, depth)) + .def_readonly("amplitude", &ToF::amplitude, DOC(dai, node, ToF, amplitude), DOC(dai, node, ToF, amplitude)) + .def_readonly("error", &ToF::error, DOC(dai, node, ToF, error), DOC(dai, node, ToF, error)) + .def_readonly("initialConfig", &ToF::initialConfig, DOC(dai, node, ToF, initialConfig), DOC(dai, node, ToF, initialConfig)) + ; + // ALIAS daiNodeModule.attr("ToF").attr("Properties") = tofProperties; } diff --git a/src/pipeline/node/UVCBindings.cpp b/src/pipeline/node/UVCBindings.cpp new file mode 100644 index 000000000..81f180658 --- /dev/null +++ b/src/pipeline/node/UVCBindings.cpp @@ -0,0 +1,50 @@ + +#include "NodeBindings.hpp" +#include "Common.hpp" + +#include "depthai/pipeline/Pipeline.hpp" +#include "depthai/pipeline/Node.hpp" +#include "depthai/pipeline/node/UVC.hpp" + +void bind_uvc(pybind11::module& m, void* pCallstack){ + + using namespace dai; + using namespace dai::node; + + // Node and Properties declare upfront + py::class_ uvcProperties(m, "UVCProperties", DOC(dai, UVCProperties)); + auto uvc = ADD_NODE(UVC); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + // Properties + uvcProperties + .def_readwrite("gpioInit", &UVCProperties::gpioInit) + .def_readwrite("gpioStreamOn", &UVCProperties::gpioStreamOn) + .def_readwrite("gpioStreamOff", &UVCProperties::gpioStreamOff) + ; + + // UVC node + uvc + .def_readonly("input", &UVC::input, DOC(dai, node, UVC, input)) + .def("setGpiosOnInit", &UVC::setGpiosOnInit, py::arg("list"), DOC(dai, node, UVC, setGpiosOnInit)) + .def("setGpiosOnStreamOn", &UVC::setGpiosOnStreamOn, py::arg("list"), DOC(dai, node, UVC, setGpiosOnStreamOn)) + .def("setGpiosOnStreamOff", &UVC::setGpiosOnStreamOff, py::arg("list"), DOC(dai, node, UVC, setGpiosOnStreamOff)) + ; + + // ALIAS + daiNodeModule.attr("UVC").attr("Properties") = uvcProperties; + +} + diff --git a/src/pybind11_common.hpp b/src/pybind11_common.hpp index 6dbc0967b..6478ae5e2 100644 --- a/src/pybind11_common.hpp +++ b/src/pybind11_common.hpp @@ -13,6 +13,7 @@ #include #include #include +#include // Include docstring file #include "docstring.hpp" diff --git a/src/utility/SpanBindings.hpp b/src/utility/SpanBindings.hpp new file mode 100644 index 000000000..859f1df31 --- /dev/null +++ b/src/utility/SpanBindings.hpp @@ -0,0 +1,170 @@ +#pragma once + +#include +#include + +#include "depthai/utility/span.hpp" + +namespace pybind11 { +namespace detail { + +template +struct span_name_maker { + template + static constexpr auto make(const T &t) { + return concat(t, span_name_maker::make(t)); + } +}; + +template <> +struct span_name_maker<1> { + template + static constexpr auto make(const T &t) { + return t; + } +}; + +// span with fixed size converts to a tuple +template struct type_caster> { + using 
span_type = typename dai::span; + using value_conv = make_caster; + using value_type = typename std::remove_cv::type; + + value_type backing_array[Extent] = {}; + + PYBIND11_TYPE_CASTER(span_type, _("Tuple[") + span_name_maker::make(value_conv::name) + _("]")); + + type_caster() : value(backing_array) {} + + bool load(handle src, bool convert) { + if (!isinstance(src) || isinstance(src)) + return false; + auto s = reinterpret_borrow(src); + if (s.size() != Extent) + return false; + size_t i = 0; + for (auto it : s) { + value_conv conv; + if (!conv.load(it, convert)) + return false; + backing_array[i] = cast_op(std::move(conv)); + i++; + } + return true; + } + +public: + template + static handle cast(T &&src, return_value_policy policy, handle parent) { + if (!std::is_lvalue_reference::value) + policy = return_value_policy_override::policy(policy); + tuple l(Extent); + size_t index = 0; + for (auto &&value : src) { + auto value_ = reinterpret_steal( + value_conv::cast(forward_like(value), policy, parent)); + if (!value_) + return handle(); + PyTuple_SET_ITEM(l.ptr(), (ssize_t)index++, + value_.release().ptr()); // steals a reference + } + return l.release(); + } +}; + + +// span with dynamic extent +template struct type_caster> { + using span_type = typename dai::span; + using value_conv = make_caster; + using value_type = typename std::remove_cv::type; + PYBIND11_TYPE_CASTER(span_type, _("List[") + value_conv::name + _("]")); + + std::vector vec; + bool load(handle src, bool convert) { + if (!isinstance(src) || isinstance(src)) + return false; + auto s = reinterpret_borrow(src); + vec.reserve(s.size()); + for (auto it : s) { + value_conv conv; + if (!conv.load(it, convert)) + return false; + vec.push_back(cast_op(std::move(conv))); + } + value = span_type(vec.data(), vec.size()); + return true; + } + +public: + template + static handle cast(T &&src, return_value_policy policy, handle parent) { + if (!std::is_lvalue_reference::value) + policy = return_value_policy_override::policy(policy); + list l(src.size()); + size_t index = 0; + for (auto &&value : src) { + auto value_ = reinterpret_steal( + value_conv::cast(forward_like(value), policy, parent)); + if (!value_) + return handle(); + PyList_SET_ITEM(l.ptr(), (ssize_t)index++, + value_.release().ptr()); // steals a reference + } + return l.release(); + } +}; + +// span specialization: accepts any readonly buffers +template <> struct type_caster> { + using span_type = typename dai::span; + PYBIND11_TYPE_CASTER(span_type, _("buffer")); + + bool load(handle src, bool convert) { + if (!isinstance(src)) + return false; + auto buf = reinterpret_borrow(src); + auto req = buf.request(); + if (req.ndim != 1) { + return false; + } + + value = span_type((const uint8_t*)req.ptr, req.size*req.itemsize); + return true; + } + +public: + template + static handle cast(T &&src, return_value_policy policy, handle parent) { + return bytes((char*)src.data(), src.size()).release(); + } +}; + +// span specialization: writeable buffer +template <> struct type_caster> { + using span_type = typename dai::span; + PYBIND11_TYPE_CASTER(dai::span, _("buffer")); + + bool load(handle src, bool convert) { + if (!isinstance(src)) + return false; + auto buf = reinterpret_borrow(src); + auto req = buf.request(true); // buffer must be writeable + if (req.ndim != 1) { + return false; + } + + value = dai::span((uint8_t*)req.ptr, req.size*req.itemsize); + return true; + } + +public: + template + static handle cast(T &&src, return_value_policy policy, handle parent) { + // TODO: 
should this be a memoryview instead? + return bytes((char*)src.data(), src.size()).release(); + } +}; + +} // namespace detail +} // namespace pybind11 \ No newline at end of file diff --git a/utilities/README.md b/utilities/README.md index 2f91deb7a..3b7259bd1 100644 --- a/utilities/README.md +++ b/utilities/README.md @@ -22,3 +22,30 @@ To build standalone executable issue the following command: ```sh pyinstaller --onefile -w --icon=assets/icon.ico --add-data="assets/icon.ico;assets" --add-data="assets/icon.png;assets" device_manager.py ``` + +Optionally, append `--runtime-tmpdir [path or .]` to modify where the temporary directory should be created when launched. + + +## Cam Test +Run: +```sh +python3 cam_test.py +``` +To start cam test with GUI. +Run cam_test.py with args to start cam test without GUI: + +### Bundled executable +Requirements: +``` +# Linux/macOS +python3 -m pip install pyinstaller +# Windows +python -m pip install pyinstaller +``` + +To build a bundled executable issue the following command: +```sh +pyinstaller -w cam_test.py --hidden-import PyQt5.sip +``` + +The executable will be located in `dist/cam_test` folder. diff --git a/utilities/cam_test.py b/utilities/cam_test.py index 1932e6e7a..bae88c9f6 100755 --- a/utilities/cam_test.py +++ b/utilities/cam_test.py @@ -55,9 +55,9 @@ def socket_type_pair(arg): default=[['rgb', True], ['left', False], ['right', False], ['camd', True]], help="Which camera sockets to enable, and type: c[olor] / m[ono]. " "E.g: -cams rgb,m right,c . Default: rgb,c left,m right,m camd,c") -parser.add_argument('-mres', '--mono-resolution', type=int, default=800, choices={480, 400, 720, 800}, +parser.add_argument('-mres', '--mono-resolution', type=int, default=800, choices={480, 400, 720, 800, 1200, 4000, 4224}, help="Select mono camera resolution (height). Default: %(default)s") -parser.add_argument('-cres', '--color-resolution', default='1080', choices={'720', '800', '1080', '1200', '1500', '1520', '1560', '4000', '4k', '5mp', '12mp', '48mp'}, +parser.add_argument('-cres', '--color-resolution', default='1080', choices={'720', '800', '1080', '1200', '1500', '1520', '1560', '4000', '4k', '5mp', '12mp', '13mp', '48mp'}, help="Select color camera resolution / height. Default: %(default)s") parser.add_argument('-rot', '--rotate', const='all', choices={'all', 'rgb', 'mono'}, nargs="?", help="Which cameras to rotate 180 degrees. 
All if not filtered") @@ -83,9 +83,9 @@ def socket_type_pair(arg): print("DepthAI path:", dai.__file__) cam_socket_opts = { - 'rgb' : dai.CameraBoardSocket.RGB, # Or CAM_A - 'left' : dai.CameraBoardSocket.LEFT, # Or CAM_B - 'right': dai.CameraBoardSocket.RIGHT, # Or CAM_C + 'rgb' : dai.CameraBoardSocket.CAM_A, + 'left' : dai.CameraBoardSocket.CAM_B, + 'right': dai.CameraBoardSocket.CAM_C, 'camd' : dai.CameraBoardSocket.CAM_D, 'came' : dai.CameraBoardSocket.CAM_E, 'camf' : dai.CameraBoardSocket.CAM_F, @@ -95,6 +95,9 @@ def socket_type_pair(arg): 'RGB' : 'rgb', 'LEFT' : 'left', 'RIGHT': 'right', + 'CAM_A': 'rgb', + 'CAM_B': 'left', + 'CAM_C': 'right', 'CAM_D': 'camd', 'CAM_E': 'came', 'CAM_F': 'camf', @@ -115,6 +118,8 @@ def socket_type_pair(arg): 720: dai.MonoCameraProperties.SensorResolution.THE_720_P, 800: dai.MonoCameraProperties.SensorResolution.THE_800_P, 1200: dai.MonoCameraProperties.SensorResolution.THE_1200_P, + 4000: dai.MonoCameraProperties.SensorResolution.THE_4000X3000, + 4224: dai.MonoCameraProperties.SensorResolution.THE_4224X3136, } color_res_opts = { @@ -129,6 +134,7 @@ def socket_type_pair(arg): '4k': dai.ColorCameraProperties.SensorResolution.THE_4_K, '5mp': dai.ColorCameraProperties.SensorResolution.THE_5_MP, '12mp': dai.ColorCameraProperties.SensorResolution.THE_12_MP, + '13mp': dai.ColorCameraProperties.SensorResolution.THE_13_MP, '48mp': dai.ColorCameraProperties.SensorResolution.THE_48_MP, } diff --git a/utilities/cam_test_gui.py b/utilities/cam_test_gui.py new file mode 100644 index 000000000..0617d8b90 --- /dev/null +++ b/utilities/cam_test_gui.py @@ -0,0 +1,375 @@ +from typing import List +from PyQt5 import QtCore, QtGui, QtWidgets +import depthai as dai +import sys +import signal +import os +import psutil + + +class CamTestGui: + + CAM_SOCKET_OPTIONS = ["rgb", "left", "right", "camd"] + CAM_TYPE_OPTIONS = ["color", "mono"] + MONO_RESOLUTION_OPTIONS = ["400", "480", "720", "800"] + COLOR_RESOLUTION_OPTIONS = ["720", "800", + "1080", "1200", "4k", "5mp", "12mp", "48mp"] + ROTATE_OPTIONS = ["disabled", "all", "rgb", "mono"] + DEPTHAI_CONNECT_TIMEOUT_DEFAULT = 30000 + DEPTHAI_BOOTUP_TIMEOUT_DEFAULT = 30000 + + def remove_camera(self, layout: QtWidgets.QHBoxLayout): + for i in reversed(range(layout.count())): + layout.itemAt(i).widget().setParent(None) + self.cameras_list.removeItem(layout) + + def add_camera(self, camera: str = None, camera_type: str = None): + layout = QtWidgets.QHBoxLayout() + cam_combo = QtWidgets.QComboBox() + cam_combo.addItems(self.CAM_SOCKET_OPTIONS) + cam_combo.setCurrentIndex( + self.CAM_SOCKET_OPTIONS.index(camera) if camera else 0) + layout.addWidget(cam_combo) + cam_type_combo = QtWidgets.QComboBox() + cam_type_combo.addItems(self.CAM_TYPE_OPTIONS) + cam_type_combo.setCurrentIndex( + self.CAM_TYPE_OPTIONS.index(camera_type) if camera_type else 0) + layout.addWidget(cam_type_combo) + self.cameras_list.addLayout(layout) + remove_button = QtWidgets.QPushButton("Remove") + remove_button.clicked.connect(lambda: self.remove_camera(layout)) + layout.addWidget(remove_button) + + def set_default_cameras(self): + self.add_camera("rgb", "color") + self.add_camera("left", "mono") + self.add_camera("right", "mono") + self.add_camera("camd", "color") + + def __init__(self, app: "Application"): + self.app = app + self.app.setWindowTitle("Camera Test") + self.main_widget = QtWidgets.QWidget() + self.scroll_widget = QtWidgets.QScrollArea() + self.scroll_widget.setWidget(self.main_widget) + self.scroll_widget.setWidgetResizable(True) + 
self.scroll_widget.setMinimumHeight(500) + self.main_layout = QtWidgets.QVBoxLayout() + self.main_widget.setLayout(self.main_layout) + self.app.setCentralWidget(self.scroll_widget) + self.label_cameras = QtWidgets.QLabel("Cameras") + self.main_layout.addWidget(self.label_cameras) + + self.cameras_list = QtWidgets.QVBoxLayout() + self.main_layout.addLayout(self.cameras_list) + self.set_default_cameras() + + self.add_cam_button = QtWidgets.QPushButton("Add Camera") + self.add_cam_button.clicked.connect(self.add_camera) + self.main_layout.addWidget(self.add_cam_button) + + self.mono_resolution_label = QtWidgets.QLabel("Mono Resolution") + self.main_layout.addWidget(self.mono_resolution_label) + self.mono_resolution_combo = QtWidgets.QComboBox() + self.mono_resolution_combo.addItems(self.MONO_RESOLUTION_OPTIONS) + self.main_layout.addWidget(self.mono_resolution_combo) + self.mono_resolution_combo.setCurrentIndex(3) + + self.label_color_resolution = QtWidgets.QLabel("Color Resolution") + self.main_layout.addWidget(self.label_color_resolution) + self.combo_color_resolution = QtWidgets.QComboBox() + self.combo_color_resolution.addItems(self.COLOR_RESOLUTION_OPTIONS) + self.main_layout.addWidget(self.combo_color_resolution) + self.combo_color_resolution.setCurrentIndex(2) + + self.label_rotate = QtWidgets.QLabel("Rotate") + self.main_layout.addWidget(self.label_rotate) + self.combo_rotate = QtWidgets.QComboBox() + self.combo_rotate.addItems(self.ROTATE_OPTIONS) + self.main_layout.addWidget(self.combo_rotate) + + self.label_fps = QtWidgets.QLabel("FPS") + self.main_layout.addWidget(self.label_fps) + self.spin_fps = QtWidgets.QSpinBox() + self.spin_fps.setMinimum(1) + self.spin_fps.setMaximum(120) + self.spin_fps.setValue(30) + self.main_layout.addWidget(self.spin_fps) + + self.label_isp3afps = QtWidgets.QLabel("ISP3 AFPS") + self.main_layout.addWidget(self.label_isp3afps) + self.spin_isp3afps = QtWidgets.QSpinBox() + self.spin_isp3afps.setMinimum(0) + self.spin_isp3afps.setMaximum(120) + self.spin_isp3afps.setValue(0) + self.main_layout.addWidget(self.spin_isp3afps) + + self.label_isp_downscale = QtWidgets.QLabel("ISP Downscale") + self.main_layout.addWidget(self.label_isp_downscale) + self.spin_isp_downscale = QtWidgets.QSpinBox() + self.spin_isp_downscale.setMinimum(1) + self.spin_isp_downscale.setMaximum(4) + self.spin_isp_downscale.setValue(1) + self.main_layout.addWidget(self.spin_isp_downscale) + + self.label_resizable_windows = QtWidgets.QLabel("Resizable Windows") + self.main_layout.addWidget(self.label_resizable_windows) + self.check_resizable_windows = QtWidgets.QCheckBox() + self.main_layout.addWidget(self.check_resizable_windows) + + self.label_camera_tuning = QtWidgets.QLabel("Camera Tuning") + self.main_layout.addWidget(self.label_camera_tuning) + self.camera_tuning_path = QtWidgets.QLineEdit() + self.main_layout.addWidget(self.camera_tuning_path) + + self.label_connect_timeout = QtWidgets.QLabel("Connect Timeout (ms)") + self.main_layout.addWidget(self.label_connect_timeout) + self.spin_connect_timeout = QtWidgets.QSpinBox() + self.spin_connect_timeout.setMinimum(1) + self.spin_connect_timeout.setMaximum(60000) + self.spin_connect_timeout.setValue( + self.DEPTHAI_CONNECT_TIMEOUT_DEFAULT) + self.main_layout.addWidget(self.spin_connect_timeout) + + self.label_boot_timeout = QtWidgets.QLabel("Bootup Timeout (ms)") + self.main_layout.addWidget(self.label_boot_timeout) + self.spin_boot_timeout = QtWidgets.QSpinBox() + self.spin_boot_timeout.setMinimum(1) + 
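# The connect/boot timeout spin boxes are forwarded to cam_test.py as
# --connection-timeout and --boot-timeout (see construct_args_from_gui below);
# values are in milliseconds and default to 30000.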
self.spin_boot_timeout.setMaximum(60000) + self.spin_boot_timeout.setValue(self.DEPTHAI_BOOTUP_TIMEOUT_DEFAULT) + self.main_layout.addWidget(self.spin_boot_timeout) + + self.label_available_devices = QtWidgets.QLabel("Available Devices") + self.main_layout.addWidget(self.label_available_devices) + self.available_devices_combo = QtWidgets.QComboBox() + self.main_layout.addWidget(self.available_devices_combo) + + self.connect_layout = QtWidgets.QHBoxLayout() + self.connect_button = QtWidgets.QPushButton("Connect") + self.connect_button.clicked.connect(self.app.connect) + self.connect_layout.addWidget(self.connect_button) + self.check_auto_mode = QtWidgets.QCheckBox("Auto Mode") + self.check_auto_mode.setToolTip( + "Whenever a device is available, connect to it automatically") + self.connect_layout.addWidget(self.check_auto_mode) + self.check_auto_mode.setChecked(False) + self.main_layout.addLayout(self.connect_layout) + + self.disconnect_button = QtWidgets.QPushButton("Disconnect") + self.disconnect_button.clicked.connect(self.app.disconnect) + self.main_layout.addWidget(self.disconnect_button) + self.disconnect_button.setHidden(True) + + def handle_automode_changed(self, state): + self.disconnect_button.setHidden(bool(state)) + self.connect_button.setHidden(bool(state)) + + def handle_disconnect(self): + self.available_devices_combo.clear() + if not self.check_auto_mode.isChecked(): + self.connect_button.setDisabled(True) + self.disconnect_button.setDisabled(True) + self.disconnect_button.setHidden(True) + self.connect_button.setHidden(False) + self.add_cam_button.setDisabled(False) + self.mono_resolution_combo.setDisabled(False) + self.combo_color_resolution.setDisabled(False) + self.combo_rotate.setDisabled(False) + self.spin_fps.setDisabled(False) + self.spin_isp3afps.setDisabled(False) + self.spin_isp_downscale.setDisabled(False) + self.check_resizable_windows.setDisabled(False) + self.camera_tuning_path.setDisabled(False) + self.available_devices_combo.setDisabled(False) + for i in range(self.cameras_list.count()): + self.cameras_list.itemAt(i).itemAt(0).widget().setDisabled(False) + self.cameras_list.itemAt(i).itemAt(1).widget().setDisabled(False) + self.cameras_list.itemAt(i).itemAt(2).widget().setDisabled(False) + self.spin_connect_timeout.setDisabled(False) + self.spin_boot_timeout.setDisabled(False) + + def handle_connect(self): + self.spin_boot_timeout.setDisabled(True) + self.spin_connect_timeout.setDisabled(True) + if not self.check_auto_mode.isChecked(): + self.connect_button.setDisabled(True) + self.disconnect_button.setDisabled(False) + self.disconnect_button.setHidden(False) + self.connect_button.setHidden(True) + self.add_cam_button.setDisabled(True) + self.mono_resolution_combo.setDisabled(True) + self.combo_color_resolution.setDisabled(True) + self.combo_rotate.setDisabled(True) + self.spin_fps.setDisabled(True) + self.spin_isp3afps.setDisabled(True) + self.spin_isp_downscale.setDisabled(True) + self.check_resizable_windows.setDisabled(True) + self.camera_tuning_path.setDisabled(True) + self.available_devices_combo.setDisabled(True) + for i in range(self.cameras_list.count()): + self.cameras_list.itemAt(i).itemAt(0).widget().setDisabled(True) + self.cameras_list.itemAt(i).itemAt(1).widget().setDisabled(True) + self.cameras_list.itemAt(i).itemAt(2).widget().setDisabled(True) + + +class WorkerSignals(QtCore.QObject): + finished = QtCore.pyqtSignal(list) + + +class Worker(QtCore.QRunnable): + + def __init__(self, fn, *args, **kwargs): + super().__init__() + self.fn = fn + 
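# Generic QRunnable wrapper: stash the callable and its arguments, run it on a
# QThreadPool worker thread, and hand the result back to the GUI thread via the
# `finished` signal (used below to call dai.Device.getAllAvailableDevices without
# blocking the UI).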
self.args = args + self.kwargs = kwargs + self.signals = WorkerSignals() + + @QtCore.pyqtSlot() + def run(self): + result = self.fn(*self.args, **self.kwargs) + self.signals.finished.emit(result) + + +class Application(QtWidgets.QMainWindow): + + def __init__(self): + super().__init__() + self.available_devices: List[dai.DeviceInfo] = [] + self.ui = CamTestGui(self) + self.query_devices_timer = QtCore.QTimer() + self.query_devices_timer.timeout.connect(self.query_devices) + self.query_devices_timer.start(2000) + self.query_devices() + + self.test_process_pid = None + + # Once the test process is started, periodically check if it's still running (catches eg. camera unplugged) + self.check_test_process_timer = QtCore.QTimer() + self.check_test_process_timer.timeout.connect(self.check_test_process) + + self.ui.check_auto_mode.stateChanged.connect(self.automode_changed) + + def closeEvent(self, a0: QtGui.QCloseEvent) -> None: + if self.test_process_pid: + os.kill(self.test_process_pid, signal.SIGINT) + return super().closeEvent(a0) + + def automode_changed(self, state): + self.ui.handle_automode_changed(state) + if not state: + self.disconnect() + + def construct_args_from_gui(self) -> List[str]: + if not self.available_devices: + return [] + self.device = self.available_devices[self.ui.available_devices_combo.currentIndex( + )].mxid + cmd = [] + cmd.append("--cameras") + for i in range(self.ui.cameras_list.count()): + hbox = self.ui.cameras_list.itemAt(i) + cam_combo = hbox.itemAt(0) + cam_type_combo = hbox.itemAt(1) + cam = cam_combo.widget().currentText() + cam_type = cam_type_combo.widget().currentText() + cmd.append(f"{cam},{cam_type[0]}") + cmd.append("-mres") + cmd.append(self.ui.mono_resolution_combo.currentText()) + cmd.append("-cres") + cmd.append(self.ui.combo_color_resolution.currentText()) + if self.ui.combo_rotate.currentText() != "disabled": + cmd.append("-rot") + cmd.append(self.ui.combo_rotate.currentText()) + cmd.append("-fps") + cmd.append(str(self.ui.spin_fps.value())) + cmd.append("-isp3afps") + cmd.append(str(self.ui.spin_isp3afps.value())) + cmd.append("-ds") + cmd.append(str(self.ui.spin_isp_downscale.value())) + if self.ui.check_resizable_windows.isChecked(): + cmd.append("-rs") + if self.ui.camera_tuning_path.text(): + cmd.append("-tun") + cmd.append(self.ui.camera_tuning_path.text()) + cmd.append("--device") + cmd.append(self.device) + cmd.append("--connection-timeout") + cmd.append(str(self.ui.spin_connect_timeout.value())) + cmd.append("--boot-timeout") + cmd.append(str(self.ui.spin_boot_timeout.value())) + return cmd + + def check_test_process(self): + if self.test_process_pid and psutil.pid_exists(self.test_process_pid): + return + self.test_process_pid = None + self.disconnect() + self.check_test_process_timer.stop() + + def connect(self): + args = self.construct_args_from_gui() + if not args: + return + + started_successfully = False + self.test_process = QtCore.QProcess() + # Start detached process with the function that also returns the PID + if getattr(sys, 'frozen', False): + started_successfully, self.test_process_pid = self.test_process.startDetached( + sys.executable, args, "") + else: + started_successfully, self.test_process_pid = self.test_process.startDetached( + sys.executable, sys.argv + args, "") + if not started_successfully: + self.test_process_pid = None + self.disconnect() + return + self.query_devices_timer.stop() + self.check_test_process_timer.start(1000) + self.ui.handle_connect() + + def disconnect(self): + if self.test_process_pid: + 
try: + os.kill(self.test_process_pid, signal.SIGINT) + except OSError: + self.test_process_pid = None + self.test_process_pid = None + self.query_devices_timer.start() + self.ui.handle_disconnect() + + def query_devices(self): + self.query_devices_timer.stop() + pool = QtCore.QThreadPool.globalInstance() + query_devices_worker = Worker(dai.Device.getAllAvailableDevices) + query_devices_worker.signals.finished.connect( + self.on_finish_query_devices) + pool.start(query_devices_worker) + + def on_finish_query_devices(self, result): + current_device = self.ui.available_devices_combo.currentText() + self.ui.available_devices_combo.clear() + self.available_devices = result + self.ui.available_devices_combo.addItems( + list(map(lambda d: f"{d.name} ({d.getMxId()})", self.available_devices))) + self.query_devices_timer.start() + if self.available_devices: + if current_device: + index = self.ui.available_devices_combo.findText( + current_device) + if index != -1: + self.ui.available_devices_combo.setCurrentIndex(index) + if self.ui.check_auto_mode.isChecked(): + self.connect() + self.ui.connect_button.setDisabled(False) + else: + self.ui.connect_button.setDisabled(True) + + +def main(): + signal.signal(signal.SIGINT, signal.SIG_DFL) + app = QtWidgets.QApplication(sys.argv) + application = Application() + application.show() + sys.exit(app.exec_()) diff --git a/utilities/device_manager.py b/utilities/device_manager.py index ef2a4664e..99814f10b 100755 --- a/utilities/device_manager.py +++ b/utilities/device_manager.py @@ -10,6 +10,7 @@ from typing import Dict import platform import os +import numpy if USE_OPENCV: # import cv2 @@ -20,10 +21,14 @@ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +PLATFORM_ICON_PATH = None if platform.system() == 'Windows': - sg.set_global_icon(f'{SCRIPT_DIR}/assets/icon.ico') + PLATFORM_ICON_PATH = f'{SCRIPT_DIR}/assets/icon.ico' else: - sg.set_global_icon(f'{SCRIPT_DIR}/assets/icon.png') + PLATFORM_ICON_PATH = f'{SCRIPT_DIR}/assets/icon.png' + +# Apply icon globally +sg.set_global_icon(PLATFORM_ICON_PATH) CONF_TEXT_POE = ['ipTypeText', 'ipText', 'maskText', 'gatewayText', 'dnsText', 'dnsAltText', 'networkTimeoutText', 'macText'] CONF_INPUT_POE = ['staticBut', 'dynamicBut', 'ip', 'mask', 'gateway', 'dns', 'dnsAlt', 'networkTimeout', 'mac'] @@ -38,27 +43,35 @@ def PrintException(): filename = f.f_code.co_filename print('Exception in {}, line {}; {}'.format(filename, lineno, exc_obj)) -def check_ip(s: str, req = True): +def Popup(msg, window): + main_window_location = window.CurrentLocation() + main_window_size = window.size + # Calculate the centered location for the new window + centered_location = (main_window_location[0] + (main_window_size[0] - 50) // 2, + main_window_location[1] + (main_window_size[1] - 50) // 2) + return sg.Popup(msg, location=centered_location) + +def check_ip(window, s: str, req = True): if s == "": return not req spl = s.split(".") if len(spl) != 4: - sg.Popup("Wrong IP format.\nValue should be similar to 255.255.255.255") + Popup("Wrong IP format.\nValue should be similar to 255.255.255.255", window=window) return False for num in spl: if 255 < int(num): - sg.Popup("Wrong IP format.\nValues can not be above 255!") + Popup("Wrong IP format.\nValues can not be above 255!", window=window) return False return True -def check_mac(s): +def check_mac(window, s): if s.count(":") != 5: - sg.Popup("Wrong MAC format.\nValue should be similar to FF:FF:FF:FF:FF:FF") + Popup("Wrong MAC format.\nValue should be similar to FF:FF:FF:FF:FF:FF", 
window=window) return False for i in s.split(":"): for j in i: if j > "F" or (j < "A" and not j.isdigit()) or len(i) != 2: - sg.Popup("Wrong MAC format.\nValue should be similar to FF:FF:FF:FF:FF:FF") + Popup("Wrong MAC format.\nValue should be similar to FF:FF:FF:FF:FF:FF", window=window) return False return True @@ -103,20 +116,17 @@ def __init__(self, text): self.ok = False layout = [ [sg.Text(text)], - [sg.Submit(button_text="Yes"), sg.Cancel(button_text="No")], + [sg.Submit(), sg.Cancel()], ] self.window = sg.Window("Are You Sure?", layout, size=(450,150), modal=True, finalize=True) def wait(self): event, values = self.window.Read() self.window.close() - if values is not None: - return str(event) == "Submit" - else: - return False + return str(event) == "Submit" class SelectIP: - def __init__(self): + def __init__(self, window): self.ok = False layout = [ [sg.Text("Specify the custom IP of the OAK PoE\ncamera you want to connect to")], @@ -127,15 +137,24 @@ def __init__(self): [sg.Submit(), sg.Cancel()], ] self.window = sg.Window("Specify IP", layout, size=(300,110), modal=True, finalize=True) + + main_window_location = window.CurrentLocation() + main_window_size = window.size + new_window_size = self.window.size + # Calculate the centered location for the new window + centered_location = (main_window_location[0] + (main_window_size[0] - new_window_size[0]) // 2, + main_window_location[1] + (main_window_size[1] - new_window_size[1]) // 2) + self.window.move(*centered_location) + def wait(self): event, values = self.window.Read() self.window.close() - if str(event) == "Cancel" or values is None or not check_ip(values["ip"]): + if str(event) == "Cancel" or values is None or not check_ip(self.window, values["ip"]): return False, "" return True, values["ip"] class SearchDevice: - def __init__(self): + def __init__(self, window): self.infos = [] layout = [ [sg.Text("Select an OAK camera you would like to connect to.", font=('Arial', 10, 'bold'))], @@ -159,12 +178,22 @@ def __init__(self): [sg.Button('Search', size=(15, 2), font=('Arial', 10, 'bold'))], ] self.window = sg.Window("Select Device", layout, size=(550,375), modal=True, finalize=True) + + main_window_location = window.CurrentLocation() + main_window_size = window.size + new_window_size = self.window.size + # Calculate the centered location for the new window + centered_location = (main_window_location[0] + (main_window_size[0] - new_window_size[0]) // 2, + main_window_location[1] + (main_window_size[1] - new_window_size[1]) // 2) + self.window.move(*centered_location) + self.search_devices() def search_devices(self): self.infos = dai.XLinkConnection.getAllConnectedDevices() if not self.infos: - sg.Popup("No devices found.") + pass + # sg.Popup("No devices found.") else: rows = [] for info in self.infos: @@ -188,17 +217,22 @@ def wait(self) -> dai.DeviceInfo: return deviceSelected def flashBootloader(bl: dai.DeviceBootloader, device: dai.DeviceInfo, type: dai.DeviceBootloader.Type): - factoryBlWarningMessage = """Main Bootloader type or version doesn't support User Bootloader flashing. -Main (factory) bootloader will be updated instead. + userBlWarningMessage = """Updating bootloader can soft-brick a device. +Proceed with caution""" + factoryBlWarningMessage = """Factory Bootloader type or version doesn't support User Bootloader flashing. +Factory bootloader will be updated instead. 
Proceed with caution """ try: if bl.isUserBootloaderSupported(): - pr = Progress('Flashing...') - progress = lambda p : pr.update(p) - bl.flashUserBootloader(progress) - pr.finish("Flashed newest User Bootloader version.") + if AreYouSure(text=userBlWarningMessage).wait(): + pr = Progress('Flashing...') + progress = lambda p : pr.update(p) + bl.flashUserBootloader(progress) + pr.finish("Flashed newest User Bootloader version.") + else: + return False elif AreYouSure(text=factoryBlWarningMessage).wait(): bl.close() pr = Progress('Connecting...') @@ -255,21 +289,20 @@ def factoryReset(device: dai.DeviceInfo, type: dai.DeviceBootloader.Type): def connectAndStartStreaming(dev): - # OpenCV - if USE_OPENCV: + with dai.Device(dev) as d: # Create pipeline pipeline = dai.Pipeline() + # OpenCV + if USE_OPENCV: + camRgb = pipeline.create(dai.node.ColorCamera) + camRgb.setIspScale(1,3) + videnc = pipeline.create(dai.node.VideoEncoder) + videnc.setDefaultProfilePreset(camRgb.getFps(), videnc.Properties.Profile.MJPEG) + xout = pipeline.create(dai.node.XLinkOut) + xout.setStreamName("mjpeg") + camRgb.video.link(videnc.input) + videnc.bitstream.link(xout.input) - camRgb = pipeline.create(dai.node.ColorCamera) - camRgb.setIspScale(1,3) - videnc = pipeline.create(dai.node.VideoEncoder) - videnc.setDefaultProfilePreset(camRgb.getFps(), videnc.Properties.Profile.MJPEG) - xout = pipeline.create(dai.node.XLinkOut) - xout.setStreamName("mjpeg") - camRgb.video.link(videnc.input) - videnc.bitstream.link(xout.input) - - with dai.Device(pipeline, dev) as d: while not d.isClosed(): mjpeg = d.getOutputQueue('mjpeg').get() frame = cv2.imdecode(mjpeg.getData(), cv2.IMREAD_UNCHANGED) @@ -277,21 +310,22 @@ def connectAndStartStreaming(dev): if cv2.waitKey(1) == ord('q'): cv2.destroyWindow('Color Camera') break - else: - # Create pipeline (no opencv) - pipeline = dai.Pipeline() - camRgb = pipeline.create(dai.node.ColorCamera) - camRgb.setIspScale(1,3) - camRgb.setPreviewSize(camRgb.getIspSize()) - camRgb.setColorOrder(camRgb.Properties.ColorOrder.RGB) - - xout = pipeline.create(dai.node.XLinkOut) - xout.input.setQueueSize(2) - xout.input.setBlocking(False) - xout.setStreamName("color") - camRgb.preview.link(xout.input) - - with dai.Device(pipeline, dev) as d: + else: + camRgb = pipeline.create(dai.node.ColorCamera) + camRgb.setIspScale(1,3) + firstSensor = d.getConnectedCameraFeatures()[0] + camRgb.setPreviewSize(firstSensor.width // 3, firstSensor.height // 3) + camRgb.setColorOrder(camRgb.Properties.ColorOrder.RGB) + + xout = pipeline.create(dai.node.XLinkOut) + xout.input.setQueueSize(2) + xout.input.setBlocking(False) + xout.setStreamName("color") + camRgb.preview.link(xout.input) + + # Start pipeline + d.startPipeline(pipeline) + frame = d.getOutputQueue('color', 2, False).get() width, height = frame.getWidth(), frame.getHeight() @@ -403,12 +437,6 @@ def deviceStateTxt(state: dai.XLinkDeviceState) -> str: sg.Text("-version-", key="version", size=(30, 1)), sg.VSeparator(), sg.Text("-version-", key="commit", size=(31, 1)) - ], - [sg.HSeparator()], - [ - sg.Text("", size=(7, 2)), - sg.Button("Flash Newest Bootloader", size=(20, 2), font=('Arial', 10, 'bold'), disabled=True, - button_color='#FFA500'), ] ] @@ -513,12 +541,17 @@ def deviceStateTxt(state: dai.XLinkDeviceState) -> str: ], [sg.HSeparator()], [ + sg.Button("Update Bootloader", size=(20, 2), font=('Arial', 10, 'bold'), disabled=True, + button_color='#FFA500'), sg.Button("Flash Factory Bootloader", size=(20, 2), font=('Arial', 10, 'bold'), disabled=True, 
button_color='#FFA500', key='flashFactoryBootloader'), + ], + [sg.HSeparator()], + [ sg.Button("Factory reset", size=(17, 2), font=('Arial', 10, 'bold'), disabled=True, button_color='#FFA500'), sg.Button("Boot into USB\nRecovery mode", size=(20, 2), font=('Arial', 10, 'bold'), disabled=True, key='recoveryMode', button_color='#FFA500') - ], + ] ] @@ -539,7 +572,7 @@ class DeviceManager: def __init__(self) -> None: self.window = sg.Window(title="Device Manager", - icon="assets/icon.png", + icon=PLATFORM_ICON_PATH, layout=layout, size=(645, 380), finalize=True # So we can do First search for devices @@ -555,7 +588,7 @@ def isPoE(self) -> bool: return self.bl.getType() == dai.DeviceBootloader.Type.NETWORK except Exception as ex: PrintException() - sg.Popup(f'{ex}') + Popup(f'{ex}', self.window) def isUsb(self) -> bool: return not self.isPoE() @@ -576,7 +609,7 @@ def run(self) -> None: device = self.device if deviceStateTxt(device.state) == "BOOTED": # device is already booted somewhere else - sg.Popup("Device is already booted somewhere else!") + Popup("Device is already booted somewhere else!", self.window) else: self.resetGui() self.bl = connectToDevice(device) @@ -587,7 +620,7 @@ def run(self) -> None: self.window.Element('progress').update("No device selected.") elif event == "Search": self.getDevices() # Re-search devices for dropdown - selDev = SearchDevice() + selDev = SearchDevice(window=self.window) di = selDev.wait() if di is not None: self.resetGui() @@ -600,7 +633,7 @@ def run(self) -> None: self.getConfigs() self.unlockConfig() elif event == "Specify IP": - select = SelectIP() + select = SelectIP(window=self.window) ok, ip = select.wait() if ok: self.resetGui() @@ -614,7 +647,9 @@ def run(self) -> None: if self.bl is None: continue self.getConfigs() self.unlockConfig() - elif event == "Flash Newest Bootloader": + + # Danger + elif event == "Update Bootloader": # Use current type if flashBootloader(self.bl, self.device, self.bl.getType()): # Device will reboot, close previous and reset GUI @@ -623,8 +658,6 @@ def run(self) -> None: self.getDevices() else: print("Flashing bootloader canceled.") - - # Danger elif event == "flashFactoryBootloader": sel = SelectBootloader(['AUTO', 'USB', 'NETWORK'], "Select bootloader type to flash.") ok, type = sel.wait() @@ -654,14 +687,14 @@ def run(self) -> None: print("Factory reset cancelled.") elif event == "Flash configuration": - self.flashConfig() - self.getConfigs() - self.resetGui() - if self.isUsb(): - self.unlockConfig() - else: - self.devices.clear() - self.window.Element('devices').update("Search for devices", values=[]) + if self.flashConfig() is not None: + self.getConfigs() + self.resetGui() + if self.isUsb(): + self.unlockConfig() + else: + self.devices.clear() + self.window.Element('devices').update("Search for devices", values=[]) elif event == "Clear configuration": self.clearConfig() self.getConfigs() @@ -676,7 +709,7 @@ def run(self) -> None: confJson = self.bl.readConfigData() sg.popup_scrolled(confJson, title='Configuration') except Exception as ex: - sg.popup(f'No existing config to view ({ex})') + Popup(f'No existing config to view ({ex})', self.window) elif event == "Flash application": file = sg.popup_get_file("Select .dap file", file_types=(('DepthAI Application Package', '*.dap'), ('All Files', '*.* *'))) @@ -684,9 +717,9 @@ def run(self) -> None: elif event == "Remove application": try: self.bl.flashClear() - sg.popup(f'Successfully removed application') + Popup(f'Successfully removed application', self.window) except 
-                    sg.popup(f"Couldn't remove application ({ex})")
+                    Popup(f"Couldn't remove application ({ex})", self.window)

             elif event.startswith("_unique_configBtn"):
                 self.window['-COL1-'].update(visible=False)
@@ -712,7 +745,7 @@ def run(self) -> None:

             elif event == "recoveryMode":
                 if recoveryMode(self.bl):
-                    sg.Popup(f'Device successfully put into USB recovery mode.')
+                    Popup(f'Device successfully put into USB recovery mode.', self.window)
                     # Device will reboot, close previous and reset GUI
                     self.closeDevice()
                     self.resetGui()
@@ -826,7 +859,7 @@ def unlockConfig(self):
                 for el in CONF_TEXT_USB:
                     self.window[el].update(text_color="black")

-        self.window['Flash Newest Bootloader'].update(disabled=False)
+        self.window['Update Bootloader'].update(disabled=False)
         self.window['flashFactoryBootloader'].update(disabled=False)
         self.window['Flash configuration'].update(disabled=False)
         self.window['Clear configuration'].update(disabled=False)
@@ -851,7 +884,7 @@ def resetGui(self):
             for el in conf:
                 self.window[el].update(text_color="gray")

-        self.window['Flash Newest Bootloader'].update(disabled=True)
+        self.window['Update Bootloader'].update(disabled=True)
         self.window['flashFactoryBootloader'].update(disabled=True)
         self.window['Flash configuration'].update(disabled=True)
         self.window['Clear configuration'].update(disabled=True)
@@ -871,6 +904,12 @@ def resetGui(self):
         self.window.Element('commit').update("-version-")
         self.window.Element('devState').update("-state-")

+        # Move back to 'About' page
+        self.window['-COL1-'].update(visible=True)
+        self.window['-COL2-'].update(visible=False)
+        self.window['-COL3-'].update(visible=False)
+        self.window['-COL4-'].update(visible=False)
+
     def closeDevice(self):
         if self.bl is not None:
             self.bl.close()
@@ -884,7 +923,7 @@ def getDevices(self):
             deviceInfos = dai.XLinkConnection.getAllConnectedDevices()
             if not deviceInfos:
                 self.window.Element('devices').update("No devices")
-                sg.Popup("No devices found.")
+                # sg.Popup("No devices found.")
             else:
                 for deviceInfo in deviceInfos:
                     deviceTxt = deviceInfo.getMxId()
@@ -895,7 +934,7 @@ def getDevices(self):
                 self.window.Element('devices').update("Select device", values=listedDevices)
         except Exception as ex:
             PrintException()
-            sg.Popup(f'{ex}')
+            Popup(f'{ex}', window=self.window)

     def flashConfig(self):
         values = self.values
@@ -911,12 +950,12 @@ def flashConfig(self):
         try:
             if self.isPoE:
                 if self.values['staticBut']:
-                    if check_ip(values['ip']) and check_ip(values['mask']) and check_ip(values['gateway'], req=False):
+                    if check_ip(self.window, values['ip']) and check_ip(self.window, values['mask']) and check_ip(self.window, values['gateway'], req=False):
                         conf.setStaticIPv4(values['ip'], values['mask'], values['gateway'])
                     else:
                         raise Exception('IP or Mask missing using static IP configuration')
                 else:
-                    if check_ip(values['ip'], req=False) and check_ip(values['mask'], req=False) and check_ip(values['gateway'], req=False):
+                    if check_ip(self.window, values['ip'], req=False) and check_ip(self.window, values['mask'], req=False) and check_ip(self.window, values['gateway'], req=False):
                         conf.setDynamicIPv4(values['ip'], values['mask'], values['gateway'])

                 conf.setDnsIPv4(values['dns'], values['dnsAlt'])
@@ -924,9 +963,9 @@ def flashConfig(self):
                     if int(values['networkTimeout']) >= 0:
                         conf.setNetworkTimeout(timedelta(seconds=int(values['networkTimeout']) / 1000))
                     else:
-                        sg.Popup("Values can not be negative!")
+                        Popup("Values can not be negative!", window=self.window)
                 if values['mac'] != "":
-                    if check_mac(values['mac']):
+                    if check_mac(self.window, values['mac']):
                        conf.setMacAddress(values['mac'])
                 else:
                     conf.setMacAddress('00:00:00:00:00:00')
@@ -935,29 +974,34 @@ def flashConfig(self):
                     if int(values['usbTimeout']) >= 0:
                         conf.setUsbTimeout(timedelta(seconds=int(values['usbTimeout']) / 1000))
                     else:
-                        sg.Popup("Values can not be negative!")
+                        Popup("Values can not be negative!", window=self.window)
                 if values['usbSpeed'] != "":
                     conf.setUsbMaxSpeed(getattr(dai.UsbSpeed, values['usbSpeed']))

             success, error = self.bl.flashConfig(conf)
             if not success:
-                sg.Popup(f"Flashing failed: {error}")
+                Popup(f"Flashing failed: {error}", window=self.window)
+                return False
             else:
-                sg.Popup("Flashing successful.")
+                Popup("Flashing successful.", window=self.window)
+                return True
+
         except Exception as ex:
             PrintException()
-            sg.Popup(f'{ex}')
+            Popup(f'{ex}', window=self.window)
+
+        return None

     def clearConfig(self):
         try:
             success, error = self.bl.flashConfigClear()
             if not success:
-                sg.Popup(f"Clearing configuration failed: {error}")
+                Popup(f"Clearing configuration failed: {error}", window=self.window)
             else:
-                sg.Popup("Successfully cleared configuration.")
+                Popup("Successfully cleared configuration.", window=self.window)
         except Exception as ex:
             PrintException()
-            sg.Popup(f'{ex}')
+            Popup(f'{ex}', window=self.window)


 app = DeviceManager()
diff --git a/utilities/install_requirements.py b/utilities/install_requirements.py
old mode 100644
new mode 100755
index 0d7dcad4f..21a69daa0
--- a/utilities/install_requirements.py
+++ b/utilities/install_requirements.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
 import subprocess
 import sys

diff --git a/utilities/requirements.txt b/utilities/requirements.txt
index b7b8c3381..ade6e25c8 100644
--- a/utilities/requirements.txt
+++ b/utilities/requirements.txt
@@ -1,2 +1,7 @@
 PySimpleGUI==4.60.3
-Pillow==9.2.0
+Pillow==9.3.0
+psutil==5.9.3
+numpy>=1.21.4 # For RPi Buster (last successful build) and macOS M1 (first build). But allow for higher versions, to support Python3.11 (not available in 1.21.4 yet)
+opencv-contrib-python==4.5.5.62 # Last successful RPi build, also covers M1 with above pinned numpy (otherwise 4.6.0.62 would be required, but that has a bug with charuco boards). Python version not important, abi3 wheels
+pyqt5>5,<5.15.6 ; platform_machine != "armv6l" and platform_machine != "armv7l" and platform_machine != "aarch64" and platform_machine != "arm64"
+--extra-index-url https://artifacts.luxonis.com/artifactory/luxonis-python-snapshot-local/
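
Not part of the diff: the device_manager.py hunks above replace sg.Popup(...) calls with a Popup(message, window) helper that is defined elsewhere in this changeset and not shown here. Purely as a hedged sketch of what such a window-anchored helper could look like, assuming PySimpleGUI's sg.popup(location=...) parameter and Window.current_location() method (the actual helper in the changeset may differ):

    import PySimpleGUI as sg

    def Popup(message, window=None):
        # Hypothetical sketch only: anchor the popup near the parent window
        # (if one is given) instead of the default screen position.
        location = window.current_location() if window is not None else (None, None)
        sg.popup(message, location=location)

Passing the Device Manager's own sg.Window (as the hunks do with self.window) would keep error and status popups on the same screen region as the main UI.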