diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 48745d3f5d..0f250ca778 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -474,64 +474,6 @@ jobs:
         with:
           name: minifi_docker
           path: build/minifi_docker.tar
-  legacy_docker_tests:
-    name: "Docker integration tests (x86_64)"
-    needs: docker_build
-    runs-on: ubuntu-24.04
-    timeout-minutes: 180
-    steps:
-      - id: checkout
-        uses: actions/checkout@v4
-      - id: run_cmake
-        name: Run CMake
-        run: |
-          mkdir build
-          cd build
-          cmake ${DOCKER_CMAKE_FLAGS} ..
-      - name: Download artifact
-        uses: actions/download-artifact@v4
-        with:
-          name: minifi_docker
-          path: build
-      - name: Load Docker image
-        run: |
-          docker load --input ./build/minifi_docker.tar
-      - id: install_deps
-        name: Install dependencies for Docker Verify
-        run: |
-          sudo apt update
-          sudo apt install -y python3-virtualenv
-      - id: free_disk_space
-        run: |
-          # We can gain additional disk space on the Ubuntu runners thanks to these suggestions:
-          # https://github.com/actions/runner-images/issues/2840#issuecomment-790492173
-          # https://github.com/actions/runner-images/issues/2606#issuecomment-772683150
-          sudo rm -rf /usr/share/dotnet
-          sudo rm -rf /usr/local/lib/android
-          sudo rm -rf /opt/ghc
-          sudo rm -rf "/usr/local/share/boost"
-          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
-      - id: test
-        name: Docker Verify
-        working-directory: ./build
-        run: make docker-verify
-      - name: Test Reporter
-        if: always()
-        uses: phoenix-actions/test-reporting@f957cd93fc2d848d556fa0d03c57bc79127b6b5e # v15
-        with:
-          name: Docker integration tests
-          path: build/behavex_output/behave/*.xml
-          reporter: java-junit
-          output-to: 'step-summary'
-          only-summary: 'true'
-          list-tests: 'failed'
-          list-suites: 'failed'
-      - name: Upload artifact
-        if: failure()
-        uses: actions/upload-artifact@v4
-        with:
-          name: behavex_output
-          path: build/behavex_output
   modular_docker_tests:
     name: "Modular Docker integration tests (x86_64)"
     needs: docker_build
diff --git a/.github/workflows/verify-package.yml b/.github/workflows/verify-package.yml
index 1c9a39898e..41da7fa002 100644
--- a/.github/workflows/verify-package.yml
+++ b/.github/workflows/verify-package.yml
@@ -19,77 +19,8 @@ env:
     -DENABLE_ELASTICSEARCH=OFF -DENABLE_GRAFANA_LOKI=ON -DENABLE_COUCHBASE=ON -DDOCKER_BUILD_ONLY=ON
 
 jobs:
-  docker-tests:
-    name: "${{ matrix.platform.name }} (${{ matrix.arch }})${{ inputs.enable_fips && ' (FIPS Mode)' || '' }}"
-    runs-on: ${{ matrix.arch == 'x86_64' && 'ubuntu-24.04' || 'ubuntu-24.04-arm' }}
-    timeout-minutes: 240
-    strategy:
-      fail-fast: false
-      matrix:
-        arch: [x86_64, aarch64]
-        platform: [
-          { name: "Rocky Linux 9", id: "rocky9", build_cmd: "make rocky9_from_rocky_package", verify_cmd: "make docker-verify-rocky9" },
-          { name: "Rocky Linux 10", id: "rocky10", build_cmd: "make rocky10_from_rocky_package", verify_cmd: "make docker-verify-rocky10" },
-          { name: "Rocky Linux 9 from RPM", id: "rocky9-rpm", build_cmd: "make rocky9_from_rpm_package", verify_cmd: "make docker-verify-rocky9-rpm" },
-          { name: "Rocky Linux 10 from RPM", id: "rocky10-rpm", build_cmd: "make rocky10_from_rpm_package", verify_cmd: "make docker-verify-rocky10-rpm" },
-          { name: "Ubuntu 22.04", id: "ubuntu_jammy", build_cmd: "make jammy_from_rocky_package", verify_cmd: "make docker-verify-jammy" },
-          { name: "Ubuntu 24.04", id: "ubuntu_noble", build_cmd: "make noble_from_rocky_package", verify_cmd: "make docker-verify-noble" },
-          { name: "Debian 12", id: "debian_bookworm", build_cmd: "make bookworm_from_rocky_package", verify_cmd: "make docker-verify-bookworm" },
-          { name: "Debian 13", id: "debian_trixie", build_cmd: "make trixie_from_rocky_package", verify_cmd: "make docker-verify-trixie" },
-        ]
-    permissions:
-      contents: read
-    steps:
-      - id: checkout
-        uses: actions/checkout@v4
-
-      - uses: actions/download-artifact@v4
-        with:
-          run-id: ${{ inputs.artifacts_workflow_id }}
-          name: minifi-${{ matrix.arch }}-tar
-          path: build
-          github-token: ${{ github.token }}
-
-      - uses: actions/download-artifact@v4
-        with:
-          run-id: ${{ inputs.artifacts_workflow_id }}
-          name: minifi-${{ matrix.arch }}-rpm
-          path: build
-          github-token: ${{ github.token }}
-
-      - id: install_deps
-        run: |
-          sudo apt update
-          sudo apt install -y python3-virtualenv
-
-      - run: |
-          cd build && cmake ${DOCKER_CMAKE_FLAGS} ..
-          VERIFY_CMD="${{ matrix.platform.verify_cmd }}"
-          if [[ "${{ inputs.enable_fips }}" == "true" ]]; then
-            VERIFY_CMD="${VERIFY_CMD}-fips"
-          fi
-          ${{ matrix.platform.build_cmd }} && $VERIFY_CMD
-
-      - name: Test Reporter
-        if: always()
-        uses: phoenix-actions/test-reporting@f957cd93fc2d848d556fa0d03c57bc79127b6b5e # v15
-        with:
-          name: "${{ matrix.platform.name }} (${{ matrix.arch }})${{ inputs.enable_fips && ' (FIPS Mode)' || '' }}"
-          path: build/behavex_output/behave/*.xml
-          reporter: java-junit
-          output-to: 'step-summary'
-          list-tests: 'failed'
-          list-suites: 'failed'
-
-      - name: Upload artifact
-        if: failure()
-        uses: actions/upload-artifact@v4
-        with:
-          name: ${{ matrix.platform.id }}_${{ matrix.arch }}_behavex_output${{ inputs.enable_fips && '_fips' || '' }}
-          path: build/behavex_output
-
   docker-test-modular:
-    name: "${{ matrix.platform.name }} (${{ matrix.arch }}) Modular"
+    name: "${{ matrix.platform.name }} (${{ matrix.arch }}) Modular${{ inputs.enable_fips && ' (FIPS Mode)' || '' }}"
     runs-on: ${{ matrix.arch == 'x86_64' && 'ubuntu-24.04' || 'ubuntu-24.04-arm' }}
     timeout-minutes: 240
     strategy:
@@ -134,6 +65,9 @@ jobs:
       - run: |
           cd build && cmake ${DOCKER_CMAKE_FLAGS} ..
           VERIFY_CMD="${{ matrix.platform.verify_cmd }}"
+          if [[ "${{ inputs.enable_fips }}" == "true" ]]; then
+            VERIFY_CMD="${VERIFY_CMD}-fips"
+          fi
           ${{ matrix.platform.build_cmd }} && $VERIFY_CMD
 
       - name: Test Reporter
diff --git a/README.md b/README.md
index 1d0798b691..5898a76b95 100644
--- a/README.md
+++ b/README.md
@@ -405,12 +405,6 @@ $ make docker-minimal
 #### Executing integration tests with your docker image
 You can execute system integration tests using a minifi docker image.
Currently, there are two types of docker integration tests: -##### Monolith legacy tests (features locates in docker/test/integration/features) -(we are in the process of migrating these) - ``` - ~/Development/code/apache/nifi-minifi-cpp/build - $ make docker-verify - ``` ##### Modular tests located near the tested extension (e.g. extensions/aws/tests/features) ``` ~/Development/code/apache/nifi-minifi-cpp/build diff --git a/cmake/DockerConfig.cmake b/cmake/DockerConfig.cmake index e4c9f7fb6d..e1595afda0 100644 --- a/cmake/DockerConfig.cmake +++ b/cmake/DockerConfig.cmake @@ -18,7 +18,7 @@ set(PROJECT_VERSION_STR ${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}.${PROJECT_VERSION_PATCH}) include(ProcessorCount) ProcessorCount(PROCESSOR_COUNT) -set(DOCKER_VERIFY_THREADS "${PROCESSOR_COUNT}" CACHE STRING "Number of threads that docker-verify can utilize") +set(DOCKER_VERIFY_THREADS "${PROCESSOR_COUNT}" CACHE STRING "Number of threads that docker-verify-modular can utilize") # Create a custom build target called "docker" that will invoke DockerBuild.sh and create the NiFi-MiNiFi-CPP Docker image add_custom_target( @@ -141,26 +141,21 @@ add_custom_target( -c DOCKER_BASE_IMAGE=${DOCKER_BASE_IMAGE} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/docker/) -if (EXISTS ${CMAKE_SOURCE_DIR}/docker/test/integration/features) - set(ENABLED_TAGS "CORE") - foreach(MINIFI_OPTION ${MINIFI_OPTIONS}) - string(FIND ${MINIFI_OPTION} "ENABLE" my_index) - if(my_index EQUAL -1) - continue() - elseif(${${MINIFI_OPTION}}) - set(ENABLED_TAGS "${ENABLED_TAGS},${MINIFI_OPTION}") - endif() - endforeach() +set(ENABLED_TAGS "CORE") +foreach(MINIFI_OPTION ${MINIFI_OPTIONS}) + string(FIND ${MINIFI_OPTION} "ENABLE" my_index) + if(my_index EQUAL -1) + continue() + elseif(${${MINIFI_OPTION}}) + set(ENABLED_TAGS "${ENABLED_TAGS},${MINIFI_OPTION}") + endif() +endforeach() - set(DISABLED_TAGS "SKIP_CI") +set(DISABLED_TAGS "SKIP_CI") - add_custom_target( - docker-verify - COMMAND ${CMAKE_SOURCE_DIR}/docker/DockerVerify.sh ${PROJECT_VERSION_STR} ${ENABLED_TAGS} --tags_to_exclude=${DISABLED_TAGS} --parallel_processes=${DOCKER_VERIFY_THREADS}) - add_custom_target( - docker-verify-modular - COMMAND ${CMAKE_SOURCE_DIR}/docker/RunBehaveTests.sh ${PROJECT_VERSION_STR} ${ENABLED_TAGS} --tags_to_exclude=${DISABLED_TAGS} --parallel_processes=${DOCKER_VERIFY_THREADS}) -endif() +add_custom_target( + docker-verify-modular + COMMAND ${CMAKE_SOURCE_DIR}/docker/RunBehaveTests.sh ${PROJECT_VERSION_STR} ${ENABLED_TAGS} --tags_to_exclude=${DISABLED_TAGS} --parallel_processes=${DOCKER_VERIFY_THREADS}) function(CREATE_DOCKER_TARGET_FROM_ROCKY_PACKAGE BASE_IMAGE TAG_PREFIX INSTALL_PACKAGE_CMD) add_custom_target( diff --git a/cmake/VerifyPackageWithDocker.cmake b/cmake/VerifyPackageWithDocker.cmake index cf6012e169..4af545c82a 100644 --- a/cmake/VerifyPackageWithDocker.cmake +++ b/cmake/VerifyPackageWithDocker.cmake @@ -30,15 +30,12 @@ function(ADD_PACKAGE_VERIFY TAG_PREFIX) set(DISABLED_TAGS "SKIP_CI") - add_custom_target( - docker-verify-${TAG_PREFIX} - COMMAND ${CMAKE_SOURCE_DIR}/docker/DockerVerify.sh --image-tag-prefix ${TAG_PREFIX} ${MINIFI_VERSION_STR} ${ENABLED_TAGS} --tags_to_exclude=${DISABLED_TAGS} --parallel_processes=${DOCKER_VERIFY_THREADS}) add_custom_target( docker-verify-${TAG_PREFIX}-modular COMMAND ${CMAKE_SOURCE_DIR}/docker/RunBehaveTests.sh --image-tag-prefix ${TAG_PREFIX} ${MINIFI_VERSION_STR} ${ENABLED_TAGS} --tags_to_exclude=${DISABLED_TAGS} --parallel_processes=${DOCKER_VERIFY_THREADS}) add_custom_target( - 
docker-verify-${TAG_PREFIX}-fips - COMMAND ${CMAKE_SOURCE_DIR}/docker/DockerVerify.sh --image-tag-prefix ${TAG_PREFIX} ${MINIFI_VERSION_STR} ${ENABLED_TAGS} --tags_to_exclude=${DISABLED_TAGS} --parallel_processes=${DOCKER_VERIFY_THREADS} --fips) + docker-verify-${TAG_PREFIX}-modular-fips + COMMAND ${CMAKE_SOURCE_DIR}/docker/RunBehaveTests.sh --image-tag-prefix ${TAG_PREFIX} ${MINIFI_VERSION_STR} ${ENABLED_TAGS} --tags_to_exclude=${DISABLED_TAGS} --parallel_processes=${DOCKER_VERIFY_THREADS} --fips) endfunction() @@ -54,16 +51,14 @@ CREATE_DOCKER_TARGET_FROM_RPM_PACKAGE(rockylinux:8 rocky8 "dnf install -y wget p CREATE_DOCKER_TARGET_FROM_RPM_PACKAGE(rockylinux:9 rocky9 "dnf install -y wget python3-devel python3-pip gcc gcc-c++") CREATE_DOCKER_TARGET_FROM_RPM_PACKAGE(rockylinux/rockylinux:10 rocky10 "dnf install -y wget python3-devel python3-pip gcc gcc-c++") -if (EXISTS ${CMAKE_SOURCE_DIR}/docker/test/integration/features) - ADD_PACKAGE_VERIFY(rocky8) - ADD_PACKAGE_VERIFY(rocky9) - ADD_PACKAGE_VERIFY(rocky10) - ADD_PACKAGE_VERIFY(jammy) - ADD_PACKAGE_VERIFY(noble) - ADD_PACKAGE_VERIFY(bookworm) - ADD_PACKAGE_VERIFY(bullseye) - ADD_PACKAGE_VERIFY(trixie) - ADD_PACKAGE_VERIFY(rocky8-rpm) - ADD_PACKAGE_VERIFY(rocky9-rpm) - ADD_PACKAGE_VERIFY(rocky10-rpm) -endif() +ADD_PACKAGE_VERIFY(rocky8) +ADD_PACKAGE_VERIFY(rocky9) +ADD_PACKAGE_VERIFY(rocky10) +ADD_PACKAGE_VERIFY(jammy) +ADD_PACKAGE_VERIFY(noble) +ADD_PACKAGE_VERIFY(bookworm) +ADD_PACKAGE_VERIFY(bullseye) +ADD_PACKAGE_VERIFY(trixie) +ADD_PACKAGE_VERIFY(rocky8-rpm) +ADD_PACKAGE_VERIFY(rocky9-rpm) +ADD_PACKAGE_VERIFY(rocky10-rpm) diff --git a/docker/DockerVerify.sh b/docker/DockerVerify.sh deleted file mode 100755 index 52b68f31fd..0000000000 --- a/docker/DockerVerify.sh +++ /dev/null @@ -1,195 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e - -die() -{ - local _ret="${2:-1}" - test "${_PRINT_HELP:-no}" = yes && print_help >&2 - echo "$1" >&2 - exit "${_ret}" -} - -_positionals=() -_arg_image_tag_prefix= -_arg_tags_to_exclude= -_arg_parallel_processes=3 - -print_help() -{ - printf '%s\n' "Runs the provided behave tests in a containerized environment" - printf 'Usage: %s [--image-tag-prefix ] [-h|--help] [] ... 
[] ...\n' "$0" - printf '\t%s\n' ": the version of minifi" - printf '\t%s\n' ": comma-separated list of tags to include, e.g: CORE,ENABLE_KAFKA,ENABLE_SPLUNK" - printf '\t%s\n' "--tags_to_exclude: optional comma-separated list of tags that should be skipped (default: none)" - printf '\t%s\n' "--image-tag-prefix: optional prefix to the docker tag (default: none)" - printf '\t%s\n' "--parallel_processes: optional argument that specifies the number of parallel processes that can be executed simultaneously (default: 3)" - printf '\t%s\n' "--fips: enables FIPS mode by default" - printf '\t%s\n' "-h, --help: Prints help" -} - - -parse_commandline() -{ - _positionals_count=0 - _arg_fips=false # Default to false - while test $# -gt 0 - do - _key="$1" - case "$_key" in - --image-tag-prefix) - test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1 - _arg_image_tag_prefix="$2" - shift - ;; - --image-tag-prefix=*) - _arg_image_tag_prefix="${_key##--image-tag-prefix=}" - ;; - --tags_to_exclude) - test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1 - _arg_tags_to_exclude="$2" - shift - ;; - --tags_to_exclude=*) - _arg_tags_to_exclude="${_key##--tags_to_exclude=}" - ;; - --parallel_processes) - test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1 - _arg_parallel_processes="$2" - shift - ;; - --parallel_processes=*) - _arg_parallel_processes="${_key##--parallel_processes=}" - ;; - --fips) - _arg_fips=true # Set boolean flag to true when argument is present - ;; - -h|--help) - print_help - exit 0 - ;; - -h*) - print_help - exit 0 - ;; - *) - _last_positional="$1" - _positionals+=("$_last_positional") - _positionals_count=$((_positionals_count + 1)) - ;; - esac - shift - done -} - - - -handle_passed_args_count() -{ - local _required_args_string="'minifi_version' and 'tags_to_run'" - test "${_positionals_count}" -ge 2 || _PRINT_HELP=yes die "FATAL ERROR: Not enough positional arguments - we require at least 2 (namely: $_required_args_string), but got only ${_positionals_count}." 1 -} - - -assign_positional_args() -{ - local _positional_name _shift_for=$1 - _positional_names="_arg_minifi_version _arg_tags_to_run " - - shift "$_shift_for" - for _positional_name in ${_positional_names} - do - test $# -gt 0 || break - eval "$_positional_name=\${1}" || die "Error during argument parsing." 1 - shift - done -} - -parse_commandline "$@" -handle_passed_args_count -assign_positional_args 1 "${_positionals[@]}" - -docker_dir="$( cd "${0%/*}" && pwd )" - -# shellcheck disable=SC2154 -export MINIFI_VERSION=${_arg_minifi_version} -if test -z "$_arg_image_tag_prefix" -then - export MINIFI_TAG_PREFIX="" -else - export MINIFI_TAG_PREFIX=${_arg_image_tag_prefix}- -fi - - if [ "$_arg_fips" = true ]; then - export MINIFI_FIPS="true" - else - export MINIFI_FIPS="false" - fi - -# Create virtual environment for testing -if [[ ! -d ./test-env-py3 ]]; then - echo "Creating virtual environment in ./test-env-py3" 1>&2 - virtualenv --python=python3 ./test-env-py3 -fi - -echo "Activating virtual environment..." 1>&2 -# shellcheck disable=SC1091 -. ./test-env-py3/bin/activate -pip install --trusted-host pypi.python.org --upgrade pip setuptools - -# Install test dependencies -echo "Installing test dependencies..." 
1>&2 - -# hint include/library paths if homewbrew is in use -if brew list 2> /dev/null | grep openssl > /dev/null 2>&1; then - echo "Using homebrew paths for openssl" 1>&2 - LDFLAGS="-L$(brew --prefix openssl@1.1)/lib" - export LDFLAGS - CFLAGS="-I$(brew --prefix openssl@1.1)/include" - export CFLAGS - SWIG_FEATURES="-cpperraswarn -includeall -I$(brew --prefix openssl@1.1)/include" - export SWIG_FEATURES -fi - -if ! command swig -version &> /dev/null; then - echo "Swig could not be found on your system (dependency of m2crypto python library). Please install swig to continue." - exit 1 -fi - -pip install -r "${docker_dir}/requirements.txt" - -TEST_DIRECTORY="${docker_dir}/test/integration" -export TEST_DIRECTORY - -# Add --no-logcapture to see logs interleaved with the test output -BEHAVE_OPTS=(--show-progress-bar --logging-level INFO --parallel-processes "${_arg_parallel_processes}" --parallel-scheme feature -o "${PWD}/behavex_output" -t "${_arg_tags_to_run}") -if ! test -z "${_arg_tags_to_exclude}" -then - IFS=',' - read -ra splits <<< "${_arg_tags_to_exclude}" - for split in "${splits[@]}" - do - BEHAVE_OPTS=("${BEHAVE_OPTS[@]}" -t "~${split}") - done -fi - -echo "${BEHAVE_OPTS[@]}" - -cd "${docker_dir}/test/integration" -exec - behavex "${BEHAVE_OPTS[@]}" diff --git a/docker/test/integration/.gitignore b/docker/test/integration/.gitignore deleted file mode 100644 index bdf33494b9..0000000000 --- a/docker/test/integration/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -__pycache__ -/.ropeproject diff --git a/docker/test/integration/__init__.py b/docker/test/integration/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docker/test/integration/cluster/ContainerStore.py b/docker/test/integration/cluster/ContainerStore.py deleted file mode 100644 index b5e3df7961..0000000000 --- a/docker/test/integration/cluster/ContainerStore.py +++ /dev/null @@ -1,219 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import logging -import shortuuid -from .containers.MinifiContainer import MinifiOptions -from .containers.MinifiContainer import MinifiContainer -from .containers.NifiContainer import NifiContainer -from .containers.NifiContainer import NiFiOptions -from .containers.AzureStorageServerContainer import AzureStorageServerContainer -from .containers.HttpProxyContainer import HttpProxyContainer -from .containers.PostgreSQLServerContainer import PostgreSQLServerContainer -from .containers.SyslogUdpClientContainer import SyslogUdpClientContainer -from .containers.SyslogTcpClientContainer import SyslogTcpClientContainer -from .FeatureContext import FeatureContext - - -class ContainerStore: - def __init__(self, network, image_store, feature_id): - self.feature_id = feature_id - self.minifi_options = MinifiOptions() - self.containers = {} - self.data_directories = {} - self.network = network - self.image_store = image_store - self.nifi_options = NiFiOptions() - - def get_container_name_with_postfix(self, container_name: str): - if not container_name.endswith(self.feature_id): - return container_name + "-" + self.feature_id - return container_name - - def cleanup(self): - for container in self.containers.values(): - container.cleanup() - self.containers = {} - if self.network: - logging.info('Cleaning up network: %s', self.network.name) - self.network.remove() - self.network = None - - def set_directory_bindings(self, volumes, data_directories): - self.vols = volumes - self.data_directories = data_directories - for container in self.containers.values(): - container.vols = self.vols - - def acquire_container(self, context, container_name: str, engine='minifi-cpp', command=None): - container_name = self.get_container_name_with_postfix(container_name) - if container_name is not None and container_name in self.containers: - return self.containers[container_name] - - if container_name is None and (engine == 'nifi' or engine == 'minifi-cpp'): - container_name = engine + '-' + shortuuid.uuid() - logging.info('Container name was not provided; using generated name \'%s\'', container_name) - - feature_context = FeatureContext(feature_id=context.feature_id, - root_ca_cert=context.root_ca_cert, - root_ca_key=context.root_ca_key) - - if engine == 'nifi': - return self.containers.setdefault(container_name, - NifiContainer(feature_context=feature_context, - config_dir=self.data_directories["nifi_config_dir"], - options=self.nifi_options, - name=container_name, - vols=self.vols, - network=self.network, - image_store=self.image_store, - command=command)) - elif engine == 'minifi-cpp': - return self.containers.setdefault(container_name, - MinifiContainer(feature_context=feature_context, - config_dir=self.data_directories["minifi_config_dir"], - options=self.minifi_options, - name=container_name, - vols=self.vols, - network=self.network, - image_store=self.image_store, - command=command)) - elif engine == 'http-proxy': - return self.containers.setdefault(container_name, - HttpProxyContainer(feature_context=feature_context, - name=container_name, - vols=self.vols, - network=self.network, - image_store=self.image_store, - command=command)) - elif engine == 'azure-storage-server': - return self.containers.setdefault(container_name, - AzureStorageServerContainer(feature_context=feature_context, - name=container_name, - vols=self.vols, - network=self.network, - image_store=self.image_store, - command=command)) - elif engine == 'postgresql-server': - return self.containers.setdefault(container_name, - 
PostgreSQLServerContainer(feature_context=feature_context, - name=container_name, - vols=self.vols, - network=self.network, - image_store=self.image_store, - command=command)) - elif engine == "syslog-udp-client": - return self.containers.setdefault(container_name, - SyslogUdpClientContainer( - feature_context=feature_context, - name=container_name, - vols=self.vols, - network=self.network, - image_store=self.image_store, - command=command)) - elif engine == "syslog-tcp-client": - return self.containers.setdefault(container_name, - SyslogTcpClientContainer( - feature_context=feature_context, - name=container_name, - vols=self.vols, - network=self.network, - image_store=self.image_store, - command=command)) - else: - raise Exception('invalid flow engine: \'%s\'' % engine) - - def acquire_transient_minifi(self, context, container_name: str, engine='minifi-cpp'): - self.acquire_container(context=context, container_name=container_name, engine=engine, command=["/bin/sh", "-c", "timeout 10s {run_minifi} && sleep 100".format(run_minifi=MinifiContainer.MINIFI_LOCATIONS.run_minifi_cmd)]) - - def deploy_container(self, container_name: str): - container_name = self.get_container_name_with_postfix(container_name) - if container_name is None or container_name not in self.containers: - raise Exception('Invalid container to deploy: \'%s\'' % container_name) - - self.containers[container_name].deploy() - - def deploy_all(self): - for container in self.containers.values(): - container.deploy() - - def stop_container(self, container_name): - container_name = self.get_container_name_with_postfix(container_name) - if container_name not in self.containers: - logging.error('Could not stop container because it is not found: \'%s\'', container_name) - return - self.containers[container_name].stop() - - def kill_container(self, container_name): - container_name = self.get_container_name_with_postfix(container_name) - if container_name not in self.containers: - logging.error('Could not kill container because it is not found: \'%s\'', container_name) - return - self.containers[container_name].kill() - - def restart_container(self, container_name): - container_name = self.get_container_name_with_postfix(container_name) - if container_name not in self.containers: - logging.error('Could not restart container because it is not found: \'%s\'', container_name) - return - self.containers[container_name].restart() - - def enable_provenance_repository_in_minifi(self): - self.minifi_options.enable_provenance = True - - def set_ssl_context_properties_in_minifi(self): - self.minifi_options.set_ssl_context_properties = True - - def enable_sql_in_minifi(self): - self.minifi_options.enable_sql = True - - def set_yaml_in_minifi(self): - self.minifi_options.config_format = "yaml" - - def set_json_in_minifi(self): - self.minifi_options.config_format = "json" - - def enable_log_metrics_publisher_in_minifi(self): - self.minifi_options.enable_log_metrics_publisher = True - - def enable_openssl_fips_mode_in_minifi(self): - self.minifi_options.enable_openssl_fips_mode = True - - def disable_openssl_fips_mode_in_minifi(self): - self.minifi_options.enable_openssl_fips_mode = False - - def llama_model_is_downloaded_in_minifi(self): - self.minifi_options.download_llama_model = True - - def get_startup_finished_log_entry(self, container_name): - container_name = self.get_container_name_with_postfix(container_name) - return self.containers[container_name].get_startup_finished_log_entry() - - def log_source(self, container_name): - 
container_name = self.get_container_name_with_postfix(container_name) - return self.containers[container_name].log_source() - - def get_app_log(self, container_name): - container_name = self.get_container_name_with_postfix(container_name) - return self.containers[container_name].get_app_log() - - def get_container_names(self, engine=None): - return [key for key in self.containers.keys() if not engine or self.containers[key].get_engine() == engine] - - def enable_ssl_in_nifi(self): - self.nifi_options.use_ssl = True - - def run_post_startup_commands(self, container_name): - container_name = self.get_container_name_with_postfix(container_name) - return self.containers[container_name].run_post_startup_commands() diff --git a/docker/test/integration/cluster/DockerCommunicator.py b/docker/test/integration/cluster/DockerCommunicator.py deleted file mode 100644 index 313f41d1db..0000000000 --- a/docker/test/integration/cluster/DockerCommunicator.py +++ /dev/null @@ -1,86 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import docker -import logging -import sys -import tempfile -import tarfile -import os -import io -import uuid - - -class DockerCommunicator: - def __init__(self): - self.client = docker.from_env() - - def create_docker_network(self, feature_id: str): - net_name = 'minifi_integration_test_network-' + feature_id - logging.debug('Creating network: %s', net_name) - return self.client.networks.create(net_name) - - @staticmethod - def get_stdout_encoding(): - # Use UTF-8 both when sys.stdout present but set to None (explicitly piped output - # and also some CI such as GitHub Actions). 
- encoding = getattr(sys.stdout, "encoding", None) - if encoding is None: - encoding = "utf8" - return encoding - - def execute_command(self, container_name, command): - (code, output) = self.client.containers.get(container_name).exec_run(command) - return (code, output.decode(self.get_stdout_encoding())) - - def get_app_log_from_docker_container(self, container_name): - try: - container = self.client.containers.get(container_name) - except Exception: - return 'not started', None - - if b'Segmentation fault' in container.logs(): - logging.warning('Container segfaulted: %s', container.name) - self.segfault = True - - container.reload() - return container.status, container.logs() - - def __put_archive(self, container_name, path, data): - return self.client.containers.get(container_name).put_archive(path, data) - - def write_content_to_container(self, content, container_name, dst_path): - with tempfile.TemporaryDirectory() as td: - with tarfile.open(os.path.join(td, 'content.tar'), mode='w') as tar: - info = tarfile.TarInfo(name=os.path.basename(dst_path)) - info.size = len(content) - tar.addfile(info, io.BytesIO(content.encode('utf-8'))) - with open(os.path.join(td, 'content.tar'), 'rb') as data: - return self.__put_archive(container_name, os.path.dirname(dst_path), data.read()) - - def copy_file_from_container(self, container_name, src_path_in_container, dest_dir_on_host) -> bool: - try: - container = self.client.containers.get(container_name) - (bits, _) = container.get_archive(src_path_in_container) - tmp_tar_path = os.path.join(dest_dir_on_host, "retrieved_file_" + str(uuid.uuid4()) + ".tar") - with open(tmp_tar_path, 'wb') as out_file: - for chunk in bits: - out_file.write(chunk) - with tarfile.open(tmp_tar_path, 'r') as tar: - tar.extractall(dest_dir_on_host) - os.remove(tmp_tar_path) - return True - except Exception as ex: - logging.error('Exception occurred while copying file from container: %s', str(ex)) - return False diff --git a/docker/test/integration/cluster/DockerTestCluster.py b/docker/test/integration/cluster/DockerTestCluster.py deleted file mode 100644 index 32d158170f..0000000000 --- a/docker/test/integration/cluster/DockerTestCluster.py +++ /dev/null @@ -1,243 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import logging -import time -import re - -from .LogSource import LogSource -from .ContainerStore import ContainerStore -from .DockerCommunicator import DockerCommunicator -from .checkers.AzureChecker import AzureChecker -from .checkers.PostgresChecker import PostgresChecker -from .checkers.ModbusChecker import ModbusChecker -from utils import get_peak_memory_usage, get_minifi_pid, get_memory_usage - - -class DockerTestCluster: - def __init__(self, context, feature_id): - self.segfault = False - self.vols = {} - self.container_communicator = DockerCommunicator() - self.container_store = ContainerStore(self.container_communicator.create_docker_network(feature_id), context.image_store, feature_id=feature_id) - self.azure_checker = AzureChecker(self.container_communicator) - self.postgres_checker = PostgresChecker(self.container_communicator) - self.modbus_checker = ModbusChecker(self.container_communicator) - - def cleanup(self): - self.container_store.cleanup() - - def set_directory_bindings(self, volumes, data_directories): - self.container_store.set_directory_bindings(volumes, data_directories) - - def acquire_container(self, context, name: str, engine: str = 'minifi-cpp', command=None): - return self.container_store.acquire_container(context=context, container_name=name, engine=engine, command=command) - - def acquire_transient_minifi(self, context, name: str, engine: str = 'minifi-cpp'): - return self.container_store.acquire_transient_minifi(context=context, container_name=name, engine=engine) - - def deploy_container(self, name): - self.container_store.deploy_container(name) - - def deploy_all(self): - self.container_store.deploy_all() - - def stop_container(self, container_name): - self.container_store.stop_container(container_name) - - def kill_container(self, container_name): - self.container_store.kill_container(container_name) - - def restart_container(self, container_name): - self.container_store.restart_container(container_name) - - def enable_provenance_repository_in_minifi(self): - self.container_store.enable_provenance_repository_in_minifi() - - def enable_c2_in_minifi(self): - self.container_store.enable_c2_in_minifi() - - def enable_c2_with_ssl_in_minifi(self): - self.container_store.enable_c2_with_ssl_in_minifi() - - def fetch_flow_config_from_c2_url_in_minifi(self): - self.container_store.fetch_flow_config_from_c2_url_in_minifi() - - def set_ssl_context_properties_in_minifi(self): - self.container_store.set_ssl_context_properties_in_minifi() - - def enable_openssl_fips_mode_in_minifi(self): - self.container_store.enable_openssl_fips_mode_in_minifi() - - def disable_openssl_fips_mode_in_minifi(self): - self.container_store.disable_openssl_fips_mode_in_minifi() - - def enable_sql_in_minifi(self): - self.container_store.enable_sql_in_minifi() - - def set_yaml_in_minifi(self): - self.container_store.set_yaml_in_minifi() - - def set_json_in_minifi(self): - self.container_store.set_json_in_minifi() - - def enable_log_metrics_publisher_in_minifi(self): - self.container_store.enable_log_metrics_publisher_in_minifi() - - def llama_model_is_downloaded_in_minifi(self): - self.container_store.llama_model_is_downloaded_in_minifi() - - def get_app_log(self, container_name): - container_name = self.container_store.get_container_name_with_postfix(container_name) - log_source = self.container_store.log_source(container_name) - if log_source == LogSource.FROM_DOCKER_CONTAINER: - return self.container_communicator.get_app_log_from_docker_container(container_name) - elif log_source == 
LogSource.FROM_GET_APP_LOG_METHOD: - return self.container_store.get_app_log(container_name) - else: - raise Exception("Unexpected log source '%s'" % log_source) - - def __wait_for_app_logs_impl(self, container_name, log_entry, timeout_seconds, count, use_regex): - wait_start_time = time.perf_counter() - while True: - logging.info('Waiting for app-logs `%s` in container `%s`', log_entry, container_name) - status, logs = self.get_app_log(container_name) - if logs is not None: - if not use_regex and logs.decode("utf-8").count(log_entry) >= count: - return True - elif use_regex and len(re.findall(log_entry, logs.decode("utf-8"))) >= count: - return True - elif status == 'exited': - return False - time.sleep(1) - if timeout_seconds < (time.perf_counter() - wait_start_time): - break - return False - - def wait_for_app_logs_regex(self, container_name, log_entry, timeout_seconds, count=1): - return self.__wait_for_app_logs_impl(container_name, log_entry, timeout_seconds, count, True) - - def wait_for_app_logs(self, container_name, log_entry, timeout_seconds, count=1): - return self.__wait_for_app_logs_impl(container_name, log_entry, timeout_seconds, count, False) - - def wait_for_startup_log(self, container_name, timeout_seconds): - return self.wait_for_app_logs_regex(container_name, self.container_store.get_startup_finished_log_entry(container_name), timeout_seconds, 1) - - def log_app_output(self): - for container_name in self.container_store.get_container_names(): - _, logs = self.get_app_log(container_name) - if logs is not None: - logging.info("Logs of container '%s':", container_name) - for line in logs.decode("utf-8").splitlines(): - logging.info(line) - - def check_http_proxy_access(self, container_name, url): - container_name = self.container_store.get_container_name_with_postfix(container_name) - (code, output) = self.container_communicator.execute_command(container_name, ["cat", "/var/log/squid/access.log"]) - return code == 0 and url.lower() in output.lower() \ - and ((output.count("TCP_DENIED") != 0 - and output.count("TCP_MISS") >= output.count("TCP_DENIED")) - or output.count("TCP_DENIED") == 0 and "TCP_MISS" in output) - - def check_azure_storage_server_data(self, container_name, test_data): - container_name = self.container_store.get_container_name_with_postfix(container_name) - return self.azure_checker.check_azure_storage_server_data(container_name, test_data) - - def add_test_blob(self, blob_name, content="", with_snapshot=False): - return self.azure_checker.add_test_blob(blob_name, content, with_snapshot) - - def check_azure_blob_and_snapshot_count(self, blob_and_snapshot_count, timeout_seconds): - return self.azure_checker.check_azure_blob_and_snapshot_count(blob_and_snapshot_count, timeout_seconds) - - def check_azure_blob_storage_is_empty(self, timeout_seconds): - return self.azure_checker.check_azure_blob_storage_is_empty(timeout_seconds) - - def check_query_results(self, postgresql_container_name, query, number_of_rows, timeout_seconds): - postgresql_container_name = self.container_store.get_container_name_with_postfix(postgresql_container_name) - return self.postgres_checker.check_query_results(postgresql_container_name, query, number_of_rows, timeout_seconds) - - def segfault_happened(self): - return self.segfault - - def check_minifi_log_matches_regex(self, regex, timeout_seconds=60, count=1): - for container_name in self.container_store.get_container_names("minifi-cpp"): - line_found = self.wait_for_app_logs_regex(container_name, regex, timeout_seconds, count) - if 
line_found: - return True - return False - - def check_container_log_contents(self, container_engine, line, timeout_seconds=60, count=1): - for container_name in self.container_store.get_container_names(container_engine): - line_found = self.wait_for_app_logs(container_name, line, timeout_seconds, count) - if line_found: - return True - return False - - def check_minifi_log_does_not_contain(self, line, wait_time_seconds): - time.sleep(wait_time_seconds) - for container_name in self.container_store.get_container_names("minifi-cpp"): - _, logs = self.get_app_log(container_name) - if logs is not None and 1 <= logs.decode("utf-8").count(line): - return False - return True - - def wait_for_container_startup_to_finish(self, container_name): - container_name = self.container_store.get_container_name_with_postfix(container_name) - startup_success = self.wait_for_startup_log(container_name, 160) - if not startup_success: - logging.error("Cluster startup failed for %s", container_name) - return False - if not self.container_store.run_post_startup_commands(container_name): - logging.error("Failed to run post startup commands for container %s", container_name) - return False - return True - - def wait_for_all_containers_to_finish_startup(self): - for container_name in self.container_store.get_container_names(): - if not self.wait_for_container_startup_to_finish(container_name): - return False - return True - - def wait_for_peak_memory_usage_to_exceed(self, minimum_peak_memory_usage: int, timeout_seconds: int) -> bool: - start_time = time.perf_counter() - while (time.perf_counter() - start_time) < timeout_seconds: - current_peak_memory_usage = get_peak_memory_usage(get_minifi_pid()) - if current_peak_memory_usage is None: - logging.warning("Failed to determine peak memory usage") - return False - if current_peak_memory_usage > minimum_peak_memory_usage: - return True - time.sleep(1) - logging.warning(f"Peak memory usage ({current_peak_memory_usage}) didnt exceed minimum asserted peak memory usage {minimum_peak_memory_usage}") - return False - - def wait_for_memory_usage_to_drop_below(self, max_memory_usage: int, timeout_seconds: int) -> bool: - start_time = time.perf_counter() - while (time.perf_counter() - start_time) < timeout_seconds: - current_memory_usage = get_memory_usage(get_minifi_pid()) - if current_memory_usage is None: - logging.warning("Failed to determine memory usage") - return False - if current_memory_usage < max_memory_usage: - return True - current_memory_usage = get_memory_usage(get_minifi_pid()) - time.sleep(1) - logging.warning(f"Memory usage ({current_memory_usage}) is more than the maximum asserted memory usage ({max_memory_usage})") - return False - - def set_value_on_plc_with_modbus(self, container_name, modbus_cmd): - return self.modbus_checker.set_value_on_plc_with_modbus(container_name, modbus_cmd) - - def enable_ssl_in_nifi(self): - self.container_store.enable_ssl_in_nifi() diff --git a/docker/test/integration/cluster/DockerTestDirectoryBindings.py b/docker/test/integration/cluster/DockerTestDirectoryBindings.py deleted file mode 100644 index 8a4e58759d..0000000000 --- a/docker/test/integration/cluster/DockerTestDirectoryBindings.py +++ /dev/null @@ -1,217 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import logging -import os -import shutil -import hashlib -import subprocess -import OpenSSL.crypto -from ssl_utils.SSL_cert_utils import make_self_signed_cert, make_cert_without_extended_usage, make_server_cert, make_client_cert - - -class DockerTestDirectoryBindings: - def __init__(self, feature_id: str): - self.data_directories = {} - self.feature_id = feature_id - - def __del__(self): - self.delete_data_directories() - - def cleanup_io(self): - for folder in [self.data_directories[self.feature_id]["input_dir"], self.data_directories[self.feature_id]["output_dir"]]: - for filename in os.listdir(folder): - file_path = os.path.join(folder, filename) - if os.path.isfile(file_path) or os.path.islink(file_path): - os.unlink(file_path) - elif os.path.isdir(file_path): - shutil.rmtree(file_path) - - def create_new_data_directories(self): - self.data_directories[self.feature_id] = { - "input_dir": "/tmp/.nifi-test-input." + self.feature_id, - "output_dir": "/tmp/.nifi-test-output." + self.feature_id, - "resources_dir": "/tmp/.nifi-test-resources." + self.feature_id, - "system_certs_dir": "/tmp/.nifi-test-resources." + self.feature_id + "/system_certs_dir", - "minifi_config_dir": "/tmp/.nifi-test-minifi-config-dir." + self.feature_id, - "nifi_config_dir": "/tmp/.nifi-test-nifi-config-dir." + self.feature_id - } - - [self.create_directory(directory) for directory in self.data_directories[self.feature_id].values()] - - # Add resources - test_dir = os.environ['TEST_DIRECTORY'] # Based on DockerVerify.sh - shutil.copytree(test_dir + "/resources/minifi", self.data_directories[self.feature_id]["minifi_config_dir"], dirs_exist_ok=True) - - def get_data_directories(self): - return self.data_directories[self.feature_id] - - def docker_path_to_local_path(self, docker_path): - # Docker paths are currently hard-coded - if docker_path == "/tmp/input": - return self.data_directories[self.feature_id]["input_dir"] - if docker_path == "/tmp/output": - return self.data_directories[self.feature_id]["output_dir"] - if docker_path == "/tmp/resources": - return self.data_directories[self.feature_id]["resources_dir"] - # Might be worth reworking these - if docker_path == "/tmp/output/success": - self.create_directory(self.data_directories[self.feature_id]["output_dir"] + "/success") - return self.data_directories[self.feature_id]["output_dir"] + "/success" - if docker_path == "/tmp/output/failure": - self.create_directory(self.data_directories[self.feature_id]["output_dir"] + "/failure") - return self.data_directories[self.feature_id]["output_dir"] + "/failure" - raise Exception("Docker directory \"%s\" has no preset bindings." % docker_path) - - def get_directory_bindings(self): - """ - Performs a standard container flow deployment with the addition - of volumes supporting test input/output directories. 
- """ - vols = {} - vols[self.data_directories[self.feature_id]["input_dir"]] = {"bind": "/tmp/input", "mode": "rw"} - vols[self.data_directories[self.feature_id]["output_dir"]] = {"bind": "/tmp/output", "mode": "rw"} - vols[self.data_directories[self.feature_id]["resources_dir"]] = {"bind": "/tmp/resources", "mode": "rw"} - vols[self.data_directories[self.feature_id]["system_certs_dir"]] = {"bind": "/usr/local/share/certs", "mode": "rw"} - vols[self.data_directories[self.feature_id]["minifi_config_dir"]] = {"bind": "/tmp/minifi_config", "mode": "rw"} - vols[self.data_directories[self.feature_id]["nifi_config_dir"]] = {"bind": "/tmp/nifi_config", "mode": "rw"} - return vols - - @staticmethod - def create_directory(dir): - os.makedirs(dir) - os.chmod(dir, 0o777) - - @staticmethod - def delete_tmp_directory(dir): - assert dir.startswith("/tmp/") - if not dir.endswith("/"): - dir = dir + "/" - # Sometimes rmtree does clean not up as expected, setting ignore_errors does not help either - shutil.rmtree(dir, ignore_errors=True) - - def delete_data_directories(self): - for directories in self.data_directories.values(): - for directory in directories.values(): - self.delete_tmp_directory(directory) - - @staticmethod - def put_file_contents(file_abs_path, contents): - logging.info('Writing %d bytes of content to file: %s', len(contents), file_abs_path) - os.makedirs(os.path.dirname(file_abs_path), exist_ok=True) - with open(file_abs_path, 'wb') as test_input_file: - test_input_file.write(contents) - os.chmod(file_abs_path, 0o0777) - - def put_test_resource(self, file_name, contents): - """ - Creates a resource file in the test resource dir and writes - the given content to it. - """ - - file_abs_path = os.path.join(self.data_directories[self.feature_id]["resources_dir"], file_name) - self.put_file_contents(file_abs_path, contents) - - def get_test_resource_path(self, file_name): - return os.path.join(self.data_directories[self.feature_id]["resources_dir"], file_name) - - def put_test_input(self, file_name, contents): - file_abs_path = os.path.join(self.data_directories[self.feature_id]["input_dir"], file_name) - self.put_file_contents(file_abs_path, contents) - - def put_file_to_docker_path(self, path, file_name, contents): - file_abs_path = os.path.join(self.docker_path_to_local_path(path), file_name) - self.put_file_contents(file_abs_path, contents) - - @staticmethod - def generate_md5_hash(file_path): - with open(file_path, 'rb') as file: - md5_hash = hashlib.md5() - for chunk in iter(lambda: file.read(4096), b''): - md5_hash.update(chunk) - - return md5_hash.hexdigest() - - def put_random_file_to_docker_path(self, path: str, file_name: str, file_size: int): - file_abs_path = os.path.join(self.docker_path_to_local_path(path), file_name) - with open(file_abs_path, 'wb') as test_input_file: - test_input_file.write(os.urandom(file_size)) - os.chmod(file_abs_path, 0o0777) - return self.generate_md5_hash(file_abs_path) - - def create_cert_files(self): - self.root_ca_cert, self.root_ca_key = make_self_signed_cert("root CA") - - minifi_client_cert, minifi_client_key = make_cert_without_extended_usage(common_name=f"minifi-cpp-flow-{self.feature_id}", - ca_cert=self.root_ca_cert, - ca_key=self.root_ca_key) - minifi_server_cert, minifi_server_key = make_server_cert(common_name=f"server-{self.feature_id}", - ca_cert=self.root_ca_cert, - ca_key=self.root_ca_key) - self_signed_server_cert, self_signed_server_key = make_self_signed_cert(f"server-{self.feature_id}") - - self.put_test_resource('root_ca.crt', - 
OpenSSL.crypto.dump_certificate(type=OpenSSL.crypto.FILETYPE_PEM, - cert=self.root_ca_cert)) - self.put_test_resource("system_certs_dir/ca-root-nss.crt", - OpenSSL.crypto.dump_certificate(type=OpenSSL.crypto.FILETYPE_PEM, - cert=self.root_ca_cert)) - self.put_test_resource('minifi_client.crt', - OpenSSL.crypto.dump_certificate(type=OpenSSL.crypto.FILETYPE_PEM, - cert=minifi_client_cert)) - self.put_test_resource('minifi_client.key', - OpenSSL.crypto.dump_privatekey(type=OpenSSL.crypto.FILETYPE_PEM, - pkey=minifi_client_key)) - self.put_test_resource('minifi_server.crt', - OpenSSL.crypto.dump_certificate(type=OpenSSL.crypto.FILETYPE_PEM, - cert=minifi_server_cert) - + OpenSSL.crypto.dump_privatekey(type=OpenSSL.crypto.FILETYPE_PEM, - pkey=minifi_server_key)) - self.put_test_resource('self_signed_server.crt', - OpenSSL.crypto.dump_certificate(type=OpenSSL.crypto.FILETYPE_PEM, - cert=self_signed_server_cert) - + OpenSSL.crypto.dump_privatekey(type=OpenSSL.crypto.FILETYPE_PEM, - pkey=self_signed_server_key)) - self.put_test_resource('minifi_merged_cert.crt', - OpenSSL.crypto.dump_certificate(type=OpenSSL.crypto.FILETYPE_PEM, - cert=minifi_client_cert) - + OpenSSL.crypto.dump_privatekey(type=OpenSSL.crypto.FILETYPE_PEM, - pkey=minifi_client_key)) - nifi_client_cert, nifi_client_key = make_server_cert(common_name=f"nifi-{self.feature_id}", - ca_cert=self.root_ca_cert, - ca_key=self.root_ca_key) - self.put_test_resource('nifi_client.crt', - OpenSSL.crypto.dump_certificate(type=OpenSSL.crypto.FILETYPE_PEM, - cert=nifi_client_cert)) - self.put_test_resource('nifi_client.key', - OpenSSL.crypto.dump_privatekey(type=OpenSSL.crypto.FILETYPE_PEM, - pkey=nifi_client_key)) - base = os.path.dirname(self.get_test_resource_path('nifi_client.key')) - test_dir = os.environ['TEST_DIRECTORY'] # Based on DockerVerify.sh - cmd = [ - os.path.join(test_dir, "convert_cert_to_jks.sh"), - base, - os.path.join(base, "nifi_client.key"), - os.path.join(base, "nifi_client.crt"), - os.path.join(base, "root_ca.crt"), - ] - subprocess.run(cmd, check=True) - - clientuser_cert, clientuser_key = make_client_cert("clientuser", ca_cert=self.root_ca_cert, ca_key=self.root_ca_key) - self.put_test_resource('clientuser.crt', - OpenSSL.crypto.dump_certificate(type=OpenSSL.crypto.FILETYPE_PEM, - cert=clientuser_cert)) - self.put_test_resource('clientuser.key', - OpenSSL.crypto.dump_privatekey(type=OpenSSL.crypto.FILETYPE_PEM, - pkey=clientuser_key)) diff --git a/docker/test/integration/cluster/FeatureContext.py b/docker/test/integration/cluster/FeatureContext.py deleted file mode 100644 index febb524d58..0000000000 --- a/docker/test/integration/cluster/FeatureContext.py +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -class FeatureContext: - def __init__(self, feature_id, root_ca_cert, root_ca_key): - self.id = feature_id - self.root_ca_cert = root_ca_cert - self.root_ca_key = root_ca_key diff --git a/docker/test/integration/cluster/ImageStore.py b/docker/test/integration/cluster/ImageStore.py deleted file mode 100644 index 2a21f9d293..0000000000 --- a/docker/test/integration/cluster/ImageStore.py +++ /dev/null @@ -1,191 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from .containers.MinifiContainer import MinifiContainer -import logging -import tarfile -import docker -from io import BytesIO -from textwrap import dedent -import os - - -class ImageStore: - def __init__(self): - self.client = docker.from_env() - self.images = dict() - self.test_dir = os.environ['TEST_DIRECTORY'] # Based on DockerVerify.sh - - def cleanup(self): - # Clean up images - for image in self.images.values(): - logging.info('Cleaning up image: %s', image.id) - self.client.images.remove(image.id, force=True) - - def get_image(self, container_engine): - if container_engine in self.images: - return self.images[container_engine] - - if container_engine == "minifi-cpp-sql": - image = self.__build_minifi_cpp_sql_image() - elif container_engine == "minifi-cpp-with-llamacpp-model": - image = self.__build_minifi_cpp_image_with_llamacpp_model() - elif container_engine == "http-proxy": - image = self.__build_http_proxy_image() - elif container_engine == "postgresql-server": - image = self.__build_postgresql_server_image() - else: - raise Exception("There is no associated image for " + container_engine) - - self.images[container_engine] = image - return image - - def __build_minifi_cpp_sql_image(self): - if "rocky" in MinifiContainer.MINIFI_TAG_PREFIX: - install_sql_cmd = "dnf -y install postgresql-odbc" - so_location = "psqlodbca.so" - elif "bullseye" in MinifiContainer.MINIFI_TAG_PREFIX or "bookworm" in MinifiContainer.MINIFI_TAG_PREFIX: - install_sql_cmd = "apt -y install odbc-postgresql" - so_location = "/usr/lib/$(gcc -dumpmachine)/odbc/psqlodbca.so" - elif "jammy" in MinifiContainer.MINIFI_TAG_PREFIX or "noble" in MinifiContainer.MINIFI_TAG_PREFIX: - install_sql_cmd = "apt -y install odbc-postgresql" - so_location = "/usr/lib/$(gcc -dumpmachine)/odbc/psqlodbca.so" - else: - install_sql_cmd = "apk --update --no-cache add psqlodbc" - so_location = "psqlodbca.so" - dockerfile = dedent("""\ - FROM {base_image} - USER root - RUN {install_sql_cmd} - RUN echo "[PostgreSQL ANSI]" > /odbcinst.ini.template && \ - echo "Description=PostgreSQL ODBC driver (ANSI version)" >> /odbcinst.ini.template && \ - echo "Driver={so_location}" >> /odbcinst.ini.template && \ - echo "Setup=libodbcpsqlS.so" >> /odbcinst.ini.template && \ - echo "Debug=0" >> /odbcinst.ini.template && \ - echo "CommLog=1" >> 
/odbcinst.ini.template && \ - echo "UsageCount=1" >> /odbcinst.ini.template && \ - echo "" >> /odbcinst.ini.template && \ - echo "[PostgreSQL Unicode]" >> /odbcinst.ini.template && \ - echo "Description=PostgreSQL ODBC driver (Unicode version)" >> /odbcinst.ini.template && \ - echo "Driver=psqlodbcw.so" >> /odbcinst.ini.template && \ - echo "Setup=libodbcpsqlS.so" >> /odbcinst.ini.template && \ - echo "Debug=0" >> /odbcinst.ini.template && \ - echo "CommLog=1" >> /odbcinst.ini.template && \ - echo "UsageCount=1" >> /odbcinst.ini.template - RUN odbcinst -i -d -f /odbcinst.ini.template - RUN echo "[ODBC]" > /etc/odbc.ini && \ - echo "Driver = PostgreSQL ANSI" >> /etc/odbc.ini && \ - echo "Description = PostgreSQL Data Source" >> /etc/odbc.ini && \ - echo "Servername = postgres" >> /etc/odbc.ini && \ - echo "Port = 5432" >> /etc/odbc.ini && \ - echo "Protocol = 8.4" >> /etc/odbc.ini && \ - echo "UserName = postgres" >> /etc/odbc.ini && \ - echo "Password = password" >> /etc/odbc.ini && \ - echo "Database = postgres" >> /etc/odbc.ini - USER minificpp - """.format(base_image='apacheminificpp:' + MinifiContainer.MINIFI_TAG_PREFIX + MinifiContainer.MINIFI_VERSION, - install_sql_cmd=install_sql_cmd, so_location=so_location)) - - return self.__build_image(dockerfile) - - def __build_minifi_cpp_image_with_llamacpp_model(self): - dockerfile = dedent("""\ - FROM {base_image} - RUN mkdir {models_path} && wget https://huggingface.co/bartowski/Qwen2-0.5B-Instruct-GGUF/resolve/main/Qwen2-0.5B-Instruct-IQ3_M.gguf --directory-prefix={models_path} - """.format(base_image='apacheminificpp:' + MinifiContainer.MINIFI_TAG_PREFIX + MinifiContainer.MINIFI_VERSION, - models_path=MinifiContainer.MINIFI_LOCATIONS.models_path)) - - return self.__build_image(dockerfile) - - def __build_http_proxy_image(self): - dockerfile = dedent("""\ - FROM {base_image} - RUN apt -y update && apt install -y apache2-utils - RUN htpasswd -b -c /etc/squid/.squid_users {proxy_username} {proxy_password} - RUN echo 'auth_param basic program /usr/lib/squid/basic_ncsa_auth /etc/squid/.squid_users' > /etc/squid/squid.conf && \ - echo 'auth_param basic realm proxy' >> /etc/squid/squid.conf && \ - echo 'acl authenticated proxy_auth REQUIRED' >> /etc/squid/squid.conf && \ - echo 'http_access allow authenticated' >> /etc/squid/squid.conf && \ - echo 'http_port {proxy_port}' >> /etc/squid/squid.conf - """.format(base_image='ubuntu/squid:5.2-22.04_beta', proxy_username='admin', proxy_password='test101', proxy_port='3128')) - - return self.__build_image(dockerfile) - - def __build_postgresql_server_image(self): - dockerfile = dedent("""\ - FROM {base_image} - RUN mkdir -p /docker-entrypoint-initdb.d - RUN echo "#!/bin/bash" > /docker-entrypoint-initdb.d/init-user-db.sh && \ - echo "set -e" >> /docker-entrypoint-initdb.d/init-user-db.sh && \ - echo "psql -v ON_ERROR_STOP=1 --username "postgres" --dbname "postgres" <<-EOSQL" >> /docker-entrypoint-initdb.d/init-user-db.sh && \ - echo " CREATE TABLE test_table (int_col INTEGER, text_col TEXT);" >> /docker-entrypoint-initdb.d/init-user-db.sh && \ - echo " INSERT INTO test_table (int_col, text_col) VALUES (1, 'apple');" >> /docker-entrypoint-initdb.d/init-user-db.sh && \ - echo " INSERT INTO test_table (int_col, text_col) VALUES (2, 'banana');" >> /docker-entrypoint-initdb.d/init-user-db.sh && \ - echo " INSERT INTO test_table (int_col, text_col) VALUES (3, 'pear');" >> /docker-entrypoint-initdb.d/init-user-db.sh && \ - echo " CREATE TABLE test_table2 (int_col INTEGER, \\"tExT_Col\\" TEXT);" >> 
/docker-entrypoint-initdb.d/init-user-db.sh && \ - echo " INSERT INTO test_table2 (int_col, \\"tExT_Col\\") VALUES (5, 'ApPlE');" >> /docker-entrypoint-initdb.d/init-user-db.sh && \ - echo " INSERT INTO test_table2 (int_col, \\"tExT_Col\\") VALUES (6, 'BaNaNa');" >> /docker-entrypoint-initdb.d/init-user-db.sh && \ - echo "EOSQL" >> /docker-entrypoint-initdb.d/init-user-db.sh - """.format(base_image='postgres:17.4')) - return self.__build_image(dockerfile) - - def __build_image(self, dockerfile, context_files=[]): - conf_dockerfile_buffer = BytesIO() - docker_context_buffer = BytesIO() - - try: - # Overlay conf onto base nifi image - conf_dockerfile_buffer.write(dockerfile.encode()) - conf_dockerfile_buffer.seek(0) - - with tarfile.open(mode='w', fileobj=docker_context_buffer) as docker_context: - dockerfile_info = tarfile.TarInfo('Dockerfile') - dockerfile_info.size = conf_dockerfile_buffer.getbuffer().nbytes - docker_context.addfile(dockerfile_info, - fileobj=conf_dockerfile_buffer) - - for context_file_path in context_files: - with open(context_file_path, 'rb') as file: - file_info = tarfile.TarInfo(os.path.basename(context_file_path)) - file_info.size = os.path.getsize(context_file_path) - docker_context.addfile(file_info, file) - docker_context_buffer.seek(0) - - logging.info('Creating configured image...') - image = self.client.images.build(fileobj=docker_context_buffer, - custom_context=True, - rm=True, - forcerm=True) - logging.info('Created image with id: %s', image[0].id) - - finally: - conf_dockerfile_buffer.close() - docker_context_buffer.close() - - return image[0] - - def __build_image_by_path(self, dir, name=None): - try: - logging.info('Creating configured image...') - image = self.client.images.build(path=dir, - tag=name, - rm=True, - forcerm=True) - logging.info('Created image with id: %s', image[0].id) - return image[0] - except Exception as e: - logging.info(e) - raise diff --git a/docker/test/integration/cluster/LogSource.py b/docker/test/integration/cluster/LogSource.py deleted file mode 100644 index c5ff6981f8..0000000000 --- a/docker/test/integration/cluster/LogSource.py +++ /dev/null @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -class LogSource: - FROM_DOCKER_CONTAINER = "from docker container" - FROM_GET_APP_LOG_METHOD = "from get_app_log() method" diff --git a/docker/test/integration/cluster/__init__.py b/docker/test/integration/cluster/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docker/test/integration/cluster/checkers/AzureChecker.py b/docker/test/integration/cluster/checkers/AzureChecker.py deleted file mode 100644 index ddeb1c9347..0000000000 --- a/docker/test/integration/cluster/checkers/AzureChecker.py +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import logging -from azure.storage.blob import BlobServiceClient -from azure.core.exceptions import ResourceExistsError -from utils import retry_check, wait_for - - -class AzureChecker: - AZURE_CONNECTION_STRING = \ - ("DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;" - "BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;QueueEndpoint=http://127.0.0.1:10001/devstoreaccount1;") - - def __init__(self, container_communicator): - self.container_communicator = container_communicator - self.blob_service_client = BlobServiceClient.from_connection_string(AzureChecker.AZURE_CONNECTION_STRING) - - @retry_check() - def check_azure_storage_server_data(self, container_name, test_data): - (code, output) = self.container_communicator.execute_command(container_name, ["find", "/data/__blobstorage__", "-type", "f"]) - if code != 0: - return False - data_file = output.strip() - (code, file_data) = self.container_communicator.execute_command(container_name, ["cat", data_file]) - return code == 0 and test_data in file_data - - def add_test_blob(self, blob_name, content="", with_snapshot=False): - try: - self.blob_service_client.create_container("test-container") - except ResourceExistsError: - logging.debug('test-container already exists') - - blob_client = self.blob_service_client.get_blob_client(container="test-container", blob=blob_name) - blob_client.upload_blob(content) - - if with_snapshot: - blob_client.create_snapshot() - - def __get_blob_and_snapshot_count(self): - container_client = self.blob_service_client.get_container_client("test-container") - return len(list(container_client.list_blobs(include=['deleted']))) - - def check_azure_blob_and_snapshot_count(self, blob_and_snapshot_count, timeout_seconds): - return wait_for(lambda: self.__get_blob_and_snapshot_count() == blob_and_snapshot_count, timeout_seconds) - - def check_azure_blob_storage_is_empty(self, timeout_seconds): - return wait_for(lambda: self.__get_blob_and_snapshot_count() == 0, timeout_seconds) diff --git a/docker/test/integration/cluster/checkers/ModbusChecker.py 
b/docker/test/integration/cluster/checkers/ModbusChecker.py deleted file mode 100644 index 9ef26a6fec..0000000000 --- a/docker/test/integration/cluster/checkers/ModbusChecker.py +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -class ModbusChecker: - def __init__(self, container_communicator): - self.container_communicator = container_communicator - - def set_value_on_plc_with_modbus(self, container_name, modbus_cmd): - print(modbus_cmd) - (code, output) = self.container_communicator.execute_command(container_name, ["modbus", "localhost", modbus_cmd]) - print(output) - return code == 0 diff --git a/docker/test/integration/cluster/checkers/PostgresChecker.py b/docker/test/integration/cluster/checkers/PostgresChecker.py deleted file mode 100644 index 80b84e89f9..0000000000 --- a/docker/test/integration/cluster/checkers/PostgresChecker.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from utils import wait_for - - -class PostgresChecker: - def __init__(self, container_communicator): - self.container_communicator = container_communicator - - def __query_postgres_server(self, postgresql_container_name, query, number_of_rows): - (code, output) = self.container_communicator.execute_command(postgresql_container_name, ["psql", "-U", "postgres", "-c", query]) - return code == 0 and str(number_of_rows) + " rows" in output - - def check_query_results(self, postgresql_container_name, query, number_of_rows, timeout_seconds): - return wait_for(lambda: self.__query_postgres_server(postgresql_container_name, query, number_of_rows), timeout_seconds) diff --git a/docker/test/integration/cluster/checkers/__init__.py b/docker/test/integration/cluster/checkers/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docker/test/integration/cluster/containers/AzureStorageServerContainer.py b/docker/test/integration/cluster/containers/AzureStorageServerContainer.py deleted file mode 100644 index c1918cdbd1..0000000000 --- a/docker/test/integration/cluster/containers/AzureStorageServerContainer.py +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging -from .Container import Container - - -class AzureStorageServerContainer(Container): - def __init__(self, feature_context, name, vols, network, image_store, command=None): - super().__init__(feature_context, name, 'azure-storage-server', vols, network, image_store, command) - - def get_startup_finished_log_entry(self): - return "Azurite Queue service is successfully listening at" - - def deploy(self): - if not self.set_deployed(): - return - - logging.info('Creating and running azure storage server docker container...') - self.client.containers.run( - "mcr.microsoft.com/azure-storage/azurite:3.35.0", - detach=True, - name=self.name, - network=self.network.name, - ports={'10000/tcp': 10000, '10001/tcp': 10001}, - entrypoint=self.command) - logging.info('Added container \'%s\'', self.name) diff --git a/docker/test/integration/cluster/containers/Container.py b/docker/test/integration/cluster/containers/Container.py deleted file mode 100644 index d2e73fd4ba..0000000000 --- a/docker/test/integration/cluster/containers/Container.py +++ /dev/null @@ -1,90 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import docker -import logging - -from ..LogSource import LogSource -from ..FeatureContext import FeatureContext - - -class Container: - def __init__(self, feature_context: FeatureContext, name, engine, vols, network, image_store, command): - self.feature_context = feature_context - self.name = name - self.engine = engine - self.vols = vols - self.network = network - self.image_store = image_store - self.command = command - - # Get docker client - self.client = docker.from_env() - self.deployed = False - self.post_startup_commands_finished = False - - def cleanup(self): - logging.info('Cleaning up container: %s', self.name) - try: - self.client.containers.get(self.name).remove(v=True, force=True) - except docker.errors.NotFound: - logging.warning("Container '%s' has been cleaned up already, nothing to be done.", self.name) - pass - - def set_deployed(self): - if self.deployed: - return False - self.deployed = True - return True - - def get_name(self): - return self.name - - def get_engine(self): - return self.engine - - def deploy(self): - raise NotImplementedError() - - def log_source(self): - return LogSource.FROM_DOCKER_CONTAINER - - def stop(self): - logging.info('Stopping docker container "%s"...', self.name) - self.client.containers.get(self.name).stop() - logging.info('Successfully stopped docker container "%s"', self.name) - self.deployed = False - - def kill(self): - logging.info('Killing docker container "%s"...', self.name) - self.client.containers.get(self.name).kill() - logging.info('Successfully killed docker container "%s"', self.name) - self.deployed = False - - def restart(self): - logging.info('Restarting docker container "%s"...', self.name) - self.client.containers.get(self.name).restart() - logging.info('Successfully restarted docker container "%s"', self.name) - self.deployed = True - - def get_startup_finished_log_entry(self): - raise NotImplementedError() - - def get_app_log(self): - raise NotImplementedError() - - def run_post_startup_commands(self): - return True diff --git a/docker/test/integration/cluster/containers/FlowContainer.py b/docker/test/integration/cluster/containers/FlowContainer.py deleted file mode 100644 index b3ac65c68b..0000000000 --- a/docker/test/integration/cluster/containers/FlowContainer.py +++ /dev/null @@ -1,63 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from .Container import Container - - -class Parameter: - def __init__(self, name, value): - self.name = name - self.value = value - - -class FlowContainer(Container): - def __init__(self, feature_context, config_dir, name, engine, vols, network, image_store, command): - super().__init__(feature_context=feature_context, - name=name, - engine=engine, - vols=vols, - network=network, - image_store=image_store, - command=command) - self.start_nodes = [] - self.config_dir = config_dir - self.controllers = [] - self.parameter_contexts = dict() - self.parameter_context_name = None - - def get_start_nodes(self): - return self.start_nodes - - def add_start_node(self, node): - self.start_nodes.append(node) - - def add_controller(self, controller): - self.controllers.append(controller) - - def get_controller(self, name): - for controller in self.controllers: - if controller.name == name: - return controller - raise ValueError(f"Controller with name '{name}' not found") - - def add_parameter_to_flow_config(self, parameter_context_name, parameter_name, parameter_value): - if parameter_context_name in self.parameter_contexts: - self.parameter_contexts[parameter_context_name].append(Parameter(parameter_name, parameter_value)) - else: - self.parameter_contexts[parameter_context_name] = [Parameter(parameter_name, parameter_value)] - - def set_parameter_context_name(self, parameter_context_name): - self.parameter_context_name = parameter_context_name diff --git a/docker/test/integration/cluster/containers/HttpProxyContainer.py b/docker/test/integration/cluster/containers/HttpProxyContainer.py deleted file mode 100644 index 6f9859a3ec..0000000000 --- a/docker/test/integration/cluster/containers/HttpProxyContainer.py +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import logging -from .Container import Container - - -class HttpProxyContainer(Container): - def __init__(self, feature_context, name, vols, network, image_store, command=None): - super().__init__(feature_context, name, 'http-proxy', vols, network, image_store, command) - - def get_startup_finished_log_entry(self): - return "Accepting HTTP Socket connections at" - - def deploy(self): - if not self.set_deployed(): - return - - logging.info('Creating and running http-proxy docker container...') - self.client.containers.run( - self.image_store.get_image(self.get_engine()), - detach=True, - name=self.name, - network=self.network.name, - entrypoint=self.command) - logging.info('Added container \'%s\'', self.name) diff --git a/docker/test/integration/cluster/containers/MinifiContainer.py b/docker/test/integration/cluster/containers/MinifiContainer.py deleted file mode 100644 index 5031a1704a..0000000000 --- a/docker/test/integration/cluster/containers/MinifiContainer.py +++ /dev/null @@ -1,169 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import os -import logging -import shortuuid -import shutil -import copy - -from .FlowContainer import FlowContainer -from minifi.flow_serialization.Minifi_flow_yaml_serializer import Minifi_flow_yaml_serializer -from minifi.flow_serialization.Minifi_flow_json_serializer import Minifi_flow_json_serializer - - -class MinifiOptions: - def __init__(self): - self.enable_provenance = False - self.enable_sql = False - self.config_format = "json" - self.set_ssl_context_properties = False - self.enable_log_metrics_publisher = False - if "true" in os.environ['MINIFI_FIPS']: - self.enable_openssl_fips_mode = True - else: - self.enable_openssl_fips_mode = False - self.download_llama_model = False - - -class MinifiLocations: - MINIFI_TAG_PREFIX = os.environ['MINIFI_TAG_PREFIX'] - MINIFI_VERSION = os.environ['MINIFI_VERSION'] - - def __init__(self): - if "rpm" in MinifiLocations.MINIFI_TAG_PREFIX: - self.run_minifi_cmd = '/usr/bin/minifi' - self.config_path = '/etc/nifi-minifi-cpp/config.yml' - self.properties_path = '/etc/nifi-minifi-cpp/minifi.properties' - self.log_properties_path = '/etc/nifi-minifi-cpp/minifi-log.properties' - self.uid_properties_path = '/etc/nifi-minifi-cpp/minifi-uid.properties' - self.models_path = '/var/lib/nifi-minifi-cpp/models' - self.minifi_home = '/var/lib/nifi-minifi-cpp' - else: - self.run_minifi_cmd = '/opt/minifi/minifi-current/bin/minifi.sh run' - self.config_path = '/opt/minifi/minifi-current/conf/config.yml' - self.properties_path = '/opt/minifi/minifi-current/conf/minifi.properties' - self.log_properties_path = '/opt/minifi/minifi-current/conf/minifi-log.properties' - self.uid_properties_path = '/opt/minifi/minifi-current/conf/minifi-uid.properties' - self.models_path = '/opt/minifi/minifi-current/models' - self.minifi_home = '/opt/minifi/minifi-current' - - -class MinifiContainer(FlowContainer): - MINIFI_TAG_PREFIX = os.environ['MINIFI_TAG_PREFIX'] - MINIFI_VERSION = os.environ['MINIFI_VERSION'] - MINIFI_LOCATIONS = MinifiLocations() - - def __init__(self, feature_context, config_dir, options, name, vols, network, image_store, command=None): - self.options = options - super().__init__(feature_context=feature_context, - config_dir=config_dir, - name=name, - engine='minifi-cpp', - vols=copy.copy(vols), - network=network, - image_store=image_store, - command=command) - self.container_specific_config_dir = self._create_container_config_dir(self.config_dir) - os.chmod(self.container_specific_config_dir, 0o777) - - def _create_container_config_dir(self, config_dir): - container_config_dir = os.path.join(config_dir, str(shortuuid.uuid())) - os.makedirs(container_config_dir) - for file_name in os.listdir(config_dir): - source = os.path.join(config_dir, file_name) - destination = os.path.join(container_config_dir, file_name) - if os.path.isfile(source): - shutil.copy(source, destination) - return container_config_dir - - def get_startup_finished_log_entry(self): - return "Starting Flow Controller" - - def _create_config(self): - if self.options.config_format == "yaml": - serializer = Minifi_flow_yaml_serializer() - elif self.options.config_format == "json": - serializer = Minifi_flow_json_serializer() - else: - assert False, "Invalid flow configuration format: {}".format(self.options.config_format) - test_flow_yaml = serializer.serialize(self.start_nodes, self.controllers, self.parameter_context_name, self.parameter_contexts) - logging.info('Using generated flow config yml:\n%s', test_flow_yaml) - absolute_flow_config_path = 
os.path.join(self.container_specific_config_dir, "config.yml") - with open(absolute_flow_config_path, 'wb') as config_file: - config_file.write(test_flow_yaml.encode('utf-8')) - os.chmod(absolute_flow_config_path, 0o777) - - def _create_properties(self): - properties_file_path = os.path.join(self.container_specific_config_dir, 'minifi.properties') - with open(properties_file_path, 'a') as f: - f.write("nifi.flow.configuration.file={conf_path}\n".format(conf_path=MinifiContainer.MINIFI_LOCATIONS.config_path)) - f.write("nifi.provenance.repository.directory.default={minifi_home}/provenance_repository\n".format(minifi_home=MinifiContainer.MINIFI_LOCATIONS.minifi_home)) - f.write("nifi.flowfile.repository.directory.default={minifi_home}/flowfile_repository\n".format(minifi_home=MinifiContainer.MINIFI_LOCATIONS.minifi_home)) - f.write("nifi.database.content.repository.directory.default={minifi_home}/content_repository\n".format(minifi_home=MinifiContainer.MINIFI_LOCATIONS.minifi_home)) - - if self.options.set_ssl_context_properties: - f.write("nifi.remote.input.secure=true\n") - f.write("nifi.security.client.certificate=/tmp/resources/minifi_client.crt\n") - f.write("nifi.security.client.private.key=/tmp/resources/minifi_client.key\n") - f.write("nifi.security.client.ca.certificate=/tmp/resources/root_ca.crt\n") - - if not self.options.enable_provenance: - f.write("nifi.provenance.repository.class.name=NoOpRepository\n") - - metrics_publisher_classes = [] - if self.options.enable_log_metrics_publisher: - f.write("nifi.metrics.publisher.LogMetricsPublisher.metrics=RepositoryMetrics\n") - f.write("nifi.metrics.publisher.LogMetricsPublisher.logging.interval=1s\n") - metrics_publisher_classes.append("LogMetricsPublisher") - - if metrics_publisher_classes: - f.write("nifi.metrics.publisher.class=" + ",".join(metrics_publisher_classes) + "\n") - - if self.options.enable_openssl_fips_mode: - f.write("nifi.openssl.fips.support.enable=true\n") - else: - f.write("nifi.openssl.fips.support.enable=false\n") - - def _setup_config(self): - self._create_properties() - self._create_config() - self.vols[os.path.join(self.container_specific_config_dir, 'config.yml')] = {"bind": MinifiContainer.MINIFI_LOCATIONS.config_path, "mode": "rw"} - self.vols[os.path.join(self.container_specific_config_dir, 'minifi.properties')] = {"bind": MinifiContainer.MINIFI_LOCATIONS.properties_path, "mode": "rw"} - self.vols[os.path.join(self.container_specific_config_dir, 'minifi-log.properties')] = {"bind": MinifiContainer.MINIFI_LOCATIONS.log_properties_path, "mode": "rw"} - - def deploy(self): - if not self.set_deployed(): - return - - logging.info('Creating and running minifi docker container...') - self._setup_config() - - if self.options.enable_sql: - image = self.image_store.get_image('minifi-cpp-sql') - elif self.options.download_llama_model: - image = self.image_store.get_image('minifi-cpp-with-llamacpp-model') - else: - image = 'apacheminificpp:' + MinifiContainer.MINIFI_TAG_PREFIX + MinifiContainer.MINIFI_VERSION - - self.client.containers.run( - image, - detach=True, - name=self.name, - network=self.network.name, - entrypoint=self.command, - volumes=self.vols) - logging.info('Added container \'%s\'', self.name) diff --git a/docker/test/integration/cluster/containers/NifiContainer.py b/docker/test/integration/cluster/containers/NifiContainer.py deleted file mode 100644 index 4120fa073b..0000000000 --- a/docker/test/integration/cluster/containers/NifiContainer.py +++ /dev/null @@ -1,100 +0,0 @@ -# Licensed to the Apache 
Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging - -from .FlowContainer import FlowContainer -from minifi.flow_serialization.Nifi_flow_json_serializer import Nifi_flow_json_serializer -import gzip -import os - - -class NiFiOptions: - def __init__(self): - self.use_ssl = False - - -class NifiContainer(FlowContainer): - NIFI_VERSION = '2.2.0' - NIFI_ROOT = '/opt/nifi/nifi-' + NIFI_VERSION - - def __init__(self, feature_context, config_dir, options, name, vols, network, image_store, command=None): - if not command: - if options.use_ssl: - entry_command = (r"sed -i -e 's/^\(nifi.remote.input.host\)=.*/\1={name}/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.remote.input.secure\)=.*/\1=true/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.sensitive.props.key\)=.*/\1=secret_key_12345/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.web.https.port\)=.*/\1=8443/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.web.https.host\)=.*/\1={name}/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.security.keystore\)=.*/\1=\/tmp\/resources\/keystore.jks/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.security.keystoreType\)=.*/\1=jks/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.security.keystorePasswd\)=.*/\1=passw0rd1!/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.security.keyPasswd\)=.*/#\1=passw0rd1!/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.security.truststore\)=.*/\1=\/tmp\/resources\/truststore.jks/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.security.truststoreType\)=.*/\1=jks/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.security.truststorePasswd\)=.*/\1=passw0rd1!/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.remote.input.socket.port\)=.*/\1=10443/' {nifi_root}/conf/nifi.properties && " - r"cp /tmp/nifi_config/flow.json.gz {nifi_root}/conf && {nifi_root}/bin/nifi.sh run & " - r"nifi_pid=$! 
&&" - r"tail -F --pid=${{nifi_pid}} {nifi_root}/logs/nifi-app.log").format(name=name, nifi_root=NifiContainer.NIFI_ROOT) - else: - entry_command = (r"sed -i -e 's/^\(nifi.remote.input.host\)=.*/\1={name}/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.sensitive.props.key\)=.*/\1=secret_key_12345/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.remote.input.secure\)=.*/\1=false/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.web.http.port\)=.*/\1=8080/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.web.https.port\)=.*/\1=/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.web.https.host\)=.*/\1=/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.web.http.host\)=.*/\1={name}/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.security.keystore\)=.*/\1=/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.security.keystoreType\)=.*/\1=/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.security.keystorePasswd\)=.*/\1=/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.security.keyPasswd\)=.*/\1=/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.security.truststore\)=.*/\1=/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.security.truststoreType\)=.*/\1=/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.security.truststorePasswd\)=.*/\1=/' {nifi_root}/conf/nifi.properties && " - r"sed -i -e 's/^\(nifi.remote.input.socket.port\)=.*/\1=10000/' {nifi_root}/conf/nifi.properties && " - r"cp /tmp/nifi_config/flow.json.gz {nifi_root}/conf && {nifi_root}/bin/nifi.sh run & " - r"nifi_pid=$! &&" - r"tail -F --pid=${{nifi_pid}} {nifi_root}/logs/nifi-app.log").format(name=name, nifi_root=NifiContainer.NIFI_ROOT) - command = ["/bin/sh", "-c", entry_command] - super().__init__(feature_context, config_dir, name, 'nifi', vols, network, image_store, command) - - def get_startup_finished_log_entry(self): - return "Started Application in" - - def __create_config(self): - serializer = Nifi_flow_json_serializer() - test_flow_json = serializer.serialize(self.start_nodes, NifiContainer.NIFI_VERSION) - logging.info('Using generated flow config json:\n%s', test_flow_json) - - with gzip.open(os.path.join(self.config_dir, "flow.json.gz"), 'wb') as gz_file: - gz_file.write(test_flow_json.encode()) - - def deploy(self): - if not self.set_deployed(): - return - - logging.info('Creating and running nifi docker container...') - self.__create_config() - self.client.containers.run( - "apache/nifi:" + NifiContainer.NIFI_VERSION, - detach=True, - name=self.name, - hostname=self.name, - network=self.network.name, - entrypoint=self.command, - volumes=self.vols) - logging.info('Added container \'%s\'', self.name) diff --git a/docker/test/integration/cluster/containers/PostgreSQLServerContainer.py b/docker/test/integration/cluster/containers/PostgreSQLServerContainer.py deleted file mode 100644 index d0ebed9c7d..0000000000 --- a/docker/test/integration/cluster/containers/PostgreSQLServerContainer.py +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from .Container import Container - - -class PostgreSQLServerContainer(Container): - def __init__(self, feature_context, name, vols, network, image_store, command=None): - super().__init__(feature_context, name, 'postgresql-server', vols, network, image_store, command) - - def get_startup_finished_log_entry(self): - return "database system is ready to accept connections" - - def deploy(self): - if not self.set_deployed(): - return - - self.docker_container = self.client.containers.run( - self.image_store.get_image(self.get_engine()), - detach=True, - name=self.name, - network=self.network.name, - environment=["POSTGRES_PASSWORD=password"], - entrypoint=self.command) diff --git a/docker/test/integration/cluster/containers/SyslogTcpClientContainer.py b/docker/test/integration/cluster/containers/SyslogTcpClientContainer.py deleted file mode 100644 index b8af4d37c6..0000000000 --- a/docker/test/integration/cluster/containers/SyslogTcpClientContainer.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import logging -from .Container import Container - - -class SyslogTcpClientContainer(Container): - def __init__(self, feature_context, name, vols, network, image_store, command=None): - super().__init__(feature_context, name, 'syslog-tcp-client', vols, network, image_store, command) - - def get_startup_finished_log_entry(self): - return "Syslog TCP client started" - - def deploy(self): - if not self.set_deployed(): - return - - logging.info('Creating and running a Syslog tcp client docker container...') - self.client.containers.run( - "ubuntu:24.04", - detach=True, - name=self.name, - network=self.network.name, - entrypoint='/bin/bash -c "echo Syslog TCP client started; while true; do logger --tcp -n ' - f'minifi-cpp-flow-{self.feature_context.id} -P 514 sample_log; sleep 1; done"') - logging.info('Added container \'%s\'', self.name) diff --git a/docker/test/integration/cluster/containers/SyslogUdpClientContainer.py b/docker/test/integration/cluster/containers/SyslogUdpClientContainer.py deleted file mode 100644 index 5019714f94..0000000000 --- a/docker/test/integration/cluster/containers/SyslogUdpClientContainer.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import logging -from .Container import Container - - -class SyslogUdpClientContainer(Container): - def __init__(self, feature_context, name, vols, network, image_store, command=None): - super().__init__(feature_context, name, 'syslog-udp-client', vols, network, image_store, command) - - def get_startup_finished_log_entry(self): - return "Syslog UDP client started" - - def deploy(self): - if not self.set_deployed(): - return - - logging.info('Creating and running a Syslog udp client docker container...') - self.client.containers.run( - "ubuntu:24.04", - detach=True, - name=self.name, - network=self.network.name, - entrypoint='/bin/bash -c "echo Syslog UDP client started; while true; do logger --udp -n ' - f'minifi-cpp-flow-{self.feature_context.id} -P 514 sample_log; sleep 1; done"') - logging.info('Added container \'%s\'', self.name) diff --git a/docker/test/integration/cluster/containers/ZookeeperContainer.py b/docker/test/integration/cluster/containers/ZookeeperContainer.py deleted file mode 100644 index 93c9b9d4da..0000000000 --- a/docker/test/integration/cluster/containers/ZookeeperContainer.py +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import logging -from .Container import Container - - -class ZookeeperContainer(Container): - def __init__(self, feature_context, name, vols, network, image_store, command): - super().__init__(feature_context, name, 'zookeeper', vols, network, image_store, command) - - def get_startup_finished_log_entry(self): - return "binding to port" - - def deploy(self): - if not self.set_deployed(): - return - - logging.info('Creating and running zookeeper docker container...') - self.client.containers.run( - image="zookeeper:3.9.2", - detach=True, - name=self.name, - network=self.network.name, - entrypoint=self.command) - logging.info('Added container \'%s\'', self.name) diff --git a/docker/test/integration/cluster/containers/__init__.py b/docker/test/integration/cluster/containers/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docker/test/integration/convert_cert_to_jks.sh b/docker/test/integration/convert_cert_to_jks.sh deleted file mode 100755 index 33c37b677f..0000000000 --- a/docker/test/integration/convert_cert_to_jks.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -set -euo pipefail - -# Usage: ./create_jks.sh - -DIR=$1 -SSL_KEY_PATH=$2 -SSL_CERT_PATH=$3 -CA_CERT_PATH=$4 - -KEYSTORE="$DIR/keystore.jks" -TRUSTSTORE="$DIR/truststore.jks" -PKCS12_FILE="$DIR/keystore.p12" -PASSWORD="passw0rd1!" - -cat "${CA_CERT_PATH}" >> "${SSL_CERT_PATH}" - -if [ ! -d "$DIR" ]; then - mkdir -p "$DIR" -fi - -openssl pkcs12 -export \ - -inkey "$SSL_KEY_PATH" \ - -in "$SSL_CERT_PATH" \ - -name "nifi-key" \ - -out "$PKCS12_FILE" \ - -password pass:$PASSWORD - -keytool -importkeystore \ - -destkeystore "$KEYSTORE" \ - -deststoretype jks \ - -destalias nifi-key \ - -srckeystore "$PKCS12_FILE" \ - -srcstoretype pkcs12 \ - -srcalias "nifi-key" \ - -storepass "$PASSWORD" \ - -srcstorepass "$PASSWORD" \ - -noprompt - -keytool -importcert \ - -alias "nifi-cert" \ - -file "$CA_CERT_PATH" \ - -keystore "$TRUSTSTORE" \ - -storepass "$PASSWORD" \ - -noprompt diff --git a/docker/test/integration/features/MiNiFi_integration_test_driver.py b/docker/test/integration/features/MiNiFi_integration_test_driver.py deleted file mode 100644 index 5599dc7f94..0000000000 --- a/docker/test/integration/features/MiNiFi_integration_test_driver.py +++ /dev/null @@ -1,358 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import logging -import os -import time -import uuid - -from pydoc import locate -from minifi.core.InputPort import InputPort -from minifi.core.OutputPort import OutputPort -from cluster.DockerTestCluster import DockerTestCluster -from minifi.validators.OutputValidator import OutputValidator -from minifi.validators.EmptyFilesOutPutValidator import EmptyFilesOutPutValidator -from minifi.validators.NoFileOutPutValidator import NoFileOutPutValidator -from minifi.validators.SingleFileOutputValidator import SingleFileOutputValidator -from minifi.validators.MultiFileOutputValidator import MultiFileOutputValidator -from minifi.validators.SingleOrMultiFileOutputValidator import SingleOrMultiFileOutputValidator -from minifi.validators.SingleOrMultiFileOutputRegexValidator import SingleOrMultiFileOutputRegexValidator -from minifi.validators.NoContentCheckFileNumberValidator import NoContentCheckFileNumberValidator -from minifi.validators.NumFileRangeValidator import NumFileRangeValidator -from minifi.validators.NumFileRangeAndFileSizeValidator import NumFileRangeAndFileSizeValidator -from minifi.validators.SingleJSONFileOutputValidator import SingleJSONFileOutputValidator -from utils import decode_escaped_str, get_minifi_pid, get_peak_memory_usage - - -class MiNiFi_integration_test: - def __init__(self, context, feature_id: str): - self.feature_id = feature_id - self.cluster = DockerTestCluster(context, feature_id=feature_id) - - self.connectable_nodes = [] - # Remote process groups are not connectables - self.remote_process_groups = [] - self.file_system_observer = None - self.test_file_hash = None - - self.docker_directory_bindings = context.directory_bindings - self.cluster.set_directory_bindings(self.docker_directory_bindings.get_directory_bindings(), self.docker_directory_bindings.get_data_directories()) - - def get_container_name_with_postfix(self, container_name: str): - return self.cluster.container_store.get_container_name_with_postfix(container_name) - - def cleanup(self): - self.cluster.cleanup() - if self.file_system_observer: - self.file_system_observer.observer.unschedule_all() - - def acquire_container(self, context, name, engine='minifi-cpp', command=None): - return self.cluster.acquire_container(context=context, name=name, engine=engine, command=command) - - def acquire_transient_minifi(self, context, name, engine='minifi-cpp'): - return self.cluster.acquire_transient_minifi(context=context, name=name, engine=engine) - - def start_nifi(self, context): - self.cluster.acquire_container(context=context, name='nifi', engine='nifi') - self.cluster.deploy_container('nifi') - assert self.cluster.wait_for_container_startup_to_finish('nifi') or self.cluster.log_app_output() - - def start(self, container_name=None): - if container_name is not None: - logging.info("Starting container %s", container_name) - self.cluster.deploy_container(container_name) - assert self.cluster.wait_for_container_startup_to_finish(container_name) or self.cluster.log_app_output() - return - logging.info("MiNiFi_integration_test start") - self.cluster.deploy_all() - assert self.cluster.wait_for_all_containers_to_finish_startup() or self.cluster.log_app_output() - - def stop(self, container_name): - logging.info("Stopping container %s", container_name) - self.cluster.stop_container(container_name) - - def kill(self, container_name): - logging.info("Killing container %s", container_name) - self.cluster.kill_container(container_name) - - def restart(self, container_name): - logging.info("Restarting container %s", 
container_name) - self.cluster.restart_container(container_name) - - def add_node(self, processor): - if processor.get_name() in (elem.get_name() for elem in self.connectable_nodes): - raise Exception("Trying to register processor with an already registered name: \"%s\"" % processor.get_name()) - self.connectable_nodes.append(processor) - - def get_or_create_node_by_name(self, node_name): - node = self.get_node_by_name(node_name) - if node is None: - if node_name == "RemoteProcessGroup": - raise Exception("Trying to register RemoteProcessGroup without an input port or address.") - node = locate("minifi.processors." + node_name + "." + node_name)() - node.set_name(node_name) - self.add_node(node) - return node - - def get_node_by_name(self, name): - for node in self.connectable_nodes: - if name == node.get_name(): - return node - raise Exception("Trying to fetch unknown node: \"%s\"" % name) - - def add_remote_process_group(self, remote_process_group): - if remote_process_group.get_name() in (elem.get_name() for elem in self.remote_process_groups): - raise Exception("Trying to register remote_process_group with an already registered name: \"%s\"" % remote_process_group.get_name()) - self.remote_process_groups.append(remote_process_group) - - def get_remote_process_group_by_name(self, name): - for node in self.remote_process_groups: - if name == node.get_name(): - return node - raise Exception("Trying to fetch unknown node: \"%s\"" % name) - - @staticmethod - def generate_input_port_for_remote_process_group(remote_process_group, name, use_compression=False): - input_port_node = InputPort(name, remote_process_group) - # Generate an MD5 hash unique to the remote process group id - input_port_node.set_uuid(uuid.uuid3(remote_process_group.get_uuid(), "input_port")) - input_port_node.set_use_compression(use_compression) - return input_port_node - - @staticmethod - def generate_output_port_for_remote_process_group(remote_process_group, name, use_compression=False): - output_port_node = OutputPort(name, remote_process_group) - # Generate an MD5 hash unique to the remote process group id - output_port_node.set_uuid(uuid.uuid3(remote_process_group.get_uuid(), "output_port")) - output_port_node.set_use_compression(use_compression) - return output_port_node - - def add_test_data(self, path, test_data, file_name=None): - if file_name is None: - file_name = str(uuid.uuid4()) - test_data = decode_escaped_str(test_data) - self.docker_directory_bindings.put_file_to_docker_path(path, file_name, test_data.encode('utf-8')) - - def add_random_test_data(self, path: str, size: int, file_name: str = None): - if file_name is None: - file_name = str(uuid.uuid4()) - self.test_file_hash = self.docker_directory_bindings.put_random_file_to_docker_path(path, file_name, size) - - def put_test_resource(self, file_name, contents): - self.docker_directory_bindings.put_test_resource(file_name, contents) - - def get_test_resource_path(self, file_name): - return self.docker_directory_bindings.get_test_resource_path(file_name) - - def add_file_system_observer(self, file_system_observer): - self.file_system_observer = file_system_observer - - def check_for_no_files_generated(self, wait_time_in_seconds): - output_validator = NoFileOutPutValidator() - output_validator.set_output_dir(self.file_system_observer.get_output_dir()) - self.__check_output_after_time_period(wait_time_in_seconds, output_validator) - - def check_for_no_files_generated_in_subdir(self, wait_time_in_seconds, subdir): - output_validator = NoFileOutPutValidator() 
- output_validator.set_output_dir(self.file_system_observer.get_output_dir() + "/" + subdir) - self.__check_output_after_time_period(wait_time_in_seconds, output_validator) - - def check_for_single_file_with_content_generated(self, content, timeout_seconds): - output_validator = SingleFileOutputValidator(decode_escaped_str(content)) - output_validator.set_output_dir(self.file_system_observer.get_output_dir()) - self.__check_output(timeout_seconds, output_validator, 1) - - def check_for_single_json_file_with_content_generated(self, content, timeout_seconds): - output_validator = SingleJSONFileOutputValidator(content) - output_validator.set_output_dir(self.file_system_observer.get_output_dir()) - self.__check_output(timeout_seconds, output_validator, 1) - - def check_for_multiple_files_generated(self, file_count, timeout_seconds, expected_content=[]): - output_validator = MultiFileOutputValidator(file_count, [decode_escaped_str(content) for content in expected_content]) - output_validator.set_output_dir(self.file_system_observer.get_output_dir()) - self.__check_output(timeout_seconds, output_validator, file_count) - - def check_subdirectory(self, sub_directory: str, expected_contents: list, timeout: int, interval: float = 1.0) -> bool: - logging.info("check_directory") - start_time = time.time() - expected_contents.sort() - while time.time() - start_time < timeout: - try: - current_contents = [] - directory = self.file_system_observer.get_output_dir() + "/" + sub_directory - current_files = os.listdir(directory) - for file in current_files: - file_path = os.path.join(directory, file) - with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: - content = f.read() - current_contents.append(content) - current_contents.sort() - - if current_contents == expected_contents: - logging.info("subdir checks out") - return True - logging.info(f"expected: {expected_contents} vs actual {current_contents}") - - except Exception as e: - print(f"Error checking directory: {e}") - - time.sleep(interval) - - return False - - def check_for_at_least_one_file_with_matching_content(self, regex, timeout_seconds): - output_validator = SingleOrMultiFileOutputRegexValidator(regex) - output_validator.set_output_dir(self.file_system_observer.get_output_dir()) - self.__check_output(timeout_seconds, output_validator) - - def check_for_at_least_one_file_with_content_generated(self, content, timeout_seconds): - output_validator = SingleOrMultiFileOutputValidator(decode_escaped_str(content)) - output_validator.set_output_dir(self.file_system_observer.get_output_dir()) - self.__check_output(timeout_seconds, output_validator) - - def check_for_num_files_generated(self, num_flowfiles, timeout_seconds): - output_validator = NoContentCheckFileNumberValidator(num_flowfiles) - output_validator.set_output_dir(self.file_system_observer.get_output_dir()) - self.__check_output(timeout_seconds, output_validator, max(1, num_flowfiles)) - - def check_for_num_file_range_generated_after_wait(self, min_files: int, max_files: int, wait_time_in_seconds: int): - output_validator = NumFileRangeValidator(min_files, max_files) - output_validator.set_output_dir(self.file_system_observer.get_output_dir()) - self.__check_output_after_time_period(wait_time_in_seconds, output_validator) - - def check_for_num_file_range_generated_with_timeout(self, min_files: int, max_files: int, timeout_in_seconds: int): - output_validator = NumFileRangeValidator(min_files, max_files) - 
output_validator.set_output_dir(self.file_system_observer.get_output_dir()) - self.__check_output_over_time_period(timeout_in_seconds, output_validator) - - def check_for_num_file_range_and_min_size_generated(self, min_files: int, max_files: int, min_size: int, wait_time_in_seconds: int): - output_validator = NumFileRangeAndFileSizeValidator(min_files, max_files, min_size) - output_validator.set_output_dir(self.file_system_observer.get_output_dir()) - self.__check_output_over_time_period(wait_time_in_seconds, output_validator) - - def check_for_an_empty_file_generated(self, timeout_seconds): - output_validator = EmptyFilesOutPutValidator() - output_validator.set_output_dir(self.file_system_observer.get_output_dir()) - self.__check_output(timeout_seconds, output_validator, 1) - - def __check_output_after_time_period(self, wait_time_in_seconds, output_validator): - time.sleep(wait_time_in_seconds) - self.__validate(output_validator) - - def __check_output_over_time_period(self, wait_time_in_seconds: int, output_validator: OutputValidator): - start_time = time.perf_counter() - while True: - assert not self.cluster.segfault_happened() or self.cluster.log_app_output() - if output_validator.validate(): - return - time.sleep(1) - if wait_time_in_seconds < (time.perf_counter() - start_time): - break - assert output_validator.validate() or self.cluster.log_app_output() - - def __check_output(self, timeout_seconds, output_validator, max_files=0): - result = self.file_system_observer.validate_output(timeout_seconds, output_validator, max_files) - assert not self.cluster.segfault_happened() or self.cluster.log_app_output() - assert result or self.cluster.log_app_output() - - def __validate(self, validator): - assert not self.cluster.segfault_happened() or self.cluster.log_app_output() - assert validator.validate() or self.cluster.log_app_output() - - def check_http_proxy_access(self, http_proxy_container_name, url): - assert self.cluster.check_http_proxy_access(http_proxy_container_name, url) or self.cluster.log_app_output() - - def check_azure_storage_server_data(self, azure_container_name, object_data): - assert self.cluster.check_azure_storage_server_data(azure_container_name, object_data) or self.cluster.log_app_output() - - def check_minifi_log_contents(self, line, timeout_seconds=60, count=1): - self.check_container_log_contents("minifi-cpp", line, timeout_seconds, count) - - def check_minifi_log_matches_regex(self, regex, timeout_seconds=60, count=1): - assert self.cluster.check_minifi_log_matches_regex(regex, timeout_seconds, count) or self.cluster.log_app_output() - - def check_container_log_contents(self, container_engine, line, timeout_seconds=60, count=1): - assert self.cluster.check_container_log_contents(container_engine, line, timeout_seconds, count) or self.cluster.log_app_output() - - def check_minifi_log_does_not_contain(self, line, wait_time_seconds): - assert self.cluster.check_minifi_log_does_not_contain(line, wait_time_seconds) or self.cluster.log_app_output() - - def check_query_results(self, postgresql_container_name, query, number_of_rows, timeout_seconds): - assert self.cluster.check_query_results(postgresql_container_name, query, number_of_rows, timeout_seconds) or self.cluster.log_app_output() - - def check_container_log_matches_regex(self, container_name, log_pattern, timeout_seconds, count=1): - assert self.cluster.wait_for_app_logs_regex(container_name, log_pattern, timeout_seconds, count) or self.cluster.log_app_output() - - def add_test_blob(self, blob_name, content, 
with_snapshot): - self.cluster.add_test_blob(blob_name, content, with_snapshot) - - def check_azure_blob_storage_is_empty(self, timeout_seconds): - assert self.cluster.check_azure_blob_storage_is_empty(timeout_seconds) or self.cluster.log_app_output() - - def check_azure_blob_and_snapshot_count(self, blob_and_snapshot_count, timeout_seconds): - assert self.cluster.check_azure_blob_and_snapshot_count(blob_and_snapshot_count, timeout_seconds) or self.cluster.log_app_output() - - def check_if_peak_memory_usage_exceeded(self, minimum_peak_memory_usage: int, timeout_seconds: int) -> None: - assert self.cluster.wait_for_peak_memory_usage_to_exceed(minimum_peak_memory_usage, timeout_seconds) or self.cluster.log_app_output() - - def check_if_memory_usage_is_below(self, maximum_memory_usage: int, timeout_seconds: int) -> None: - assert self.cluster.wait_for_memory_usage_to_drop_below(maximum_memory_usage, timeout_seconds) or self.cluster.log_app_output() - - def check_memory_usage_compared_to_peak(self, peak_multiplier: float, timeout_seconds: int) -> None: - peak_memory = get_peak_memory_usage(get_minifi_pid()) - assert (peak_memory is not None) or self.cluster.log_app_output() - assert (1.0 > peak_multiplier > 0.0) or self.cluster.log_app_output() - assert self.cluster.wait_for_memory_usage_to_drop_below(peak_memory * peak_multiplier, timeout_seconds) or self.cluster.log_app_output() - - def enable_provenance_repository_in_minifi(self): - self.cluster.enable_provenance_repository_in_minifi() - - def enable_c2_in_minifi(self): - self.cluster.enable_c2_in_minifi() - - def enable_c2_with_ssl_in_minifi(self): - self.cluster.enable_c2_with_ssl_in_minifi() - - def fetch_flow_config_from_c2_url_in_minifi(self): - self.cluster.fetch_flow_config_from_c2_url_in_minifi() - - def set_ssl_context_properties_in_minifi(self): - self.cluster.set_ssl_context_properties_in_minifi() - - def enable_sql_in_minifi(self): - self.cluster.enable_sql_in_minifi() - - def set_yaml_in_minifi(self): - self.cluster.set_yaml_in_minifi() - - def set_json_in_minifi(self): - self.cluster.set_json_in_minifi() - - def llama_model_is_downloaded_in_minifi(self): - self.cluster.llama_model_is_downloaded_in_minifi() - - def enable_log_metrics_publisher_in_minifi(self): - self.cluster.enable_log_metrics_publisher_in_minifi() - - def enable_openssl_fips_mode_in_minifi(self): - self.cluster.enable_openssl_fips_mode_in_minifi() - - def disable_openssl_fips_mode_in_minifi(self): - self.cluster.disable_openssl_fips_mode_in_minifi() - - def set_value_on_plc_with_modbus(self, container_name, modbus_cmd): - assert self.cluster.set_value_on_plc_with_modbus(container_name, modbus_cmd) - - def enable_ssl_in_nifi(self): - self.cluster.enable_ssl_in_nifi() diff --git a/docker/test/integration/features/README.md b/docker/test/integration/features/README.md deleted file mode 100644 index 421cfca81e..0000000000 --- a/docker/test/integration/features/README.md +++ /dev/null @@ -1,43 +0,0 @@ - - -# Apache MiNiFi Docker System Integration Tests - -Apache MiNiFi includes a suite of docker-based system integration tests. These -tests are designed to test the integration between distinct MiNiFi instances as -well as other systems which are available in docker, such as Apache NiFi. - -* Currently there is an extra unused test mockup for testing TLS with invoke_http. -* HashContent tests do not actually seem what they advertise to -* There is a test requirement for PublishKafka, confirming it can handle broker outages. 
This will be reintroduced once ConsumeKafka is on master and has similar testing requirements implemented. - -## Test environment - -The test framework is written in Python 3 and uses pip3 to install the required packages. It is built on python-behave, a BDD testing framework. The feature specifications are written in a human-readable format in the features directory. Please refer to the behave documentation for details on how the framework performs testing; a minimal step-definition sketch is included below. - -The tests use Docker containers, so the Docker engine should be installed on your system. Check the [get docker](https://docs.docker.com/get-docker/) page for further information. - -One of the required Python packages, `m2crypto`, depends on `swig` for compilation, -so `swig` should also be installed on your system (e.g. `sudo apt install swig` on Debian-based systems). - -### Execution of one or more flows - -Flows are executed immediately upon deployment and according to the schedule -properties defined in the flow.yml. As such, to minimize test latency it is -important to ensure that test inputs are added to the test cluster before flows -are deployed. Filesystem events are monitored using event APIs, ensuring that -flows are executed immediately upon input availability and output is validated -immediately after it is written to disk. - diff --git a/docker/test/integration/features/environment.py b/docker/test/integration/features/environment.py deleted file mode 100644 index f5772d6ac9..0000000000 --- a/docker/test/integration/features/environment.py +++ /dev/null @@ -1,84 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
- - -import logging -import datetime -import sys -import shortuuid -import os -import platform - -sys.path.append('../minifi') - -from MiNiFi_integration_test_driver import MiNiFi_integration_test # noqa: E402 -from minifi import * # noqa -from cluster.ImageStore import ImageStore # noqa -from cluster.DockerTestDirectoryBindings import DockerTestDirectoryBindings # noqa - - -def inject_feature_id(context, step): - if "${feature_id}" in step.name: - step.name = step.name.replace("${feature_id}", context.feature_id) - if step.table: - for row in step.table: - for i in range(len(row.cells)): - if "${feature_id}" in row.cells[i]: - row.cells[i] = row.cells[i].replace("${feature_id}", context.feature_id) - - -def before_scenario(context, scenario): - if "skip" in scenario.effective_tags: - scenario.skip("Marked with @skip") - return - - logging.info("Integration test setup at {time:%H:%M:%S.%f}".format(time=datetime.datetime.now())) - context.test = MiNiFi_integration_test(context=context, feature_id=context.feature_id) - - for step in scenario.steps: - inject_feature_id(context, step) - - -def after_scenario(context, scenario): - if "skip" in scenario.effective_tags: - logging.info("Scenario was skipped, no need for clean up.") - return - - logging.info("Integration test teardown at {time:%H:%M:%S.%f}".format(time=datetime.datetime.now())) - context.test.cleanup() - context.directory_bindings.cleanup_io() - - -def before_all(context): - context.config.setup_logging() - context.image_store = ImageStore() - - -def before_feature(context, feature): - if "x86_x64_only" in feature.tags: - is_x86 = platform.machine() in ("i386", "AMD64", "x86_64") - if not is_x86: - feature.skip("This feature is only x86/x64 compatible") - - if "SKIP_RPM" in feature.tags and "rpm" in os.environ['MINIFI_TAG_PREFIX']: - feature.skip("This feature is not yet supported on RPM installed images") - - feature_id = shortuuid.uuid() - context.feature_id = feature_id - context.directory_bindings = DockerTestDirectoryBindings(feature_id) - context.directory_bindings.create_new_data_directories() - context.directory_bindings.create_cert_files() - context.root_ca_cert = context.directory_bindings.root_ca_cert - context.root_ca_key = context.directory_bindings.root_ca_key diff --git a/docker/test/integration/features/steps/steps.py b/docker/test/integration/features/steps/steps.py deleted file mode 100644 index 205b38769e..0000000000 --- a/docker/test/integration/features/steps/steps.py +++ /dev/null @@ -1,805 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
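For orientation before the step definitions of the removed steps.py below: python-behave matches each line of a .feature file against the decorator strings of the step implementations, and every quoted {placeholder} in a decorator becomes a keyword argument of the step function. The following is a minimal, self-contained sketch of that pattern, assuming only that behave is installed; the step texts and the plain file I/O are hypothetical stand-ins, much simpler than the container-backed helpers used by the real steps.

    from behave import given, then

    @given('a file with the content "{content}" exists at "{path}"')
    def step_create_file(context, content, path):
        # behave parses the quoted placeholders out of the matching feature-file line
        with open(path, "w") as f:
            f.write(content)

    @then('the file at "{path}" contains "{content}"')
    def step_check_file(context, path, content):
        # the shared behave context object carries state between steps of a scenario
        with open(path) as f:
            assert content in f.read()

A feature line such as: Given a file with the content "test" exists at "/tmp/in.txt" would be routed to the first step. The removed steps.py relies on the same mechanism, but its step bodies drive Docker containers through context.test instead of touching the local filesystem.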
- -from filesystem_validation.FileSystemObserver import FileSystemObserver -from minifi.core.RemoteProcessGroup import RemoteProcessGroup -from minifi.core.Funnel import Funnel - -from minifi.controllers.SSLContextService import SSLContextService -from minifi.controllers.ODBCService import ODBCService -from minifi.controllers.JsonRecordSetWriter import JsonRecordSetWriter -from minifi.controllers.JsonTreeReader import JsonTreeReader -from minifi.controllers.XMLReader import XMLReader -from minifi.controllers.XMLRecordSetWriter import XMLRecordSetWriter -from minifi.controllers.XMLReader import XMLReader - -from behave import given, then, when -from behave.model_describe import ModelDescriptor -from pydoc import locate - -import logging -import time -import uuid -import humanfriendly - -import os - - -# Background -@given("the content of \"{directory}\" is monitored") -def step_impl(context, directory): - context.test.add_file_system_observer(FileSystemObserver(context.directory_bindings.docker_path_to_local_path(directory))) - - -@given("there is a \"{subdir}\" subdirectory in the monitored directory") -def step_impl(context, subdir): - output_dir = context.test.file_system_observer.get_output_dir() + "/" + subdir - os.mkdir(output_dir) - os.chmod(output_dir, 0o777) - - -def __create_processor(context, processor_type, processor_name, property_name, property_value, container_name, engine='minifi-cpp'): - container = context.test.acquire_container(context=context, name=container_name, engine=engine) - processor = locate("minifi.processors." + processor_type + "." + processor_type)(context=context) - processor.set_name(processor_name) - if property_name is not None: - processor.set_property(property_name, property_value) - context.test.add_node(processor) - # Assume that the first node declared is primary unless specified otherwise - if not container.get_start_nodes(): - container.add_start_node(processor) - - -# MiNiFi cluster setups -@given("a {processor_type} processor with the name \"{processor_name}\" and the \"{property_name}\" property set to \"{property_value}\" in a \"{minifi_container_name}\" flow") -@given("a {processor_type} processor with the name \"{processor_name}\" and the \"{property_name}\" property set to \"{property_value}\" in the \"{minifi_container_name}\" flow") -def step_impl(context, processor_type, processor_name, property_name, property_value, minifi_container_name): - __create_processor(context, processor_type, processor_name, property_name, property_value, minifi_container_name) - - -@given( - "a {processor_type} processor with the name \"{processor_name}\" and the \"{property_name}\" property set to \"{property_value}\" in the \"{minifi_container_name}\" flow using the \"{engine_name}\" engine") -def step_impl(context, processor_type, processor_name, property_name, property_value, minifi_container_name, engine_name): - __create_processor(context, processor_type, processor_name, property_name, property_value, minifi_container_name, engine_name) - - -@given("a {processor_type} processor with the \"{property_name}\" property set to \"{property_value}\" in a \"{minifi_container_name}\" flow") -@given("a {processor_type} processor with the \"{property_name}\" property set to \"{property_value}\" in the \"{minifi_container_name}\" flow") -def step_impl(context, processor_type, property_name, property_value, minifi_container_name): - __create_processor(context, processor_type, processor_type, property_name, property_value, minifi_container_name) - - -@given("a 
{processor_type} processor with the \"{property_name}\" property set to \"{property_value}\" in the \"{minifi_container_name}\" flow using the \"{engine_name}\" engine") -def step_impl(context, processor_type, property_name, property_value, minifi_container_name, engine_name): - __create_processor(context, processor_type, processor_type, property_name, property_value, minifi_container_name, engine_name) - - -@given("a {processor_type} processor with the \"{property_name}\" property set to \"{property_value}\"") -def step_impl(context, processor_type, property_name, property_value): - __create_processor(context, processor_type, processor_type, property_name, property_value, "minifi-cpp-flow") - - -@given("a {processor_type} processor with the name \"{processor_name}\" and the \"{property_name}\" property set to \"{property_value}\"") -def step_impl(context, processor_type, property_name, property_value, processor_name): - __create_processor(context, processor_type, processor_name, property_name, property_value, "minifi-cpp-flow") - - -@given("a {processor_type} processor with the name \"{processor_name}\" in the \"{minifi_container_name}\" flow") -def step_impl(context, processor_type, processor_name, minifi_container_name): - __create_processor(context, processor_type, processor_name, None, None, minifi_container_name) - - -@given("a {processor_type} processor with the name \"{processor_name}\" in the \"{minifi_container_name}\" flow using the \"{engine_name}\" engine") -def step_impl(context, processor_type, processor_name, minifi_container_name, engine_name): - __create_processor(context, processor_type, processor_name, None, None, minifi_container_name, engine_name) - - -@given("a {processor_type} processor with the name \"{processor_name}\"") -def step_impl(context, processor_type, processor_name): - __create_processor(context, processor_type, processor_name, None, None, "minifi-cpp-flow") - - -@given("a {processor_type} processor in the \"{minifi_container_name}\" flow") -@given("a {processor_type} processor in a \"{minifi_container_name}\" flow") -@given("a {processor_type} processor set up in a \"{minifi_container_name}\" flow") -def step_impl(context, processor_type, minifi_container_name): - __create_processor(context, processor_type, processor_type, None, None, minifi_container_name) - - -@given("a {processor_type} processor") -@given("a {processor_type} processor set up to communicate with an Azure blob storage") -def step_impl(context, processor_type): - __create_processor(context, processor_type, processor_type, None, None, "minifi-cpp-flow") - - -@given("a set of processors in the \"{minifi_container_name}\" flow") -def step_impl(context, minifi_container_name): - container = context.test.acquire_container(context=context, name=minifi_container_name) - logging.info(context.table) - for row in context.table: - processor = locate("minifi.processors." + row["type"] + "." 
+ row["type"])(context=context) - processor.set_name(row["name"]) - processor.set_uuid(row["uuid"]) - context.test.add_node(processor) - # Assume that the first node declared is primary unless specified otherwise - if not container.get_start_nodes(): - container.add_start_node(processor) - - -@given("a set of processors") -def step_impl(context): - rendered_table = ModelDescriptor.describe_table(context.table, " ") - context.execute_steps("""given a set of processors in the \"{minifi_container_name}\" flow - {table} - """.format(minifi_container_name="minifi-cpp-flow", table=rendered_table)) - - -@given("a RemoteProcessGroup node with name \"{rpg_name}\" is opened on \"{address}\" with transport protocol set to \"{transport_protocol}\"") -def step_impl(context, rpg_name, address, transport_protocol): - remote_process_group = RemoteProcessGroup(address, rpg_name, transport_protocol) - context.test.add_remote_process_group(remote_process_group) - - -@given("a RemoteProcessGroup node with name \"{rpg_name}\" is opened on \"{address}\"") -def step_impl(context, rpg_name, address): - context.execute_steps(f"given a RemoteProcessGroup node with name \"{rpg_name}\" is opened on \"{address}\" with transport protocol set to \"RAW\"") - - -@given("the \"{property_name}\" property of the {processor_name} processor is set to \"{property_value}\"") -def step_impl(context, property_name, processor_name, property_value): - processor = context.test.get_node_by_name(processor_name) - if property_value == "(not set)": - processor.unset_property(property_name) - else: - processor.set_property(property_name, property_value) - - -@given("the \"{property_name}\" property of the {controller_name} controller is set to \"{property_value}\"") -def step_impl(context, property_name, controller_name, property_value): - container = context.test.acquire_container(context=context, name="minifi-cpp-flow") - container.get_controller(controller_name).set_property(property_name, property_value) - - -@given("the \"{property_name}\" properties of the {processor_name_one} and {processor_name_two} processors are set to the same random guid") -def step_impl(context, property_name, processor_name_one, processor_name_two): - uuid_str = str(uuid.uuid4()) - context.test.get_node_by_name(processor_name_one).set_property(property_name, uuid_str) - context.test.get_node_by_name(processor_name_two).set_property(property_name, uuid_str) - - -@given("the max concurrent tasks attribute of the {processor_name} processor is set to {max_concurrent_tasks:d}") -def step_impl(context, processor_name, max_concurrent_tasks): - processor = context.test.get_node_by_name(processor_name) - processor.set_max_concurrent_tasks(max_concurrent_tasks) - - -@given("the \"{property_name}\" property of the {processor_name} processor is set to match the attribute \"{attribute_key}\" to \"{attribute_value}\"") -def step_impl(context, property_name, processor_name, attribute_key, attribute_value): - processor = context.test.get_node_by_name(processor_name) - if attribute_value == "(not set)": - # Ignore filtering - processor.set_property(property_name, "true") - return - filtering = "${" + attribute_key + ":equals('" + attribute_value + "')}" - logging.info("Filter: \"%s\"", filtering) - logging.info("Key: \"%s\", value: \"%s\"", attribute_key, attribute_value) - processor.set_property(property_name, filtering) - - -@given("the scheduling period of the {processor_name} processor is set to \"{scheduling_period}\"") -def step_impl(context, processor_name, 
scheduling_period): - processor = context.test.get_node_by_name(processor_name) - processor.set_scheduling_strategy("TIMER_DRIVEN") - processor.set_scheduling_period(scheduling_period) - - -@given("these processor properties are set") -@given("these processor properties are set to match the http proxy") -def step_impl(context): - for row in context.table: - context.test.get_node_by_name(row["processor name"]).set_property(row["property name"], row["property value"]) - - -@given("an input port with name \"{port_name}\" is created on the RemoteProcessGroup named \"{rpg_name}\"") -def step_impl(context, port_name, rpg_name): - remote_process_group = context.test.get_remote_process_group_by_name(rpg_name) - input_port_node = context.test.generate_input_port_for_remote_process_group(remote_process_group, port_name) - context.test.add_node(input_port_node) - - -@given("an input port using compression with name \"{port_name}\" is created on the RemoteProcessGroup named \"{rpg_name}\"") -def step_impl(context, port_name, rpg_name): - remote_process_group = context.test.get_remote_process_group_by_name(rpg_name) - input_port_node = context.test.generate_input_port_for_remote_process_group(remote_process_group, port_name, True) - context.test.add_node(input_port_node) - - -@given("an output port with name \"{port_name}\" is created on the RemoteProcessGroup named \"{rpg_name}\"") -def step_impl(context, port_name, rpg_name): - remote_process_group = context.test.get_remote_process_group_by_name(rpg_name) - input_port_node = context.test.generate_output_port_for_remote_process_group(remote_process_group, port_name) - context.test.add_node(input_port_node) - - -@given("an output port using compression with name \"{port_name}\" is created on the RemoteProcessGroup named \"{rpg_name}\"") -def step_impl(context, port_name, rpg_name): - remote_process_group = context.test.get_remote_process_group_by_name(rpg_name) - input_port_node = context.test.generate_output_port_for_remote_process_group(remote_process_group, port_name, True) - context.test.add_node(input_port_node) - - -@given("the output port \"{port_name}\" is connected to the {destination_name} processor") -def step_impl(context, port_name, destination_name): - destination = context.test.get_node_by_name(destination_name) - output_port_node = context.test.get_node_by_name(port_name) - output_port_node.out_proc.connect({"undefined": destination}) - - -@given("the \"{relationship}\" relationship of the {source_name} is connected to the {destination_name}") -@given("the \"{relationship}\" relationship of the {source_name} processor is connected to the {destination_name}") -def step_impl(context, relationship, source_name, destination_name): - source = context.test.get_node_by_name(source_name) - destination = context.test.get_node_by_name(destination_name) - source.out_proc.connect({relationship: destination}) - - -@given("the processors are connected up as described here") -def step_impl(context): - for row in context.table: - context.execute_steps( - "given the \"" + row["relationship name"] + "\" relationship of the " + row["source name"] + " processor is connected to the " + row["destination name"]) - - -@given("the connection going to the RemoteProcessGroup has \"drop empty\" set") -def step_impl(context): - input_port = context.test.get_node_by_name("to_nifi") - input_port.drop_empty_flowfiles = True - - -@given("a file with the content \"{content}\" is present in \"{path}\"") -@given("a file with the content '{content}' is present in 
'{path}'") -@then("a file with the content \"{content}\" is placed in \"{path}\"") -def step_impl(context, content, path): - context.test.add_test_data(path, content) - - -@given("a file of size {size} is present in \"{path}\"") -def step_impl(context, size: str, path: str): - context.test.add_random_test_data(path, humanfriendly.parse_size(size)) - - -@given("{number_of_files:d} files with the content \"{content}\" are present in \"{path}\"") -def step_impl(context, number_of_files, content, path): - for i in range(0, number_of_files): - context.test.add_test_data(path, content) - - -@given("an empty file is present in \"{path}\"") -def step_impl(context, path): - context.test.add_test_data(path, "") - - -@given("a file with filename \"{file_name}\" and content \"{content}\" is present in \"{path}\"") -def step_impl(context, file_name, content, path): - context.test.add_test_data(path, content, file_name) - - -@given("a Funnel with the name \"{funnel_name}\" is set up") -def step_impl(context, funnel_name): - funnel = Funnel(funnel_name) - context.test.add_node(funnel) - - -@given("the Funnel with the name \"{source_name}\" is connected to the {destination_name}") -def step_impl(context, source_name, destination_name): - source = context.test.get_or_create_node_by_name(source_name) - destination = context.test.get_or_create_node_by_name(destination_name) - source.out_proc.connect({'success': destination}) - - -@given("\"{processor_name}\" processor is a start node") -@given("\"{processor_name}\" port is a start node") -def step_impl(context, processor_name): - container = context.test.acquire_container(context=context, name="minifi-cpp-flow") - processor = context.test.get_or_create_node_by_name(processor_name) - container.add_start_node(processor) - - -# NiFi setups -@given("a NiFi flow is receiving data from the RemoteProcessGroup named \"{rpg_name}\" in an input port named \"{input_port_name}\" which has the same id as the port named \"{rpg_port_name}\"") -def step_impl(context, input_port_name, rpg_port_name, rpg_name): - remote_process_group = context.test.get_remote_process_group_by_name(rpg_name) - source = context.test.generate_input_port_for_remote_process_group(remote_process_group, input_port_name) - source.instance_id = context.test.get_node_by_name(rpg_port_name).instance_id - context.test.add_node(source) - container = context.test.acquire_container(context=context, name='nifi', engine='nifi') - # Assume that the first node declared is primary unless specified otherwise - if not container.get_start_nodes(): - container.add_start_node(source) - - -@given("a NiFi flow is sending data to an output port named \"{port_name}\" with the id of the port named \"{rpg_port_name}\" from the RemoteProcessGroup named \"{rpg_name}\"") -def step_impl(context, port_name, rpg_port_name, rpg_name): - remote_process_group = context.test.get_remote_process_group_by_name(rpg_name) - destination = context.test.generate_output_port_for_remote_process_group(remote_process_group, port_name) - destination.instance_id = context.test.get_node_by_name(rpg_port_name).instance_id - context.test.add_node(destination) - - -@given("a NiFi flow with the name \"{flow_name}\" is set up") -def step_impl(context, flow_name): - context.test.acquire_container(context=context, name=flow_name, engine='nifi') - - -@given("SSL is enabled in NiFi flow") -def step_impl(context): - context.test.enable_ssl_in_nifi() - - -@given("a transient MiNiFi flow with the name \"{flow_name}\" is set up") -def step_impl(context, 
flow_name): - context.test.acquire_transient_minifi(context=context, name=flow_name) - - -@given("the provenance repository is enabled in MiNiFi") -def step_impl(context): - context.test.enable_provenance_repository_in_minifi() - - -@given("C2 is enabled in MiNiFi") -def step_impl(context): - context.test.enable_c2_in_minifi() - - -@given("log metrics publisher is enabled in MiNiFi") -def step_impl(context): - context.test.enable_log_metrics_publisher_in_minifi() - - -@given("OpenSSL FIPS mode is enabled in MiNiFi") -def step_impl(context): - context.test.enable_openssl_fips_mode_in_minifi() - - -@given("OpenSSL FIPS mode is disabled in MiNiFi") -def step_impl(context): - context.test.disable_openssl_fips_mode_in_minifi() - - -# HTTP proxy setup -@given("the http proxy server is set up") -@given("a http proxy server is set up accordingly") -def step_impl(context): - context.test.acquire_container(context=context, name="http-proxy", engine="http-proxy") - - -# TLS -@given("an ssl context service is set up for {processor_name}") -@given("an ssl context service with a manual CA cert file is set up for {processor_name}") -def step_impl(context, processor_name): - ssl_context_service = SSLContextService(cert='/tmp/resources/minifi_client.crt', - key='/tmp/resources/minifi_client.key', - ca_cert='/tmp/resources/root_ca.crt') - - processor = context.test.get_node_by_name(processor_name) - processor.controller_services.append(ssl_context_service) - processor.set_property('SSL Context Service', ssl_context_service.name) - - -@given("an ssl context service using the system CA cert store is set up for {processor_name}") -def step_impl(context, processor_name): - ssl_context_service = SSLContextService(cert='/tmp/resources/minifi_client.crt', - key='/tmp/resources/minifi_client.key', - use_system_cert_store='true') - - processor = context.test.get_node_by_name(processor_name) - processor.controller_services.append(ssl_context_service) - processor.set_property('SSL Context Service', ssl_context_service.name) - - -# Record set reader and writer -@given("a JsonRecordSetWriter controller service is set up with \"{}\" output grouping in the \"{minifi_container_name}\" flow") -def step_impl(context, output_grouping: str, minifi_container_name: str): - json_record_set_writer = JsonRecordSetWriter(name="JsonRecordSetWriter", output_grouping=output_grouping) - container = context.test.acquire_container(context=context, name=minifi_container_name) - container.add_controller(json_record_set_writer) - - -@given("a JsonTreeReader controller service is set up in the \"{minifi_container_name}\" flow") -def step_impl(context, minifi_container_name: str): - json_record_set_reader = JsonTreeReader("JsonTreeReader") - container = context.test.acquire_container(context=context, name=minifi_container_name) - container.add_controller(json_record_set_reader) - - -@given("a JsonRecordSetWriter controller service is set up with \"{}\" output grouping") -def step_impl(context, output_grouping: str): - context.execute_steps(f"given a JsonRecordSetWriter controller service is set up with \"{output_grouping}\" output grouping in the \"minifi-cpp-flow\" flow") - - -@given("a JsonTreeReader controller service is set up") -def step_impl(context): - context.execute_steps("given a JsonTreeReader controller service is set up in the \"minifi-cpp-flow\" flow") - - -@given("a XMLReader controller service is set up") -def step_impl(context): - xml_reader = XMLReader("XMLReader") - container = context.test.acquire_container(context=context, 
name="minifi-cpp-flow") - container.add_controller(xml_reader) - - -@given("a XMLRecordSetWriter controller service is set up") -def step_impl(context): - xml_record_set_writer = XMLRecordSetWriter("XMLRecordSetWriter") - container = context.test.acquire_container(context=context, name="minifi-cpp-flow") - container.add_controller(xml_record_set_writer) - - -# azure storage setup -@given("an Azure storage server is set up") -def step_impl(context): - context.test.acquire_container(context=context, name="azure-storage-server", engine="azure-storage-server") - - -# syslog client -@given(u'a Syslog client with {protocol} protocol is setup to send logs to minifi') -def step_impl(context, protocol): - client_name = "syslog-" + protocol.lower() + "-client" - context.test.acquire_container(context=context, name=client_name, engine=client_name) - - -def setUpSslContextServiceForProcessor(context, processor_name: str): - minifi_crt_file = '/tmp/resources/minifi_client.crt' - minifi_key_file = '/tmp/resources/minifi_client.key' - root_ca_crt_file = '/tmp/resources/root_ca.crt' - ssl_context_service = SSLContextService(cert=minifi_crt_file, ca_cert=root_ca_crt_file, key=minifi_key_file) - processor = context.test.get_node_by_name(processor_name) - processor.controller_services.append(ssl_context_service) - processor.set_property("SSL Context Service", ssl_context_service.name) - - -def setUpSslContextServiceForRPG(context, rpg_name: str): - minifi_crt_file = '/tmp/resources/minifi_client.crt' - minifi_key_file = '/tmp/resources/minifi_client.key' - root_ca_crt_file = '/tmp/resources/root_ca.crt' - ssl_context_service = SSLContextService(cert=minifi_crt_file, ca_cert=root_ca_crt_file, key=minifi_key_file) - container = context.test.acquire_container(context=context, name="minifi-cpp-flow") - container.add_controller(ssl_context_service) - rpg = context.test.get_remote_process_group_by_name(rpg_name) - rpg.add_property("SSL Context Service", ssl_context_service.name) - - -# TCP client -@given('a TCP client is set up to send a test TCP message to minifi') -def step_impl(context): - context.test.acquire_container(context=context, name="tcp-client", engine="tcp-client") - - -# SQL -@given("an ODBCService is setup up for {processor_name} with the name \"{service_name}\"") -def step_impl(context, processor_name, service_name): - odbc_service = ODBCService(name=service_name, - connection_string="Driver={{PostgreSQL ANSI}};Server={server_hostname};Port=5432;Database=postgres;Uid=postgres;Pwd=password;".format(server_hostname=context.test.get_container_name_with_postfix("postgresql-server"))) - processor = context.test.get_node_by_name(processor_name) - processor.controller_services.append(odbc_service) - processor.set_property("DB Controller Service", odbc_service.name) - - -@given("a PostgreSQL server is set up") -def step_impl(context): - context.test.enable_sql_in_minifi() - context.test.acquire_container(context=context, name="postgresql-server", engine="postgresql-server") - - -@when("the MiNiFi instance starts up") -@when("both instances start up") -@when("all instances start up") -@when("all other processes start up") -def step_impl(context): - context.test.start() - - -@when("\"{container_name}\" flow is stopped") -def step_impl(context, container_name): - context.test.stop(container_name) - - -@when("\"{container_name}\" flow is restarted") -def step_impl(context, container_name): - context.test.restart(container_name) - - -@then("\"{container_name}\" flow is stopped") -def step_impl(context, 
container_name): - context.test.stop(container_name) - - -@then("\"{container_name}\" flow is killed") -def step_impl(context, container_name): - context.test.kill(container_name) - - -@then("\"{container_name}\" flow is restarted") -def step_impl(context, container_name): - context.test.restart(container_name) - - -@when("\"{container_name}\" flow is started") -@then("\"{container_name}\" flow is started") -def step_impl(context, container_name): - context.test.start(container_name) - - -@then("{duration} later") -def step_impl(context, duration): - time.sleep(humanfriendly.parse_timespan(duration)) - - -@when("content \"{content}\" is added to file \"{file_name}\" present in directory \"{path}\" {seconds:d} seconds later") -def step_impl(context, content, file_name, path, seconds): - time.sleep(seconds) - context.test.add_test_data(path, content, file_name) - - -@then("a flowfile with the content \"{content}\" is placed in the monitored directory in less than {duration}") -@then("a flowfile with the content '{content}' is placed in the monitored directory in less than {duration}") -@then("{number_of_flow_files:d} flowfiles with the content \"{content}\" are placed in the monitored directory in less than {duration}") -def step_impl(context, content, duration, number_of_flow_files=1): - context.test.check_for_multiple_files_generated(number_of_flow_files, humanfriendly.parse_timespan(duration), [content]) - - -@then("a flowfile with the JSON content \"{content}\" is placed in the monitored directory in less than {duration}") -@then("a flowfile with the JSON content '{content}' is placed in the monitored directory in less than {duration}") -def step_impl(context, content, duration): - context.test.check_for_single_json_file_with_content_generated(content, humanfriendly.parse_timespan(duration)) - - -@then("at least one flowfile's content match the following regex: \"{regex}\" in less than {duration}") -@then("at least one flowfile's content match the following regex: '{regex}' in less than {duration}") -def step_impl(context, regex: str, duration: str): - context.test.check_for_at_least_one_file_with_matching_content(regex, humanfriendly.parse_timespan(duration)) - - -@then("at least one flowfile with the content \"{content}\" is placed in the monitored directory in less than {duration}") -@then("at least one flowfile with the content '{content}' is placed in the monitored directory in less than {duration}") -def step_impl(context, content, duration): - context.test.check_for_at_least_one_file_with_content_generated(content, humanfriendly.parse_timespan(duration)) - - -@then("no files are placed in the monitored directory in {duration} of running time") -def step_impl(context, duration): - context.test.check_for_no_files_generated(humanfriendly.parse_timespan(duration)) - - -@then("there is exactly {num_flowfiles} files in the monitored directory") -def step_impl(context, num_flowfiles): - context.test.check_for_num_files_generated(int(num_flowfiles), humanfriendly.parse_timespan("1")) - - -@then("{num_flowfiles} flowfiles are placed in the monitored directory in less than {duration}") -def step_impl(context, num_flowfiles, duration): - if num_flowfiles == 0: - context.execute_steps(f"no files are placed in the monitored directory in {duration} of running time") - return - context.test.check_for_num_files_generated(int(num_flowfiles), humanfriendly.parse_timespan(duration)) - - -@then("at least one flowfile is placed in the monitored directory in less than {duration}") -def 
step_impl(context, duration): - context.test.check_for_num_file_range_generated_with_timeout(1, float('inf'), humanfriendly.parse_timespan(duration)) - - -@then("at least one flowfile with minimum size of \"{size}\" is placed in the monitored directory in less than {duration}") -def step_impl(context, duration: str, size: str): - context.test.check_for_num_file_range_and_min_size_generated(1, float('inf'), humanfriendly.parse_size(size), humanfriendly.parse_timespan(duration)) - - -@then("one flowfile with the contents \"{content}\" is placed in the monitored directory in less than {duration}") -def step_impl(context, content, duration): - context.test.check_for_multiple_files_generated(1, humanfriendly.parse_timespan(duration), [content]) - - -@then("two flowfiles with the contents \"{content_1}\" and \"{content_2}\" are placed in the monitored directory in less than {duration}") -@then("two flowfiles with the contents '{content_1}' and '{content_2}' are placed in the monitored directory in less than {duration}") -def step_impl(context, content_1, content_2, duration): - context.test.check_for_multiple_files_generated(2, humanfriendly.parse_timespan(duration), [content_1, content_2]) - - -@then("exactly these flowfiles are in the monitored directory in less than {duration}: \"\"") -def step_impl(context, duration): - context.execute_steps(f"Then no files are placed in the monitored directory in {duration} of running time") - - -@then("exactly these flowfiles are in the monitored directory's \"{subdir}\" subdirectory in less than {duration}: \"\"") -def step_impl(context, duration, subdir): - assert context.test.check_subdirectory(sub_directory=subdir, expected_contents=[], timeout=humanfriendly.parse_timespan(duration)) or context.test.cluster.log_app_output() - - -@then("exactly these flowfiles are in the monitored directory in less than {duration}: \"{contents}\"") -def step_impl(context, duration, contents): - contents_arr = contents.split(",") - context.test.check_for_multiple_files_generated(len(contents_arr), humanfriendly.parse_timespan(duration), contents_arr) - - -@then("exactly these flowfiles are in the monitored directory's \"{subdir}\" subdirectory in less than {duration}: \"{contents}\"") -def step_impl(context, duration, subdir, contents): - contents_arr = contents.split(",") - assert context.test.check_subdirectory(sub_directory=subdir, expected_contents=contents_arr, timeout=humanfriendly.parse_timespan(duration)) or context.test.cluster.log_app_output() - - -@then("flowfiles with these contents are placed in the monitored directory in less than {duration}: \"{contents}\"") -def step_impl(context, duration, contents): - contents_arr = contents.split(",") - context.test.check_for_multiple_files_generated(0, humanfriendly.parse_timespan(duration), contents_arr) - - -@then("after a wait of {duration}, at least {lower_bound:d} and at most {upper_bound:d} flowfiles are produced and placed in the monitored directory") -def step_impl(context, lower_bound, upper_bound, duration): - context.test.check_for_num_file_range_generated_after_wait(lower_bound, upper_bound, humanfriendly.parse_timespan(duration)) - - -@then("{number_of_files:d} flowfiles are placed in the monitored directory in {duration}") -@then("{number_of_files:d} flowfile is placed in the monitored directory in {duration}") -def step_impl(context, number_of_files, duration): - context.test.check_for_multiple_files_generated(number_of_files, humanfriendly.parse_timespan(duration)) - - -@then("at least one empty 
flowfile is placed in the monitored directory in less than {duration}") -def step_impl(context, duration): - context.test.check_for_an_empty_file_generated(humanfriendly.parse_timespan(duration)) - - -@then("no errors were generated on the http-proxy regarding \"{url}\"") -def step_impl(context, url): - context.test.check_http_proxy_access('http-proxy', url) - - -# Azure -@when("test blob \"{blob_name}\" with the content \"{content}\" is created on Azure blob storage") -def step_impl(context, blob_name, content): - context.test.add_test_blob(blob_name, content, False) - - -@when("test blob \"{blob_name}\" with the content \"{content}\" and a snapshot is created on Azure blob storage") -def step_impl(context, blob_name, content): - context.test.add_test_blob(blob_name, content, True) - - -@when("test blob \"{blob_name}\" is created on Azure blob storage") -def step_impl(context, blob_name): - context.test.add_test_blob(blob_name, "", False) - - -@when("test blob \"{blob_name}\" is created on Azure blob storage with a snapshot") -def step_impl(context, blob_name): - context.test.add_test_blob(blob_name, "", True) - - -@then("the object on the Azure storage server is \"{object_data}\"") -def step_impl(context, object_data): - context.test.check_azure_storage_server_data("azure-storage-server", object_data) - - -@then("the Azure blob storage becomes empty in {timeout_seconds:d} seconds") -def step_impl(context, timeout_seconds): - context.test.check_azure_blob_storage_is_empty(timeout_seconds) - - -@then("the blob and snapshot count becomes {blob_and_snapshot_count:d} in {timeout_seconds:d} seconds") -def step_impl(context, blob_and_snapshot_count, timeout_seconds): - context.test.check_azure_blob_and_snapshot_count(blob_and_snapshot_count, timeout_seconds) - - -# SQL -@then("the query \"{query}\" returns {number_of_rows:d} rows in less than {timeout_seconds:d} seconds on the PostgreSQL server") -def step_impl(context, query: str, number_of_rows: int, timeout_seconds: int): - context.test.check_query_results(context.test.get_container_name_with_postfix("postgresql-server"), query, number_of_rows, timeout_seconds) - - -@then("the Minifi logs contain the following message: \"{log_message}\" in less than {duration}") -@then("the Minifi logs contain the following message: '{log_message}' in less than {duration}") -def step_impl(context, log_message, duration): - context.test.check_minifi_log_contents(log_message, humanfriendly.parse_timespan(duration)) - - -@then("the Minifi logs contain the following message: \"{log_message}\" {count:d} times after {seconds:d} seconds") -def step_impl(context, log_message, count, seconds): - time.sleep(seconds) - context.test.check_minifi_log_contents(log_message, 1, count) - - -@then("the Minifi logs do not contain the following message: \"{log_message}\" after {seconds:d} seconds") -def step_impl(context, log_message, seconds): - context.test.check_minifi_log_does_not_contain(log_message, seconds) - - -@then("the Minifi logs match the following regex: \"{regex}\" in less than {duration}") -def step_impl(context, regex, duration): - context.test.check_minifi_log_matches_regex(regex, humanfriendly.parse_timespan(duration)) - - -@then("the \"{minifi_container_name}\" flow has a log line matching \"{log_pattern}\" in less than {duration}") -def step_impl(context, minifi_container_name, log_pattern, duration): - context.test.check_container_log_matches_regex(minifi_container_name, log_pattern, humanfriendly.parse_timespan(duration), count=1) - - -@given("SSL 
properties are set in MiNiFi") -def step_impl(context): - context.test.set_ssl_context_properties_in_minifi() - - -@given(u'flow configuration path is set up in flow url property') -def step_impl(context): - context.test.acquire_container(context=context, name="minifi-cpp-flow", engine="minifi-cpp") - context.test.fetch_flow_config_from_c2_url_in_minifi() - - -# MiNiFi memory usage -@then(u'the peak memory usage of the agent is more than {size} in less than {duration}') -def step_impl(context, size: str, duration: str) -> None: - context.test.check_if_peak_memory_usage_exceeded(humanfriendly.parse_size(size), humanfriendly.parse_timespan(duration)) - - -@then(u'the memory usage of the agent is less than {size} in less than {duration}') -def step_impl(context, size: str, duration: str) -> None: - context.test.check_if_memory_usage_is_below(humanfriendly.parse_size(size), humanfriendly.parse_timespan(duration)) - - -@then(u'the memory usage of the agent decreases to {peak_usage_percent}% peak usage in less than {duration}') -def step_impl(context, peak_usage_percent: str, duration: str) -> None: - context.test.check_memory_usage_compared_to_peak(float(peak_usage_percent) * 0.01, humanfriendly.parse_timespan(duration)) - - -@given(u'a MiNiFi CPP server with yaml config') -def step_impl(context): - context.test.set_yaml_in_minifi() - - -@given(u'a MiNiFi CPP server with json config') -def step_impl(context): - context.test.set_json_in_minifi() - - -@given(u'a SSL context service is set up for the following processor: \"{processor_name}\"') -def step_impl(context, processor_name: str): - setUpSslContextServiceForProcessor(context, processor_name) - - -@given(u'a SSL context service is set up for the following remote process group: \"{remote_process_group}\"') -def step_impl(context, remote_process_group: str): - setUpSslContextServiceForRPG(context, remote_process_group) - - -@given("a non-sensitive parameter in the flow config called '{parameter_name}' with the value '{parameter_value}' in the parameter context '{parameter_context_name}'") -def step_impl(context, parameter_context_name, parameter_name, parameter_value): - container = context.test.acquire_container(context=context, name='minifi-cpp-flow', engine='minifi-cpp') - container.add_parameter_to_flow_config(parameter_context_name, parameter_name, parameter_value) - - -@given("parameter context name is set to '{parameter_context_name}'") -def step_impl(context, parameter_context_name): - container = context.test.acquire_container(context=context, name='minifi-cpp-flow', engine='minifi-cpp') - container.set_parameter_context_name(parameter_context_name) - - -@given("a LlamaCpp model is present on the MiNiFi host") -def step_impl(context): - context.test.llama_model_is_downloaded_in_minifi() - - -@when(u'NiFi is started') -def step_impl(context): - context.test.start_nifi(context) diff --git a/docker/test/integration/features/utils.py b/docker/test/integration/features/utils.py deleted file mode 100644 index bd86cbab09..0000000000 --- a/docker/test/integration/features/utils.py +++ /dev/null @@ -1,92 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import time -import functools -import os -import subprocess -from typing import Optional - - -def retry_check(max_tries=5, retry_interval=1): - def retry_check_func(func): - @functools.wraps(func) - def retry_wrapper(*args, **kwargs): - for _ in range(max_tries): - if func(*args, **kwargs): - return True - time.sleep(retry_interval) - return False - return retry_wrapper - return retry_check_func - - -def decode_escaped_str(str): - special = {"n": "\n", "v": "\v", "t": "\t", "f": "\f", "r": "\r", "a": "\a", "\\": "\\"} - escaped = False - result = "" - for ch in str: - if escaped: - if ch in special: - result += special[ch] - else: - result += "\\" + ch - escaped = False - elif ch == "\\": - escaped = True - else: - result += ch - if escaped: - result += "\\" - return result - - -def is_temporary_output_file(filepath): - return filepath.split(os.path.sep)[-1][0] == '.' - - -def get_minifi_pid() -> int: - return int(subprocess.run(["pidof", "-s", "minifi"], capture_output=True).stdout) - - -def get_peak_memory_usage(pid: int) -> Optional[int]: - with open("/proc/" + str(pid) + "/status") as stat_file: - for line in stat_file: - if "VmHWM" in line: - peak_resident_set_size = [int(s) for s in line.split() if s.isdigit()].pop() - return peak_resident_set_size * 1024 - return None - - -def get_memory_usage(pid: int) -> Optional[int]: - with open("/proc/" + str(pid) + "/status") as stat_file: - for line in stat_file: - if "VmRSS" in line: - resident_set_size = [int(s) for s in line.split() if s.isdigit()].pop() - return resident_set_size * 1024 - return None - - -def wait_for(action, timeout_seconds, check_period=1, *args, **kwargs): - start_time = time.perf_counter() - while True: - result = action(*args, **kwargs) - if result: - return result - time.sleep(check_period) - if timeout_seconds < (time.perf_counter() - start_time): - break - return False diff --git a/docker/test/integration/filesystem_validation/FileSystemObserver.py b/docker/test/integration/filesystem_validation/FileSystemObserver.py deleted file mode 100644 index 98874622f6..0000000000 --- a/docker/test/integration/filesystem_validation/FileSystemObserver.py +++ /dev/null @@ -1,69 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import logging -import time -from threading import Event - -from watchdog.observers import Observer -from .OutputEventHandler import OutputEventHandler - - -class FileSystemObserver(object): - def __init__(self, test_output_dir): - - self.test_output_dir = test_output_dir - - # Start observing output dir - self.done_event = Event() - self.event_handler = OutputEventHandler(self.done_event) - self.observer = Observer() - self.observer.schedule(self.event_handler, self.test_output_dir, recursive=True) - self.observer.start() - - def get_output_dir(self): - return self.test_output_dir - - def restart_observer_if_needed(self): - if self.observer.is_alive(): - return - - self.observer = Observer() - self.done_event.clear() - self.observer.schedule(self.event_handler, self.test_output_dir, recursive=True) - self.observer.start() - - def validate_output(self, timeout_seconds, output_validator, max_files=0): - logging.info('Waiting up to %d seconds for valid test outputs (maximum of %d files)', timeout_seconds, max_files) - self.restart_observer_if_needed() - try: - if max_files and max_files <= self.event_handler.get_num_files_created(): - return output_validator.validate() - wait_start_time = time.perf_counter() - while True: - # Note: The timing on Event.wait() is inaccurate - self.done_event.wait(timeout_seconds - time.perf_counter() + wait_start_time) - if self.done_event.is_set(): - self.done_event.clear() - if max_files and max_files <= self.event_handler.get_num_files_created(): - return output_validator.validate() - if output_validator.validate(): - return True - if timeout_seconds < (time.perf_counter() - wait_start_time): - return output_validator.validate() - finally: - self.observer.stop() - self.observer.join() diff --git a/docker/test/integration/filesystem_validation/OutputEventHandler.py b/docker/test/integration/filesystem_validation/OutputEventHandler.py deleted file mode 100644 index 945aca15d0..0000000000 --- a/docker/test/integration/filesystem_validation/OutputEventHandler.py +++ /dev/null @@ -1,64 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import logging -import threading -import os -from utils import is_temporary_output_file - -from watchdog.events import FileSystemEventHandler - - -class OutputEventHandler(FileSystemEventHandler): - def __init__(self, done_event): - self.done_event = done_event - self.files_created_lock = threading.Lock() - self.files_created = set() - - def get_num_files_created(self): - with self.files_created_lock: - logging.info("file count created: %d", len(self.files_created)) - return len(self.files_created) - - def on_created(self, event): - if os.path.isfile(event.src_path) and not is_temporary_output_file(event.src_path): - logging.info("Output file created: %s", event.src_path) - with self.files_created_lock: - self.files_created.add(event.src_path) - self.done_event.set() - - def on_modified(self, event): - if os.path.isfile(event.src_path) and not is_temporary_output_file(event.src_path): - logging.info("Output file modified: %s", event.src_path) - with self.files_created_lock: - self.files_created.add(event.src_path) - self.done_event.set() - - def on_moved(self, event): - if os.path.isfile(event.dest_path): - logging.info("Output file moved from: %s to: %s", event.src_path, event.dest_path) - file_count_modified = False - if event.src_path in self.files_created: - self.files_created.remove(event.src_path) - file_count_modified = True - - if not is_temporary_output_file(event.dest_path): - with self.files_created_lock: - self.files_created.add(event.dest_path) - file_count_modified = True - - if file_count_modified: - self.done_event.set() diff --git a/docker/test/integration/filesystem_validation/__init__.py b/docker/test/integration/filesystem_validation/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docker/test/integration/minifi/__init__.py b/docker/test/integration/minifi/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docker/test/integration/minifi/controllers/JsonRecordSetWriter.py b/docker/test/integration/minifi/controllers/JsonRecordSetWriter.py deleted file mode 100644 index ca00764b5a..0000000000 --- a/docker/test/integration/minifi/controllers/JsonRecordSetWriter.py +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from ..core.ControllerService import ControllerService - - -class JsonRecordSetWriter(ControllerService): - def __init__(self, name=None, output_grouping='One Line Per Object'): - super(JsonRecordSetWriter, self).__init__(name=name) - self.service_class = 'JsonRecordSetWriter' - self.properties['Output Grouping'] = output_grouping diff --git a/docker/test/integration/minifi/controllers/JsonTreeReader.py b/docker/test/integration/minifi/controllers/JsonTreeReader.py deleted file mode 100644 index fe83ada66b..0000000000 --- a/docker/test/integration/minifi/controllers/JsonTreeReader.py +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.ControllerService import ControllerService - - -class JsonTreeReader(ControllerService): - def __init__(self, name=None): - super(JsonTreeReader, self).__init__(name=name) - self.service_class = 'JsonTreeReader' diff --git a/docker/test/integration/minifi/controllers/ODBCService.py b/docker/test/integration/minifi/controllers/ODBCService.py deleted file mode 100644 index 96fa1d800d..0000000000 --- a/docker/test/integration/minifi/controllers/ODBCService.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.ControllerService import ControllerService - - -class ODBCService(ControllerService): - def __init__(self, name=None, connection_string=None): - super(ODBCService, self).__init__(name=name) - - self.service_class = 'ODBCService' - - if connection_string is not None: - self.properties['Connection String'] = connection_string diff --git a/docker/test/integration/minifi/controllers/SSLContextService.py b/docker/test/integration/minifi/controllers/SSLContextService.py deleted file mode 100644 index af16368f35..0000000000 --- a/docker/test/integration/minifi/controllers/SSLContextService.py +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.ControllerService import ControllerService - - -class SSLContextService(ControllerService): - def __init__(self, name=None, cert=None, key=None, ca_cert=None, passphrase=None, use_system_cert_store=None): - super(SSLContextService, self).__init__(name=name) - - self.service_class = 'SSLContextService' - - if cert is not None: - self.properties['Client Certificate'] = cert - - if key is not None: - self.properties['Private Key'] = key - - if ca_cert is not None: - self.properties['CA Certificate'] = ca_cert - - if passphrase is not None: - self.properties['Passphrase'] = passphrase - - if use_system_cert_store is not None: - self.properties['Use System Cert Store'] = use_system_cert_store diff --git a/docker/test/integration/minifi/controllers/XMLReader.py b/docker/test/integration/minifi/controllers/XMLReader.py deleted file mode 100644 index 9873491029..0000000000 --- a/docker/test/integration/minifi/controllers/XMLReader.py +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from ..core.ControllerService import ControllerService - - -class XMLReader(ControllerService): - def __init__(self, name=None): - super(XMLReader, self).__init__(name=name) - self.service_class = 'XMLReader' diff --git a/docker/test/integration/minifi/controllers/XMLRecordSetWriter.py b/docker/test/integration/minifi/controllers/XMLRecordSetWriter.py deleted file mode 100644 index 82c2df6f3b..0000000000 --- a/docker/test/integration/minifi/controllers/XMLRecordSetWriter.py +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.ControllerService import ControllerService - - -class XMLRecordSetWriter(ControllerService): - def __init__(self, name=None): - super(XMLRecordSetWriter, self).__init__(name=name) - self.service_class = 'XMLRecordSetWriter' diff --git a/docker/test/integration/minifi/controllers/__init__.py b/docker/test/integration/minifi/controllers/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docker/test/integration/minifi/core/Connectable.py b/docker/test/integration/minifi/core/Connectable.py deleted file mode 100644 index dbf43544c9..0000000000 --- a/docker/test/integration/minifi/core/Connectable.py +++ /dev/null @@ -1,70 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import uuid - - -class Connectable(object): - def __init__(self, - name=None, - auto_terminate=None): - - self.uuid = uuid.uuid4() - self.instance_id = uuid.uuid4() - - if name is None: - self.name = "node_of_" + str(self.uuid) - else: - self.name = name - - if auto_terminate is None: - self.auto_terminate = [] - else: - self.auto_terminate = auto_terminate - - self.connections = {} - self.out_proc = self - - self.drop_empty_flowfiles = False - - def connect(self, connections): - for rel in connections: - - # Ensure that rel is not auto-terminated - if rel in self.auto_terminate: - del self.auto_terminate[self.auto_terminate.index(rel)] - - # Add to set of output connections for this rel - if rel not in self.connections: - self.connections[rel] = [] - self.connections[rel].append(connections[rel]) - - return self - - def get_name(self): - return self.name - - def set_name(self, name): - self.name = name - - def get_uuid(self): - return self.uuid - - def set_uuid(self, uuid): - self.uuid = uuid - - def id_for_connection(self): - return self.uuid diff --git a/docker/test/integration/minifi/core/ControllerService.py b/docker/test/integration/minifi/core/ControllerService.py deleted file mode 100644 index bc26c27bce..0000000000 --- a/docker/test/integration/minifi/core/ControllerService.py +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import uuid -import logging - - -class ControllerService(object): - def __init__(self, name=None, properties=None): - - self.id = str(uuid.uuid4()) - self.instance_id = str(uuid.uuid4()) - - if name is None: - self.name = str(uuid.uuid4()) - logging.info('Controller service name was not provided; using generated name \'%s\'', self.name) - else: - self.name = name - - if properties is None: - properties = {} - - self.properties = properties - self.linked_services = [] - - def set_property(self, name, value): - self.properties[name] = value diff --git a/docker/test/integration/minifi/core/Funnel.py b/docker/test/integration/minifi/core/Funnel.py deleted file mode 100644 index f58fdebc24..0000000000 --- a/docker/test/integration/minifi/core/Funnel.py +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from .Connectable import Connectable - - -class Funnel(Connectable): - def __init__(self, name=None): - super(Funnel, self).__init__(name=name) diff --git a/docker/test/integration/minifi/core/InputPort.py b/docker/test/integration/minifi/core/InputPort.py deleted file mode 100644 index 968ef76dc5..0000000000 --- a/docker/test/integration/minifi/core/InputPort.py +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from .Connectable import Connectable - - -class InputPort(Connectable): - def __init__(self, name=None, remote_process_group=None): - super(InputPort, self).__init__(name=name) - - self.remote_process_group = remote_process_group - self.use_compression = False - self.properties = {} - if self.remote_process_group: - self.properties = self.remote_process_group.properties - - def id_for_connection(self): - return self.instance_id - - def set_use_compression(self, use_compression: bool): - self.use_compression = use_compression diff --git a/docker/test/integration/minifi/core/OutputPort.py b/docker/test/integration/minifi/core/OutputPort.py deleted file mode 100644 index 5186ac330e..0000000000 --- a/docker/test/integration/minifi/core/OutputPort.py +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from .Connectable import Connectable - - -class OutputPort(Connectable): - def __init__(self, name=None, remote_process_group=None): - super(OutputPort, self).__init__(name=name) - - self.remote_process_group = remote_process_group - self.use_compression = False - self.properties = {} - if self.remote_process_group: - self.properties = self.remote_process_group.properties - - def id_for_connection(self): - return self.instance_id - - def set_use_compression(self, use_compression: bool): - self.use_compression = use_compression diff --git a/docker/test/integration/minifi/core/Processor.py b/docker/test/integration/minifi/core/Processor.py deleted file mode 100644 index 8dd0493fd0..0000000000 --- a/docker/test/integration/minifi/core/Processor.py +++ /dev/null @@ -1,85 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from .Connectable import Connectable - - -class Processor(Connectable): - def __init__(self, - context, - clazz, - properties=None, - schedule=None, - name=None, - controller_services=None, - auto_terminate=None, - class_prefix='org.apache.nifi.processors.standard.', - max_concurrent_tasks=1): - - super(Processor, self).__init__(name=name, - auto_terminate=auto_terminate) - - self.context = context - self.class_prefix = class_prefix - - if controller_services is None: - controller_services = [] - - if schedule is None: - schedule = {} - - if properties is None: - properties = {} - - self.clazz = clazz - - self.properties = properties - self.controller_services = controller_services - self.max_concurrent_tasks = max_concurrent_tasks - - self.schedule = { - 'scheduling strategy': 'TIMER_DRIVEN', - 'scheduling period': '1 sec', - 'penalization period': '30 sec', - 'yield period': '1 sec', - 'run duration nanos': 0 - } - self.schedule.update(schedule) - - def set_property(self, key, value): - if value.isdigit(): - self.properties[key] = int(value) - else: - self.properties[key] = value - - def set_max_concurrent_tasks(self, max_concurrent_tasks): - self.max_concurrent_tasks = max_concurrent_tasks - - def unset_property(self, key): - self.properties.pop(key, None) - - def set_scheduling_strategy(self, value): - self.schedule["scheduling strategy"] = value - - def set_scheduling_period(self, value): - self.schedule["scheduling period"] = value - - def nifi_property_key(self, key): - """ - Returns the Apache NiFi-equivalent property key for the given key. This is often, but not always, the same as - the internal key. - """ - return key diff --git a/docker/test/integration/minifi/core/RemoteProcessGroup.py b/docker/test/integration/minifi/core/RemoteProcessGroup.py deleted file mode 100644 index 84ecc8e6fc..0000000000 --- a/docker/test/integration/minifi/core/RemoteProcessGroup.py +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import uuid - - -class RemoteProcessGroup(object): - def __init__(self, url, name=None, transport_protocol="RAW"): - self.uuid = uuid.uuid4() - - if name is None: - self.name = str(self.uuid) - else: - self.name = name - - self.url = url - self.properties = {} - self.transport_protocol = transport_protocol - - def get_name(self): - return self.name - - def get_uuid(self): - return self.uuid - - def add_property(self, name, value): - self.properties[name] = value diff --git a/docker/test/integration/minifi/core/__init__.py b/docker/test/integration/minifi/core/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docker/test/integration/minifi/flow_serialization/Minifi_flow_json_serializer.py b/docker/test/integration/minifi/flow_serialization/Minifi_flow_json_serializer.py deleted file mode 100644 index 7082d78209..0000000000 --- a/docker/test/integration/minifi/flow_serialization/Minifi_flow_json_serializer.py +++ /dev/null @@ -1,209 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import uuid -import json - -from ..core.Processor import Processor -from ..core.InputPort import InputPort -from ..core.OutputPort import OutputPort -from ..core.Funnel import Funnel - - -class Minifi_flow_json_serializer: - def serialize(self, start_nodes, controllers, parameter_context_name: str, parameter_contexts): - res = { - 'parameterContexts': [], - 'rootGroup': { - 'name': 'MiNiFi Flow', - 'processors': [], - 'funnels': [], - 'connections': [], - 'remoteProcessGroups': [], - 'controllerServices': [], - 'inputPorts': [], - 'outputPorts': [] - } - } - visited = [] - - if parameter_context_name: - res['rootGroup']['parameterContextName'] = parameter_context_name - - if parameter_contexts: - for context_name in parameter_contexts: - res['parameterContexts'].append({ - 'identifier': str(uuid.uuid4()), - 'name': context_name, - 'parameters': [] - }) - for parameter in parameter_contexts[context_name]: - res['parameterContexts'][-1]['parameters'].append({ - 'name': parameter.name, - 'description': '', - 'sensitive': False, - 'value': parameter.value - }) - - for node in start_nodes: - self.serialize_node(node, res['rootGroup'], visited) - - for controller in controllers: - self.serialize_controller(controller, res['rootGroup']) - - return json.dumps(res) - - def serialize_node(self, connectable, root, visited): - visited.append(connectable) - - if hasattr(connectable, 'name'): - connectable_name = connectable.name - else: - connectable_name = str(connectable.uuid) - - if isinstance(connectable, InputPort): - group = connectable.remote_process_group - if group is None: - root['inputPorts'].append({ - 'name': connectable_name, - 'identifier': str(connectable.instance_id), - 'properties': connectable.properties - }) - else: - res_group = None - 
- for res_group_candidate in root['remoteProcessGroups']: - assert isinstance(res_group_candidate, dict) - if res_group_candidate['identifier'] == str(group.uuid): - res_group = res_group_candidate - - if res_group is None: - res_group = { - 'name': group.name, - 'identifier': str(group.uuid), - 'targetUri': group.url, - 'communicationsTimeout': '30 sec', - 'yieldDuration': '3 sec', - 'transportProtocol': group.transport_protocol, - 'inputPorts': [] - } - - root['remoteProcessGroups'].append(res_group) - - res_group['inputPorts'].append({ - 'identifier': str(connectable.instance_id), - 'name': connectable.name, - 'useCompression': connectable.use_compression, - 'properties': connectable.properties - }) - - if isinstance(connectable, OutputPort): - group = connectable.remote_process_group - if group is None: - root['outputPorts'].append({ - 'name': connectable_name, - 'identifier': str(connectable.instance_id), - 'properties': connectable.properties - }) - else: - res_group = None - - for res_group_candidate in root['remoteProcessGroups']: - assert isinstance(res_group_candidate, dict) - if res_group_candidate['identifier'] == str(group.uuid): - res_group = res_group_candidate - - if res_group is None: - res_group = { - 'name': group.name, - 'identifier': str(group.uuid), - 'targetUri': group.url, - 'communicationsTimeout': '30 sec', - 'yieldDuration': '3 sec', - 'transportProtocol': group.transport_protocol, - 'outputPorts': [] - } - - root['remoteProcessGroups'].append(res_group) - - res_group['outputPorts'].append({ - 'identifier': str(connectable.instance_id), - 'name': connectable.name, - 'properties': connectable.properties - }) - - if isinstance(connectable, Processor): - root['processors'].append({ - 'name': connectable_name, - 'identifier': str(connectable.uuid), - 'type': connectable.class_prefix + connectable.clazz, - 'schedulingStrategy': connectable.schedule['scheduling strategy'], - 'schedulingPeriod': connectable.schedule['scheduling period'], - 'penaltyDuration': connectable.schedule['penalization period'], - 'yieldDuration': connectable.schedule['yield period'], - 'runDurationMillis': connectable.schedule['run duration nanos'], - 'properties': connectable.properties, - 'autoTerminatedRelationships': connectable.auto_terminate, - 'concurrentlySchedulableTaskCount': connectable.max_concurrent_tasks - }) - - for svc in connectable.controller_services: - if svc in visited: - continue - - visited.append(svc) - self.serialize_controller(svc, root) - - if isinstance(connectable, Funnel): - root['funnels'].append({ - 'identifier': str(connectable.uuid) - }) - - for conn_name in connectable.connections: - conn_procs = connectable.connections[conn_name] - - if not isinstance(conn_procs, list): - conn_procs = [conn_procs] - - for proc in conn_procs: - root['connections'].append({ - 'name': str(uuid.uuid4()), - 'source': {'id': str(connectable.id_for_connection())}, - 'destination': {'id': str(proc.id_for_connection())} - }) - if (all(str(connectable.uuid) != x['identifier'] for x in root['funnels'])): - root['connections'][-1]['selectedRelationships'] = [conn_name] - if proc not in visited: - self.serialize_node(proc, root, visited) - - def serialize_controller(self, controller, root): - if hasattr(controller, 'name'): - connectable_name = controller.name - else: - connectable_name = str(controller.uuid) - - root['controllerServices'].append({ - 'name': connectable_name, - 'identifier': controller.id, - 'type': controller.service_class, - 'properties': controller.properties - }) - - 
if controller.linked_services: - if len(controller.linked_services) == 1: - root['controllerServices'][-1]['properties']['Linked Services'] = controller.linked_services[0].name - else: - root['controllerServices'][-1]['properties']['Linked Services'] = [{"value": service.name} for service in controller.linked_services] diff --git a/docker/test/integration/minifi/flow_serialization/Minifi_flow_yaml_serializer.py b/docker/test/integration/minifi/flow_serialization/Minifi_flow_yaml_serializer.py deleted file mode 100644 index c3e733d26e..0000000000 --- a/docker/test/integration/minifi/flow_serialization/Minifi_flow_yaml_serializer.py +++ /dev/null @@ -1,244 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import uuid -import yaml - -from ..core.Processor import Processor -from ..core.InputPort import InputPort -from ..core.OutputPort import OutputPort -from ..core.Funnel import Funnel - - -class Minifi_flow_yaml_serializer: - def serialize(self, start_nodes, controllers, parameter_context_name: str, parameter_contexts): - res = { - 'Flow Controller': { - 'name': 'MiNiFi Flow' - }, - 'Processors': [], - 'Funnels': [], - 'Connections': [], - 'Remote Processing Groups': [], - 'Controller Services': [], - 'Input Ports': [], - 'Output Ports': [] - } - visited = [] - - if parameter_context_name: - res['Parameter Context Name'] = parameter_context_name - - if parameter_contexts: - res['Parameter Contexts'] = [] - for context_name in parameter_contexts: - res['Parameter Contexts'].append({ - 'id': str(uuid.uuid4()), - 'name': context_name, - 'Parameters': [] - }) - for parameter in parameter_contexts[context_name]: - res['Parameter Contexts'][-1]['Parameters'].append({ - 'name': parameter.name, - 'description': '', - 'sensitive': False, - 'value': parameter.value - }) - - for node in start_nodes: - res, visited = self.serialize_node(node, res, visited) - - for controller in controllers: - res = self.serialize_controller(controller, res) - - return yaml.dump(res, default_flow_style=False) - - def serialize_node(self, connectable, res=None, visited=None): - visited.append(connectable) - - if hasattr(connectable, 'name'): - connectable_name = connectable.name - else: - connectable_name = str(connectable.uuid) - - if isinstance(connectable, InputPort): - group = connectable.remote_process_group - if group is None: - res['Input Ports'].append({ - 'name': connectable_name, - 'id': str(connectable.instance_id), - 'max concurrent tasks': 1, - 'Properties': connectable.properties - }) - else: - res_group = None - - for res_group_candidate in res['Remote Processing Groups']: - assert isinstance(res_group_candidate, dict) - if res_group_candidate['id'] == str(group.uuid): - res_group = res_group_candidate - - if res_group is None: - res_group = { - 'name': group.name, - 
'id': str(group.uuid), - 'url': group.url, - 'timeout': '30 sec', - 'yield period': '3 sec', - 'transport protocol': group.transport_protocol, - 'Input Ports': [] - } - - res['Remote Processing Groups'].append(res_group) - - res_group['Input Ports'].append({ - 'id': str(connectable.instance_id), - 'name': connectable.name, - 'use compression': connectable.use_compression, - 'max concurrent tasks': 1, - 'Properties': connectable.properties - }) - - if isinstance(connectable, OutputPort): - group = connectable.remote_process_group - if group is None: - res['Output Ports'].append({ - 'name': connectable_name, - 'id': str(connectable.instance_id), - 'max concurrent tasks': 1, - 'Properties': connectable.properties - }) - else: - res_group = None - - for res_group_candidate in res['Remote Processing Groups']: - assert isinstance(res_group_candidate, dict) - if res_group_candidate['id'] == str(group.uuid): - res_group = res_group_candidate - - if res_group is None: - res_group = { - 'name': group.name, - 'id': str(group.uuid), - 'url': group.url, - 'timeout': '30 sec', - 'yield period': '3 sec', - 'transport protocol': group.transport_protocol, - 'Output Ports': [] - } - - res['Remote Processing Groups'].append(res_group) - - res_group['Output Ports'].append({ - 'id': str(connectable.instance_id), - 'name': connectable.name, - 'max concurrent tasks': 1, - 'Properties': connectable.properties - }) - - if isinstance(connectable, Processor): - res['Processors'].append({ - 'name': connectable_name, - 'id': str(connectable.uuid), - 'class': connectable.class_prefix + connectable.clazz, - 'scheduling strategy': connectable.schedule['scheduling strategy'], - 'scheduling period': connectable.schedule['scheduling period'], - 'penalization period': connectable.schedule['penalization period'], - 'yield period': connectable.schedule['yield period'], - 'run duration nanos': connectable.schedule['run duration nanos'], - 'Properties': connectable.properties, - 'auto-terminated relationships list': connectable.auto_terminate, - 'max concurrent tasks': connectable.max_concurrent_tasks - }) - - for svc in connectable.controller_services: - if svc in visited: - continue - - visited.append(svc) - self._add_controller_service_node(svc, res) - - if isinstance(connectable, Funnel): - res['Funnels'].append({ - 'id': str(connectable.uuid) - }) - - for conn_name in connectable.connections: - conn_procs = connectable.connections[conn_name] - - if isinstance(conn_procs, list): - for proc in conn_procs: - res['Connections'].append({ - 'name': str(uuid.uuid4()), - 'source id': str(connectable.id_for_connection()), - 'destination id': str(proc.id_for_connection()), - 'drop empty': ("true" if proc.drop_empty_flowfiles else "false") - }) - if (all(str(connectable.uuid) != x['id'] for x in res['Funnels'])): - res['Connections'][-1]['source relationship name'] = conn_name - if proc not in visited: - self.serialize_node(proc, res, visited) - else: - res['Connections'].append({ - 'name': str(uuid.uuid4()), - 'source id': str(connectable.id_for_connection()), - 'destination id': str(conn_procs.id_for_connection()), - 'drop empty': ("true" if proc.drop_empty_flowfiles else "false") - }) - if (all(str(connectable.uuid) != x['id'] for x in res['Funnels'])): - res['Connections'][-1]['source relationship name'] = conn_name - if conn_procs not in visited: - self.serialize_node(conn_procs, res, visited) - - return (res, visited) - - def _add_controller_service_node(self, controller, parent): - if hasattr(controller, 'name'): - 
connectable_name = controller.name - else: - connectable_name = str(controller.uuid) - - parent['Controller Services'].append({ - 'name': connectable_name, - 'id': controller.id, - 'class': controller.service_class, - 'Properties': controller.properties - }) - - if controller.linked_services: - if len(controller.linked_services) == 1: - parent['Controller Services'][-1]['Properties']['Linked Services'] = controller.linked_services[0].name - else: - parent['Controller Services'][-1]['Properties']['Linked Services'] = [{"value": service.name} for service in controller.linked_services] - - def serialize_controller(self, controller, root=None): - if root is None: - res = { - 'Flow Controller': { - 'name': 'MiNiFi Flow' - }, - 'Processors': [], - 'Funnels': [], - 'Connections': [], - 'Remote Processing Groups': [], - 'Controller Services': [] - } - else: - res = root - - self._add_controller_service_node(controller, res) - - return res diff --git a/docker/test/integration/minifi/flow_serialization/Nifi_flow_json_serializer.py b/docker/test/integration/minifi/flow_serialization/Nifi_flow_json_serializer.py deleted file mode 100644 index 4b78127ad5..0000000000 --- a/docker/test/integration/minifi/flow_serialization/Nifi_flow_json_serializer.py +++ /dev/null @@ -1,250 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import uuid -import json - -from ..core.Processor import Processor -from ..core.InputPort import InputPort -from ..core.OutputPort import OutputPort - - -class Nifi_flow_json_serializer: - def serialize(self, start_nodes, nifi_version=None): - res = { - "encodingVersion": { - "majorVersion": 2, - "minorVersion": 0 - }, - "maxTimerDrivenThreadCount": 10, - "maxEventDrivenThreadCount": 1, - "registries": [], - "parameterContexts": [], - "parameterProviders": [], - "controllerServices": [], - "reportingTasks": [], - "templates": [], - "rootGroup": { - "identifier": "9802c873-3322-3b60-a71d-732d02bd60f8", - "instanceIdentifier": str(uuid.uuid4()), - "name": "NiFi Flow", - "comments": "", - "position": { - "x": 0, - "y": 0 - }, - "processGroups": [], - "remoteProcessGroups": [], - "processors": [], - "inputPorts": [], - "outputPorts": [], - "connections": [], - "labels": [], - "funnels": [], - "controllerServices": [], - "defaultFlowFileExpiration": "0 sec", - "defaultBackPressureObjectThreshold": 10000, - "defaultBackPressureDataSizeThreshold": "1 GB", - "scheduledState": "RUNNING", - "executionEngine": "INHERITED", - "maxConcurrentTasks": 1, - "statelessFlowTimeout": "1 min", - "flowFileConcurrency": "UNBOUNDED", - "flowFileOutboundPolicy": "STREAM_WHEN_AVAILABLE", - "componentType": "PROCESS_GROUP" - } - } - visited = [] - - for node in start_nodes: - self.serialize_node(node, nifi_version, res['rootGroup'], visited) - - return json.dumps(res) - - def serialize_node(self, connectable, nifi_version, root, visited): - if visited is None: - visited = [] - - visited.append(connectable) - - if hasattr(connectable, 'name'): - connectable_name_text = connectable.name - else: - connectable_name_text = str(connectable.uuid) - - if isinstance(connectable, InputPort): - root['inputPorts'].append({ - 'identifier': str(connectable.uuid), - 'instanceIdentifier': str(connectable.instance_id), - 'name': connectable_name_text, - "comments": "", - 'position': { - 'x': 0, - 'y': 0 - }, - 'type': 'INPUT_PORT', - 'concurrentlySchedulableTaskCount': 1, - 'scheduledState': 'RUNNING', - 'allowRemoteAccess': True, - 'portFunction': 'STANDARD', - 'componentType': 'INPUT_PORT', - "groupIdentifier": "9802c873-3322-3b60-a71d-732d02bd60f8" - }) - - if isinstance(connectable, OutputPort): - root['outputPorts'].append({ - 'identifier': str(connectable.uuid), - 'instanceIdentifier': str(connectable.instance_id), - 'name': connectable_name_text, - "comments": "", - 'position': { - 'x': 0, - 'y': 0 - }, - 'type': 'OUTPUT_PORT', - 'concurrentlySchedulableTaskCount': 1, - 'scheduledState': 'RUNNING', - 'allowRemoteAccess': True, - 'portFunction': 'STANDARD', - 'componentType': 'OUTPUT_PORT', - "groupIdentifier": "9802c873-3322-3b60-a71d-732d02bd60f8" - }) - - if isinstance(connectable, Processor): - root['processors'].append({ - "identifier": str(connectable.uuid), - "instanceIdentifier": str(connectable.instance_id), - "name": connectable_name_text, - "comments": "", - "position": { - "x": 0, - "y": 0 - }, - "type": 'org.apache.nifi.processors.standard.' 
+ connectable.clazz, - "bundle": { - "group": "org.apache.nifi", - "artifact": "nifi-standard-nar", - "version": nifi_version - }, - "properties": {key: value for key, value in connectable.properties.items() if connectable.nifi_property_key(key)}, - "propertyDescriptors": {}, - "style": {}, - "schedulingPeriod": "0 sec" if connectable.schedule['scheduling strategy'] == "EVENT_DRIVEN" else connectable.schedule['scheduling period'], - "schedulingStrategy": "TIMER_DRIVEN", - "executionNode": "ALL", - "penaltyDuration": connectable.schedule['penalization period'], - "yieldDuration": connectable.schedule['yield period'], - "bulletinLevel": "WARN", - "runDurationMillis": str(int(connectable.schedule['run duration nanos'] / 1000000)), - "concurrentlySchedulableTaskCount": connectable.max_concurrent_tasks, - "autoTerminatedRelationships": connectable.auto_terminate, - "scheduledState": "RUNNING", - "retryCount": 10, - "retriedRelationships": [], - "backoffMechanism": "PENALIZE_FLOWFILE", - "maxBackoffPeriod": "10 mins", - "componentType": "PROCESSOR", - "groupIdentifier": "9802c873-3322-3b60-a71d-732d02bd60f8" - }) - - for svc in connectable.controller_services: - if svc in visited: - continue - - root['processors'].append({ - "identifier": str(svc.id), - "instanceIdentifier": str(svc.instance_id), - "name": svc.name, - "type": svc.service_class, - "bundle": { - "group": svc.group, - "artifact": svc.artifact, - "version": nifi_version - }, - "properties": svc.properties, - "propertyDescriptors": {}, - "controllerServiceApis": [], - "scheduledState": "ENABLED", - "bulletinLevel": "WARN", - "componentType": "CONTROLLER_SERVICE" - }) - - visited.append(svc) - - for conn_name in connectable.connections: - conn_procs = connectable.connections[conn_name] - - if not isinstance(conn_procs, list): - conn_procs = [conn_procs] - - source_type = "" - if isinstance(connectable, Processor): - source_type = 'PROCESSOR' - elif isinstance(connectable, InputPort): - source_type = 'INPUT_PORT' - elif isinstance(connectable, OutputPort): - source_type = 'OUTPUT_PORT' - else: - raise Exception('Unexpected source type: %s' % type(connectable)) - - for proc in conn_procs: - dest_type = "" - if isinstance(proc, Processor): - dest_type = 'PROCESSOR' - elif isinstance(proc, InputPort): - dest_type = 'INPUT_PORT' - elif isinstance(proc, OutputPort): - dest_type = 'OUTPUT_PORT' - else: - raise Exception('Unexpected destination type: %s' % type(proc)) - - root['connections'].append({ - "identifier": str(uuid.uuid4()), - "instanceIdentifier": str(uuid.uuid4()), - "name": "", - "source": { - "id": str(connectable.uuid), - "type": source_type, - "groupId": "9802c873-3322-3b60-a71d-732d02bd60f8", - "name": conn_name, - "comments": "", - "instanceIdentifier": str(connectable.instance_id) - }, - "destination": { - "id": str(proc.uuid), - "type": dest_type, - "groupId": "9802c873-3322-3b60-a71d-732d02bd60f8", - "name": proc.name, - "comments": "", - "instanceIdentifier": str(proc.instance_id) - }, - "labelIndex": 1, - "zIndex": 0, - "selectedRelationships": [conn_name] if not isinstance(connectable, InputPort) and not isinstance(connectable, OutputPort) else [""], - "backPressureObjectThreshold": 10, - "backPressureDataSizeThreshold": "50 B", - "flowFileExpiration": "0 sec", - "prioritizers": [], - "bends": [], - "loadBalanceStrategy": "DO_NOT_LOAD_BALANCE", - "partitioningAttribute": "", - "loadBalanceCompression": "DO_NOT_COMPRESS", - "componentType": "CONNECTION", - "groupIdentifier": "9802c873-3322-3b60-a71d-732d02bd60f8" - }) - 
- if proc not in visited: - self.serialize_node(proc, nifi_version, root, visited) diff --git a/docker/test/integration/minifi/flow_serialization/__init__.py b/docker/test/integration/minifi/flow_serialization/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docker/test/integration/minifi/processors/AzureBlobStorageProcessorBase.py b/docker/test/integration/minifi/processors/AzureBlobStorageProcessorBase.py deleted file mode 100644 index 811c306856..0000000000 --- a/docker/test/integration/minifi/processors/AzureBlobStorageProcessorBase.py +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class AzureBlobStorageProcessorBase(Processor): - def __init__(self, context, clazz, additional_properties={}, schedule={"scheduling strategy": "TIMER_DRIVEN"}): - hostname = f"http://azure-storage-server-{context.feature_id}" - super(AzureBlobStorageProcessorBase, self).__init__( - context=context, - clazz=clazz, - properties={ - 'Container Name': 'test-container', - 'Connection String': 'DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint={hostname}:10000/devstoreaccount1;QueueEndpoint={hostname}:10001/devstoreaccount1;'.format(hostname=hostname), - 'Blob': 'test-blob', - **additional_properties - }, - schedule=schedule, - auto_terminate=['success', 'failure']) diff --git a/docker/test/integration/minifi/processors/ConvertRecord.py b/docker/test/integration/minifi/processors/ConvertRecord.py deleted file mode 100644 index 164648eb97..0000000000 --- a/docker/test/integration/minifi/processors/ConvertRecord.py +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from ..core.Processor import Processor - - -class ConvertRecord(Processor): - def __init__(self, context): - super(ConvertRecord, self).__init__( - context=context, - clazz='ConvertRecord', - auto_terminate=['success', 'failure']) diff --git a/docker/test/integration/minifi/processors/DeleteAzureBlobStorage.py b/docker/test/integration/minifi/processors/DeleteAzureBlobStorage.py deleted file mode 100644 index 356057447d..0000000000 --- a/docker/test/integration/minifi/processors/DeleteAzureBlobStorage.py +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from .AzureBlobStorageProcessorBase import AzureBlobStorageProcessorBase - - -class DeleteAzureBlobStorage(AzureBlobStorageProcessorBase): - def __init__(self, context): - super(DeleteAzureBlobStorage, self).__init__( - context=context, - clazz='DeleteAzureBlobStorage', - schedule={"scheduling strategy": "EVENT_DRIVEN"}) diff --git a/docker/test/integration/minifi/processors/ExecuteSQL.py b/docker/test/integration/minifi/processors/ExecuteSQL.py deleted file mode 100644 index e1570f67f8..0000000000 --- a/docker/test/integration/minifi/processors/ExecuteSQL.py +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class ExecuteSQL(Processor): - def __init__(self, context, schedule={'scheduling strategy': 'EVENT_DRIVEN'}): - super(ExecuteSQL, self).__init__( - context=context, - clazz='ExecuteSQL', - auto_terminate=['success'], - schedule=schedule) diff --git a/docker/test/integration/minifi/processors/ExecuteScript.py b/docker/test/integration/minifi/processors/ExecuteScript.py deleted file mode 100644 index 30eb9b4c1f..0000000000 --- a/docker/test/integration/minifi/processors/ExecuteScript.py +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class ExecuteScript(Processor): - def __init__(self, context): - super(ExecuteScript, self).__init__( - context=context, - clazz='ExecuteScript', - auto_terminate=['success', 'failure']) diff --git a/docker/test/integration/minifi/processors/FetchAzureBlobStorage.py b/docker/test/integration/minifi/processors/FetchAzureBlobStorage.py deleted file mode 100644 index e1bda7b148..0000000000 --- a/docker/test/integration/minifi/processors/FetchAzureBlobStorage.py +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from .AzureBlobStorageProcessorBase import AzureBlobStorageProcessorBase - - -class FetchAzureBlobStorage(AzureBlobStorageProcessorBase): - def __init__(self, context): - super(FetchAzureBlobStorage, self).__init__( - context=context, - clazz='FetchAzureBlobStorage', - schedule={"scheduling strategy": "EVENT_DRIVEN"}) diff --git a/docker/test/integration/minifi/processors/FetchFile.py b/docker/test/integration/minifi/processors/FetchFile.py deleted file mode 100644 index bfed609836..0000000000 --- a/docker/test/integration/minifi/processors/FetchFile.py +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from ..core.Processor import Processor - - -class FetchFile(Processor): - def __init__(self, context): - super(FetchFile, self).__init__( - context=context, - clazz='FetchFile', - schedule={"scheduling strategy": "EVENT_DRIVEN"}, - auto_terminate=['success']) diff --git a/docker/test/integration/minifi/processors/FetchOPCProcessor.py b/docker/test/integration/minifi/processors/FetchOPCProcessor.py deleted file mode 100644 index 32dd5335fd..0000000000 --- a/docker/test/integration/minifi/processors/FetchOPCProcessor.py +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class FetchOPCProcessor(Processor): - def __init__(self, context): - super(FetchOPCProcessor, self).__init__( - context=context, - clazz='FetchOPCProcessor', - auto_terminate=['success', 'failure']) diff --git a/docker/test/integration/minifi/processors/GaussianDistributionWithNumpy.py b/docker/test/integration/minifi/processors/GaussianDistributionWithNumpy.py deleted file mode 100644 index 52c225f524..0000000000 --- a/docker/test/integration/minifi/processors/GaussianDistributionWithNumpy.py +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class GaussianDistributionWithNumpy(Processor): - def __init__(self, context): - super(GaussianDistributionWithNumpy, self).__init__(context=context, - clazz='GaussianDistributionWithNumpy', - class_prefix='org.apache.nifi.minifi.processors.examples.') diff --git a/docker/test/integration/minifi/processors/GenerateFlowFile.py b/docker/test/integration/minifi/processors/GenerateFlowFile.py deleted file mode 100644 index 697b507bad..0000000000 --- a/docker/test/integration/minifi/processors/GenerateFlowFile.py +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class GenerateFlowFile(Processor): - def __init__(self, context, schedule={'scheduling period': '2 sec'}): - super(GenerateFlowFile, self).__init__( - context=context, - clazz='GenerateFlowFile', - schedule=schedule, - auto_terminate=['success']) diff --git a/docker/test/integration/minifi/processors/GetFile.py b/docker/test/integration/minifi/processors/GetFile.py deleted file mode 100644 index 9fa08044d5..0000000000 --- a/docker/test/integration/minifi/processors/GetFile.py +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class GetFile(Processor): - def __init__(self, context, input_dir="/tmp/input", schedule={'scheduling period': '2 sec'}): - super(GetFile, self).__init__( - context=context, - clazz='GetFile', - properties={ - 'Input Directory': input_dir, - }, - schedule=schedule, - auto_terminate=['success']) diff --git a/docker/test/integration/minifi/processors/HashContent.py b/docker/test/integration/minifi/processors/HashContent.py deleted file mode 100644 index 144650e26c..0000000000 --- a/docker/test/integration/minifi/processors/HashContent.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from ..core.Processor import Processor - - -class HashContent(Processor): - def __init__(self, context, schedule={"scheduling strategy": "EVENT_DRIVEN"}): - super(HashContent, self).__init__( - context=context, - clazz="HashContent", - properties={"Hash Attribute": "hash"}, - schedule=schedule, - auto_terminate=["success", "failure"]) diff --git a/docker/test/integration/minifi/processors/InvokeHTTP.py b/docker/test/integration/minifi/processors/InvokeHTTP.py deleted file mode 100644 index 6ef963d32a..0000000000 --- a/docker/test/integration/minifi/processors/InvokeHTTP.py +++ /dev/null @@ -1,46 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class InvokeHTTP(Processor): - def __init__( - self, - context, - ssl_context_service=None, - schedule={'scheduling strategy': 'EVENT_DRIVEN'}): - properties = { - "Proxy Host": "", - "Proxy Port": "", - "invokehttp-proxy-username": "", - "invokehttp-proxy-password": "" - } - - controller_services = [] - - if ssl_context_service is not None: - properties['SSL Context Service'] = ssl_context_service.name - controller_services.append(ssl_context_service) - - super(InvokeHTTP, self).__init__( - context=context, - clazz='InvokeHTTP', - properties=properties, - controller_services=controller_services, - auto_terminate=['success', 'response', 'retry', 'failure', 'no retry'], - schedule=schedule) - self.out_proc.connect({"failure": self}) diff --git a/docker/test/integration/minifi/processors/ListAzureBlobStorage.py b/docker/test/integration/minifi/processors/ListAzureBlobStorage.py deleted file mode 100644 index ad7186bcdd..0000000000 --- a/docker/test/integration/minifi/processors/ListAzureBlobStorage.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from ..core.Processor import Processor - - -class ListAzureBlobStorage(Processor): - def __init__(self, context): - connection_string = 'DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azure-storage-server-{feature_id}:10000/devstoreaccount1;QueueEndpoint=http://azure-storage-server-{feature_id}:10001/devstoreaccount1;'.format(feature_id=context.feature_id) - super(ListAzureBlobStorage, self).__init__( - context=context, - clazz='ListAzureBlobStorage', - properties={ - 'Container Name': 'test-container', - 'Connection String': connection_string}, - auto_terminate=['success']) diff --git a/docker/test/integration/minifi/processors/ListFile.py b/docker/test/integration/minifi/processors/ListFile.py deleted file mode 100644 index 649a8e8ea0..0000000000 --- a/docker/test/integration/minifi/processors/ListFile.py +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class ListFile(Processor): - def __init__(self, context, schedule={'scheduling period': '2 sec'}): - super(ListFile, self).__init__( - context=context, - clazz='ListFile', - schedule=schedule, - auto_terminate=['success']) diff --git a/docker/test/integration/minifi/processors/ListenHTTP.py b/docker/test/integration/minifi/processors/ListenHTTP.py deleted file mode 100644 index ef68c9bc12..0000000000 --- a/docker/test/integration/minifi/processors/ListenHTTP.py +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from ..core.Processor import Processor - - -class ListenHTTP(Processor): - def __init__(self, context, cert=None, schedule=None): - properties = {} - - if cert is not None: - properties['SSL Certificate'] = cert - properties['SSL Verify Peer'] = 'no' - - super(ListenHTTP, self).__init__( - context=context, - clazz='ListenHTTP', - properties=properties, - auto_terminate=['success'], - schedule=schedule) diff --git a/docker/test/integration/minifi/processors/ListenSyslog.py b/docker/test/integration/minifi/processors/ListenSyslog.py deleted file mode 100644 index 585cb69dad..0000000000 --- a/docker/test/integration/minifi/processors/ListenSyslog.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from ..core.Processor import Processor - - -class ListenSyslog(Processor): - def __init__(self, context, schedule=None): - properties = {} - - super(ListenSyslog, self).__init__( - context=context, - clazz='ListenSyslog', - properties=properties, - auto_terminate=['success'], - schedule=schedule) diff --git a/docker/test/integration/minifi/processors/LogAttribute.py b/docker/test/integration/minifi/processors/LogAttribute.py deleted file mode 100644 index f8f43f80da..0000000000 --- a/docker/test/integration/minifi/processors/LogAttribute.py +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class LogAttribute(Processor): - def __init__(self, context, schedule={'scheduling strategy': 'EVENT_DRIVEN'}): - super(LogAttribute, self).__init__( - context=context, - clazz='LogAttribute', - auto_terminate=['success'], - schedule=schedule) diff --git a/docker/test/integration/minifi/processors/PutAzureBlobStorage.py b/docker/test/integration/minifi/processors/PutAzureBlobStorage.py deleted file mode 100644 index f49a942bd1..0000000000 --- a/docker/test/integration/minifi/processors/PutAzureBlobStorage.py +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from .AzureBlobStorageProcessorBase import AzureBlobStorageProcessorBase - - -class PutAzureBlobStorage(AzureBlobStorageProcessorBase): - def __init__(self, context): - super(PutAzureBlobStorage, self).__init__( - context=context, - clazz='PutAzureBlobStorage', - additional_properties={'Create Container': 'true'}) diff --git a/docker/test/integration/minifi/processors/PutFile.py b/docker/test/integration/minifi/processors/PutFile.py deleted file mode 100644 index 5b9b665ff5..0000000000 --- a/docker/test/integration/minifi/processors/PutFile.py +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class PutFile(Processor): - def __init__(self, context, output_dir="/tmp/output", schedule={'scheduling strategy': 'EVENT_DRIVEN'}): - super(PutFile, self).__init__( - context=context, - clazz='PutFile', - properties={'Directory': output_dir, 'Directory Permissions': '777', 'Permissions': '777'}, - auto_terminate=['success', 'failure'], - schedule=schedule) - - def nifi_property_key(self, key): - if key == 'Directory Permissions': - return None - else: - return key diff --git a/docker/test/integration/minifi/processors/PutOPCProcessor.py b/docker/test/integration/minifi/processors/PutOPCProcessor.py deleted file mode 100644 index 6da447867c..0000000000 --- a/docker/test/integration/minifi/processors/PutOPCProcessor.py +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class PutOPCProcessor(Processor): - def __init__(self, context, schedule={'scheduling strategy': 'EVENT_DRIVEN'}): - super(PutOPCProcessor, self).__init__( - context=context, - clazz='PutOPCProcessor', - auto_terminate=['success', 'failure'], - schedule=schedule) diff --git a/docker/test/integration/minifi/processors/PutSQL.py b/docker/test/integration/minifi/processors/PutSQL.py deleted file mode 100644 index 64564ad642..0000000000 --- a/docker/test/integration/minifi/processors/PutSQL.py +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class PutSQL(Processor): - def __init__(self, context, schedule={'scheduling strategy': 'EVENT_DRIVEN'}): - super(PutSQL, self).__init__( - context=context, - clazz='PutSQL', - auto_terminate=['success'], - schedule=schedule) diff --git a/docker/test/integration/minifi/processors/QueryDatabaseTable.py b/docker/test/integration/minifi/processors/QueryDatabaseTable.py deleted file mode 100644 index aa3b4a036c..0000000000 --- a/docker/test/integration/minifi/processors/QueryDatabaseTable.py +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class QueryDatabaseTable(Processor): - def __init__(self, context): - super(QueryDatabaseTable, self).__init__( - context=context, - clazz='QueryDatabaseTable', - auto_terminate=['success']) diff --git a/docker/test/integration/minifi/processors/ReplaceText.py b/docker/test/integration/minifi/processors/ReplaceText.py deleted file mode 100644 index af5bfcf76d..0000000000 --- a/docker/test/integration/minifi/processors/ReplaceText.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class ReplaceText(Processor): - def __init__(self, context): - super(ReplaceText, self).__init__( - context=context, - clazz='ReplaceText', - properties={}, - schedule={'scheduling strategy': 'EVENT_DRIVEN'}, - auto_terminate=['success', 'failure']) diff --git a/docker/test/integration/minifi/processors/RouteOnAttribute.py b/docker/test/integration/minifi/processors/RouteOnAttribute.py deleted file mode 100644 index cf103999e6..0000000000 --- a/docker/test/integration/minifi/processors/RouteOnAttribute.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class RouteOnAttribute(Processor): - def __init__(self, context, schedule={'scheduling strategy': 'EVENT_DRIVEN'}): - super(RouteOnAttribute, self).__init__( - context=context, - clazz='RouteOnAttribute', - properties={}, - schedule=schedule, - auto_terminate=['unmatched', "failure"]) diff --git a/docker/test/integration/minifi/processors/RouteText.py b/docker/test/integration/minifi/processors/RouteText.py deleted file mode 100644 index 67d61c1c7b..0000000000 --- a/docker/test/integration/minifi/processors/RouteText.py +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from ..core.Processor import Processor - - -class RouteText(Processor): - def __init__(self, context, schedule={'scheduling strategy': 'EVENT_DRIVEN'}): - super(RouteText, self).__init__( - context=context, - clazz='RouteText', - schedule=schedule, - auto_terminate=['unmatched', "matched", "original"]) diff --git a/docker/test/integration/minifi/processors/RunLlamaCppInference.py b/docker/test/integration/minifi/processors/RunLlamaCppInference.py deleted file mode 100644 index fd69d7d435..0000000000 --- a/docker/test/integration/minifi/processors/RunLlamaCppInference.py +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class RunLlamaCppInference(Processor): - def __init__(self, context, schedule={'scheduling strategy': 'EVENT_DRIVEN'}): - super(RunLlamaCppInference, self).__init__( - context=context, - clazz='RunLlamaCppInference', - auto_terminate=['success', 'failure'], - schedule=schedule) diff --git a/docker/test/integration/minifi/processors/SplitText.py b/docker/test/integration/minifi/processors/SplitText.py deleted file mode 100644 index e8298f3ab2..0000000000 --- a/docker/test/integration/minifi/processors/SplitText.py +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from ..core.Processor import Processor - - -class SplitText(Processor): - def __init__(self, context, schedule={'scheduling strategy': 'EVENT_DRIVEN'}): - super(SplitText, self).__init__( - context=context, - clazz='SplitText', - schedule=schedule, - auto_terminate=['original', "splits", "failure"]) diff --git a/docker/test/integration/minifi/processors/TailFile.py b/docker/test/integration/minifi/processors/TailFile.py deleted file mode 100644 index 6649dcc510..0000000000 --- a/docker/test/integration/minifi/processors/TailFile.py +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class TailFile(Processor): - def __init__(self, context, filename="/tmp/input/test_file.log"): - super(TailFile, self).__init__(context=context, - clazz='TailFile', - properties={'File to Tail': filename}, - auto_terminate=['success']) diff --git a/docker/test/integration/minifi/processors/UpdateAttribute.py b/docker/test/integration/minifi/processors/UpdateAttribute.py deleted file mode 100644 index c66b598ac6..0000000000 --- a/docker/test/integration/minifi/processors/UpdateAttribute.py +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from ..core.Processor import Processor - - -class UpdateAttribute(Processor): - def __init__(self, context, schedule={'scheduling strategy': 'EVENT_DRIVEN'}): - super(UpdateAttribute, self).__init__( - context=context, - clazz='UpdateAttribute', - auto_terminate=['success'], - schedule=schedule) diff --git a/docker/test/integration/minifi/processors/__init__.py b/docker/test/integration/minifi/processors/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docker/test/integration/minifi/validators/EmptyFilesOutPutValidator.py b/docker/test/integration/minifi/validators/EmptyFilesOutPutValidator.py deleted file mode 100644 index d34abfbcc3..0000000000 --- a/docker/test/integration/minifi/validators/EmptyFilesOutPutValidator.py +++ /dev/null @@ -1,44 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging -import os - -from os import listdir - -from .FileOutputValidator import FileOutputValidator - - -class EmptyFilesOutPutValidator(FileOutputValidator): - - """ - Validates if all the files in the target directory are empty and at least one exists - """ - def __init__(self): - self.valid = False - - def validate(self, dir=''): - - if self.valid: - return True - - full_dir = self.output_dir + dir - logging.info("Output folder: %s", full_dir) - listing = listdir(full_dir) - if listing: - self.valid = 0 < self.get_num_files(full_dir) and all(os.path.getsize(os.path.join(full_dir, x)) == 0 for x in listing) - - return self.valid diff --git a/docker/test/integration/minifi/validators/FileOutputValidator.py b/docker/test/integration/minifi/validators/FileOutputValidator.py deleted file mode 100644 index d8b9fb4902..0000000000 --- a/docker/test/integration/minifi/validators/FileOutputValidator.py +++ /dev/null @@ -1,95 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import logging -import os -import re - -from os import listdir -from os.path import join -from utils import is_temporary_output_file - -from .OutputValidator import OutputValidator - - -class FileOutputValidator(OutputValidator): - def set_output_dir(self, output_dir): - self.output_dir = output_dir - - @staticmethod - def num_files_matching_regex_in_dir(dir_path: str, expected_content_regex: str): - listing = listdir(dir_path) - if not listing: - return 0 - files_of_matching_content_found = 0 - for file_name in listing: - full_path = join(dir_path, file_name) - if not os.path.isfile(full_path) or is_temporary_output_file(full_path): - continue - with open(full_path, 'r') as out_file: - content = out_file.read() - if re.search(expected_content_regex, content): - files_of_matching_content_found += 1 - return files_of_matching_content_found - - @staticmethod - def num_files_matching_content_in_dir(dir_path, expected_content): - listing = listdir(dir_path) - if not listing: - return 0 - files_of_matching_content_found = 0 - for file_name in listing: - full_path = join(dir_path, file_name) - if not os.path.isfile(full_path) or is_temporary_output_file(full_path): - continue - with open(full_path, 'r') as out_file: - contents = out_file.read() - logging.info("dir %s -- name %s", dir_path, file_name) - logging.info("expected content: %s -- actual: %s, match: %r", expected_content, contents, expected_content in contents) - if expected_content in contents: - files_of_matching_content_found += 1 - return files_of_matching_content_found - - @staticmethod - def get_num_files(dir_path): - listing = listdir(dir_path) - logging.info("Num files in %s: %d", dir_path, len(listing)) - if not listing: - return 0 - files_found = 0 - for file_name in listing: - full_path = join(dir_path, file_name) - if os.path.isfile(full_path) and not is_temporary_output_file(full_path): - logging.info("Found output file in %s: %s", dir_path, file_name) - files_found += 1 - return files_found - - @staticmethod - def get_num_files_with_min_size(dir_path: str, min_size: int): - listing = listdir(dir_path) - logging.info("Num files in %s: %d", dir_path, len(listing)) - if not listing: - return 0 - files_found = 0 - for file_name in listing: - full_path = join(dir_path, file_name) - if os.path.isfile(full_path) and not is_temporary_output_file(full_path) and os.path.getsize(full_path) >= min_size: - logging.info("Found output file in %s: %s", dir_path, file_name) - files_found += 1 - return files_found - - def validate(self, dir=''): - pass diff --git a/docker/test/integration/minifi/validators/MultiFileOutputValidator.py b/docker/test/integration/minifi/validators/MultiFileOutputValidator.py deleted file mode 100644 index cbe454788e..0000000000 --- a/docker/test/integration/minifi/validators/MultiFileOutputValidator.py +++ /dev/null @@ -1,81 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging -import os - -from os import listdir -from os.path import join -from utils import is_temporary_output_file - -from .FileOutputValidator import FileOutputValidator - - -class MultiFileOutputValidator(FileOutputValidator): - """ - Validates the number of files created and/or the content of multiple files in the given directory, also verifying that the old files are not rewritten. - """ - - def __init__(self, expected_file_count, expected_content=[]): - self.expected_file_count = expected_file_count - self.file_timestamps = dict() - self.expected_content = expected_content - - def check_expected_content(self, full_dir): - if not self.expected_content: - return True - - for content in self.expected_content: - if self.num_files_matching_content_in_dir(full_dir, content) == 0: - return False - - return True - - def validate(self): - full_dir = os.path.join(self.output_dir) - logging.info("Output folder: %s", full_dir) - - if not os.path.isdir(full_dir): - return False - - listing = listdir(full_dir) - if not listing: - return False - - for out_file_name in listing: - logging.info("name:: %s", out_file_name) - - full_path = join(full_dir, out_file_name) - if not os.path.isfile(full_path) or is_temporary_output_file(full_path): - return False - - logging.info("dir %s -- name %s", full_dir, out_file_name) - logging.info("expected file count %d -- current file count %d", self.expected_file_count, len(self.file_timestamps)) - - if full_path in self.file_timestamps and self.file_timestamps[full_path] != os.path.getmtime(full_path): - logging.error("Last modified timestamp changed for %s", full_path) - return False - - self.file_timestamps[full_path] = os.path.getmtime(full_path) - logging.info("New file added %s", full_path) - - if self.expected_file_count != 0 and len(self.file_timestamps) != self.expected_file_count: - return False - - if len(self.file_timestamps) >= len(self.expected_content): - return self.check_expected_content(full_dir) - - return False diff --git a/docker/test/integration/minifi/validators/NoContentCheckFileNumberValidator.py b/docker/test/integration/minifi/validators/NoContentCheckFileNumberValidator.py deleted file mode 100644 index 5d2f964c00..0000000000 --- a/docker/test/integration/minifi/validators/NoContentCheckFileNumberValidator.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging -import os - -from .FileOutputValidator import FileOutputValidator - - -class NoContentCheckFileNumberValidator(FileOutputValidator): - """ - Validates the number of files created without content validation. 
- """ - - def __init__(self, num_files_expected): - self.num_files_expected = num_files_expected - - def validate(self): - full_dir = os.path.join(self.output_dir) - logging.info("Output folder: %s", full_dir) - - if not os.path.isdir(full_dir): - return False - - return self.num_files_expected == self.get_num_files(full_dir) diff --git a/docker/test/integration/minifi/validators/NoFileOutPutValidator.py b/docker/test/integration/minifi/validators/NoFileOutPutValidator.py deleted file mode 100644 index dc4fd4fdba..0000000000 --- a/docker/test/integration/minifi/validators/NoFileOutPutValidator.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging -import os - -from .FileOutputValidator import FileOutputValidator - - -class NoFileOutPutValidator(FileOutputValidator): - """ - Validates if no flowfiles were transferred - """ - def validate(self): - full_dir = os.path.join(self.output_dir) - logging.info("Output folder: %s", full_dir) - - return os.path.isdir(full_dir) and 0 == self.get_num_files(full_dir) diff --git a/docker/test/integration/minifi/validators/NumFileRangeAndFileSizeValidator.py b/docker/test/integration/minifi/validators/NumFileRangeAndFileSizeValidator.py deleted file mode 100644 index 9394ddbb72..0000000000 --- a/docker/test/integration/minifi/validators/NumFileRangeAndFileSizeValidator.py +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import logging -import os - -from .FileOutputValidator import FileOutputValidator - - -class NumFileRangeAndFileSizeValidator(FileOutputValidator): - def __init__(self, min_files: int, max_files: int, min_size: int): - self.min_files = min_files - self.max_files = max_files - self.min_size = min_size - - def validate(self): - full_dir = os.path.join(self.output_dir) - logging.info("Output folder: %s", full_dir) - - if not os.path.isdir(full_dir): - return False - - num_files = self.get_num_files_with_min_size(full_dir, self.min_size) - logging.info("Number of files with min size %d generated: %d", self.min_size, num_files) - return self.min_files <= num_files and num_files <= self.max_files diff --git a/docker/test/integration/minifi/validators/NumFileRangeValidator.py b/docker/test/integration/minifi/validators/NumFileRangeValidator.py deleted file mode 100644 index 164c299ac7..0000000000 --- a/docker/test/integration/minifi/validators/NumFileRangeValidator.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging -import os - -from .FileOutputValidator import FileOutputValidator - - -class NumFileRangeValidator(FileOutputValidator): - - def __init__(self, min_files, max_files): - self.min_files = min_files - self.max_files = max_files - - def validate(self): - full_dir = os.path.join(self.output_dir) - logging.info("Output folder: %s", full_dir) - - if not os.path.isdir(full_dir): - return False - - num_files = self.get_num_files(full_dir) - logging.info("Number of files generated: %d", num_files) - return self.min_files <= num_files and num_files <= self.max_files diff --git a/docker/test/integration/minifi/validators/OutputValidator.py b/docker/test/integration/minifi/validators/OutputValidator.py deleted file mode 100644 index 8f47aae597..0000000000 --- a/docker/test/integration/minifi/validators/OutputValidator.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class OutputValidator(object): - """ - Base output validator class. 
Validators must implement - method validate, which returns a boolean. - """ - - def validate(self): - """ - Return True if output is valid; False otherwise. - """ - raise NotImplementedError("validate function needs to be implemented for validators") diff --git a/docker/test/integration/minifi/validators/SegfaultValidator.py b/docker/test/integration/minifi/validators/SegfaultValidator.py deleted file mode 100644 index 9fbe7625b5..0000000000 --- a/docker/test/integration/minifi/validators/SegfaultValidator.py +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from .OutputValidator import OutputValidator - - -class SegfaultValidator(OutputValidator): - """ - Validate that a file was received. - """ - def validate(self): - return True diff --git a/docker/test/integration/minifi/validators/SingleFileOutputValidator.py b/docker/test/integration/minifi/validators/SingleFileOutputValidator.py deleted file mode 100644 index d0ebf26922..0000000000 --- a/docker/test/integration/minifi/validators/SingleFileOutputValidator.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging -import os - -from .FileOutputValidator import FileOutputValidator - - -class SingleFileOutputValidator(FileOutputValidator): - """ - Validates the content of a single file in the given directory. 
- """ - - def __init__(self, expected_content): - self.expected_content = expected_content - - def validate(self): - full_dir = os.path.join(self.output_dir) - logging.info("Output folder: %s", full_dir) - - if not os.path.isdir(full_dir): - return False - - return self.get_num_files(full_dir) == 1 and self.num_files_matching_content_in_dir(full_dir, self.expected_content) == 1 diff --git a/docker/test/integration/minifi/validators/SingleJSONFileOutputValidator.py b/docker/test/integration/minifi/validators/SingleJSONFileOutputValidator.py deleted file mode 100644 index 7c8be2dd03..0000000000 --- a/docker/test/integration/minifi/validators/SingleJSONFileOutputValidator.py +++ /dev/null @@ -1,55 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging -import os -import json - -from .FileOutputValidator import FileOutputValidator -from utils import is_temporary_output_file - - -class SingleJSONFileOutputValidator(FileOutputValidator): - """ - Validates the content of a single file in the given directory. - """ - - def __init__(self, expected_content): - self.expected_content = json.loads(expected_content) - - def file_matches_json_content(self, dir_path, expected_json_content): - listing = os.listdir(dir_path) - if not listing: - return 0 - for file_name in listing: - full_path = os.path.join(dir_path, file_name) - if not os.path.isfile(full_path) or is_temporary_output_file(full_path): - continue - with open(full_path, 'r') as out_file: - file_json_content = json.loads(out_file.read()) - if file_json_content != expected_json_content: - print(f"JSON doesnt match actual: {file_json_content}, expected: {expected_json_content}") - return file_json_content == expected_json_content - return False - - def validate(self): - full_dir = os.path.join(self.output_dir) - logging.info("Output folder: %s", full_dir) - - if not os.path.isdir(full_dir): - return False - - return self.get_num_files(full_dir) == 1 and self.file_matches_json_content(full_dir, self.expected_content) diff --git a/docker/test/integration/minifi/validators/SingleOrMultiFileOutputRegexValidator.py b/docker/test/integration/minifi/validators/SingleOrMultiFileOutputRegexValidator.py deleted file mode 100644 index db4c734855..0000000000 --- a/docker/test/integration/minifi/validators/SingleOrMultiFileOutputRegexValidator.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging -import os - -from .FileOutputValidator import FileOutputValidator - - -class SingleOrMultiFileOutputRegexValidator(FileOutputValidator): - """ - Validates the content of a single or multiple files in the given directory. - """ - - def __init__(self, expected_content_regex: str): - self.expected_content_regex = expected_content_regex - - def validate(self): - full_dir = os.path.join(self.output_dir) - logging.info("Output folder: %s", full_dir) - - if not os.path.isdir(full_dir): - return False - - return 0 < self.num_files_matching_regex_in_dir(full_dir, self.expected_content_regex) diff --git a/docker/test/integration/minifi/validators/SingleOrMultiFileOutputValidator.py b/docker/test/integration/minifi/validators/SingleOrMultiFileOutputValidator.py deleted file mode 100644 index 4d8a28501f..0000000000 --- a/docker/test/integration/minifi/validators/SingleOrMultiFileOutputValidator.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging -import os - -from .FileOutputValidator import FileOutputValidator - - -class SingleOrMultiFileOutputValidator(FileOutputValidator): - """ - Validates the content of a single or multiple files in the given directory. 
- """ - - def __init__(self, expected_content): - self.expected_content = expected_content - - def validate(self): - full_dir = os.path.join(self.output_dir) - logging.info("Output folder: %s", full_dir) - - if not os.path.isdir(full_dir): - return False - - return 0 < self.num_files_matching_content_in_dir(full_dir, self.expected_content) diff --git a/docker/test/integration/minifi/validators/__init__.py b/docker/test/integration/minifi/validators/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docker/test/integration/resources/minifi/minifi-log.properties b/docker/test/integration/resources/minifi/minifi-log.properties deleted file mode 100644 index 88b580cf59..0000000000 --- a/docker/test/integration/resources/minifi/minifi-log.properties +++ /dev/null @@ -1,4 +0,0 @@ -spdlog.pattern=[%Y-%m-%d %H:%M:%S.%e] [%n] [%l] %v -appender.stderr=stderr -logger.root=TRACE,stderr -logger.org::apache::nifi::minifi=TRACE,stderr diff --git a/docker/test/integration/resources/minifi/minifi.properties b/docker/test/integration/resources/minifi/minifi.properties deleted file mode 100644 index c5ba7cf8e4..0000000000 --- a/docker/test/integration/resources/minifi/minifi.properties +++ /dev/null @@ -1,6 +0,0 @@ -nifi.administrative.yield.duration=30 sec -nifi.bored.yield.duration=100 millis -nifi.provenance.repository.max.storage.time=1 MIN -nifi.provenance.repository.max.storage.size=1 MB -nifi.database.content.repository.directory.default=${MINIFI_HOME}/content_repository -nifi.flow.engine.threads=5 diff --git a/docker/test/integration/ssl_utils/SSL_cert_utils.py b/docker/test/integration/ssl_utils/SSL_cert_utils.py deleted file mode 100644 index d39cf96460..0000000000 --- a/docker/test/integration/ssl_utils/SSL_cert_utils.py +++ /dev/null @@ -1,161 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import time -import logging -import random - -from M2Crypto import X509, EVP, RSA, ASN1 -from OpenSSL import crypto - - -def gen_cert(): - """ - Generate TLS certificate request for testing - """ - - req, key = gen_req() - pub_key = req.get_pubkey() - subject = req.get_subject() - cert = X509.X509() - # noinspection PyTypeChecker - cert.set_serial_number(1) - cert.set_version(2) - cert.set_subject(subject) - t = int(time.time()) - now = ASN1.ASN1_UTCTIME() - now.set_time(t) - now_plus_year = ASN1.ASN1_UTCTIME() - now_plus_year.set_time(t + 60 * 60 * 24 * 365) - cert.set_not_before(now) - cert.set_not_after(now_plus_year) - issuer = X509.X509_Name() - issuer.C = 'US' - issuer.CN = 'minifi-listen' - cert.set_issuer(issuer) - cert.set_pubkey(pub_key) - cert.sign(key, 'sha256') - - return cert, key - - -def rsa_gen_key_callback(): - pass - - -def gen_req(): - """ - Generate TLS certificate request for testing - """ - - logging.info('Generating test certificate request') - key = EVP.PKey() - req = X509.Request() - rsa = RSA.gen_key(1024, 65537, rsa_gen_key_callback) - key.assign_rsa(rsa) - req.set_pubkey(key) - name = req.get_subject() - name.C = 'US' - name.CN = 'minifi-listen' - req.sign(key, 'sha256') - - return req, key - - -def make_self_signed_cert(common_name): - ca_key = crypto.PKey() - ca_key.generate_key(crypto.TYPE_RSA, 2048) - - ca_cert = crypto.X509() - ca_cert.set_version(2) - ca_cert.set_serial_number(random.randint(50000000, 100000000)) - - ca_subj = ca_cert.get_subject() - ca_subj.commonName = common_name - - ca_cert.add_extensions([ - crypto.X509Extension(b"subjectKeyIdentifier", False, b"hash", subject=ca_cert), - ]) - - ca_cert.add_extensions([ - crypto.X509Extension(b"authorityKeyIdentifier", False, b"keyid:always", issuer=ca_cert), - ]) - - ca_cert.add_extensions([ - crypto.X509Extension(b"basicConstraints", False, b"CA:TRUE"), - crypto.X509Extension(b"keyUsage", False, b"keyCertSign, cRLSign"), - ]) - - ca_cert.set_issuer(ca_subj) - ca_cert.set_pubkey(ca_key) - - ca_cert.gmtime_adj_notBefore(0) - ca_cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60) - - ca_cert.sign(ca_key, 'sha256') - - return ca_cert, ca_key - - -def _make_cert(common_name, ca_cert, ca_key, extended_key_usage=None): - key = crypto.PKey() - key.generate_key(crypto.TYPE_RSA, 2048) - - cert = crypto.X509() - cert.set_version(2) - cert.set_serial_number(random.randint(50000000, 100000000)) - - client_subj = cert.get_subject() - client_subj.commonName = common_name - - cert.add_extensions([ - crypto.X509Extension(b"basicConstraints", False, b"CA:FALSE"), - crypto.X509Extension(b"subjectKeyIdentifier", False, b"hash", subject=cert), - ]) - - extensions = [crypto.X509Extension(b"authorityKeyIdentifier", False, b"keyid:always", issuer=ca_cert), - crypto.X509Extension(b"keyUsage", False, b"digitalSignature")] - - if extended_key_usage: - extensions.append(crypto.X509Extension(b"extendedKeyUsage", False, extended_key_usage)) - - cert.add_extensions([ - crypto.X509Extension(b"subjectAltName", False, b"DNS.1:" + common_name.encode()) - ]) - - cert.add_extensions(extensions) - - cert.set_issuer(ca_cert.get_subject()) - cert.set_pubkey(key) - - cert.gmtime_adj_notBefore(0) - cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60) - - cert.sign(ca_key, 'sha256') - - return cert, key - - -def make_client_cert(common_name, ca_cert, ca_key): - return _make_cert(common_name=common_name, ca_cert=ca_cert, ca_key=ca_key, extended_key_usage=b"clientAuth") - - -def make_server_cert(common_name, ca_cert, ca_key): - return 
_make_cert(common_name=common_name, ca_cert=ca_cert, ca_key=ca_key, extended_key_usage=b"serverAuth") - - -def make_cert_without_extended_usage(common_name, ca_cert, ca_key): - return _make_cert(common_name=common_name, ca_cert=ca_cert, ca_key=ca_key, extended_key_usage=None) diff --git a/docker/test/integration/ssl_utils/__init__.py b/docker/test/integration/ssl_utils/__init__.py deleted file mode 100644 index e69de29bb2..0000000000