diff --git a/.github/workflows/build_unittest_container.yml b/.github/workflows/build_unittest_container.yml new file mode 100644 index 00000000..254543db --- /dev/null +++ b/.github/workflows/build_unittest_container.yml @@ -0,0 +1,45 @@ +# CI step to build the unittest container to be used for the other CI steps +# publish the container in the github container registry + +name: Checks + +# This workflow should only run when the Dockerfile changes +on: + push: + paths: + - docker/Dockerfile + pull_request: + paths: + - docker/Dockerfile +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }}-crown-unittest + +jobs: + build_unittest_container: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Log in to the Container registry + uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + - name: Build and push Docker image + uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4 + with: + context: . 
+ file: docker/Dockerfile + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} \ No newline at end of file diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index f6bbcd6b..95bc5186 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -25,21 +25,12 @@ env: jobs: build_project: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 container: - image: rootproject/root:6.28.04-arch + image: ghcr.io/kit-cms/crown-crown-unittest:pr-260 options: --user 0 # run as root steps: - - name: update Arch keyring - run: pacman -Sc --noconfirm && pacman -Syy --noconfirm && pacman -Sy archlinux-keyring --noconfirm - - - name: Install missing software - run: pacman -Syu --noconfirm cmake make git python-pip openmp openmpi boost openssh --ignore root --ignore openssl - - - name: Install python packages - run: python -m pip install GitPython --break-system-packages && python -m pip install git+https://github.com/cms-nanoAOD/correctionlib.git --break-system-packages - - name: Clone project uses: actions/checkout@v3 with: @@ -57,7 +48,7 @@ jobs: - name: Configure CMake shell: bash - run: cd ${{github.workspace}}/build && cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DANALYSIS=$ANALYSIS -DCONFIG=$CONFIG -DSAMPLES=$SAMPLES -DERAS=$ERAS -DSCOPES=$SCOPES -DSHIFTS=$SHIFTS -DOPTIMIZED=false + run: cd ${{github.workspace}}/build && cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DANALYSIS=$ANALYSIS -DCONFIG=$CONFIG -DSAMPLES=$SAMPLES -DERAS=$ERAS -DSCOPES=$SCOPES -DSHIFTS=$SHIFTS -DOPTIMIZED=false -DONNXRUNTIME_INCLUDE_DIR=/opt/onnxruntime - name: Build shell: bash @@ -68,21 +59,12 @@ jobs: run: cd ${{github.workspace}}/build && ctest -V --label-regex "ntuple.*." 
build_single_friend: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 container: - image: rootproject/root:6.28.04-arch + image: ghcr.io/kit-cms/crown-crown-unittest:pr-260 options: --user 0 # run as root steps: - - name: update Arch keyring - run: pacman -Sc --noconfirm && pacman -Syy --noconfirm && pacman -Sy archlinux-keyring --noconfirm - - - name: Install missing software - run: pacman -Syu --noconfirm cmake make git python-pip openmp openmpi boost openssh --ignore root --ignore openssl - - - - name: Install python packages - run: python -m pip install GitPython --break-system-packages && python -m pip install git+https://github.com/cms-nanoAOD/correctionlib.git --break-system-packages - name: Clone project uses: actions/checkout@v3 @@ -101,7 +83,7 @@ jobs: - name: Configure CMake shell: bash - run: cd ${{github.workspace}}/build && cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DANALYSIS=$ANALYSIS -DCONFIG=$SINGLE_FRIEND_CONFIG -DSAMPLES=$SAMPLES -DERAS=$ERAS -DSCOPES=$SINGLE_FRIEND_SCOPES -DSHIFTS=$FRIEND_SHIFTS -DOPTIMIZED=false -DQUANTITIESMAP="dummy" + run: cd ${{github.workspace}}/build && cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DANALYSIS=$ANALYSIS -DCONFIG=$SINGLE_FRIEND_CONFIG -DSAMPLES=$SAMPLES -DERAS=$ERAS -DSCOPES=$SINGLE_FRIEND_SCOPES -DSHIFTS=$FRIEND_SHIFTS -DOPTIMIZED=false -DQUANTITIESMAP="dummy" -DONNXRUNTIME_INCLUDE_DIR=/opt/onnxruntime - name: Build shell: bash @@ -112,22 +94,12 @@ jobs: run: cd ${{github.workspace}}/build && ctest -V --label-regex "single_friend.*." 
build_two_friends: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 container: - image: rootproject/root:6.28.04-arch + image: ghcr.io/kit-cms/crown-crown-unittest:pr-260 options: --user 0 # run as root steps: - - name: update Arch keyring - run: pacman -Sc --noconfirm && pacman -Syy --noconfirm && pacman -Sy archlinux-keyring --noconfirm - - - name: Install missing software - run: pacman -Syu --noconfirm cmake make git python-pip openmp openmpi boost openssh --ignore root --ignore openssl - - - - name: Install python packages - run: python -m pip install GitPython --break-system-packages && python -m pip install git+https://github.com/cms-nanoAOD/correctionlib.git --break-system-packages - - name: Clone project uses: actions/checkout@v3 with: @@ -145,7 +117,7 @@ jobs: - name: Configure CMake shell: bash - run: cd ${{github.workspace}}/build && cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DANALYSIS=$ANALYSIS -DCONFIG=$TWO_FRIENDS_CONFIG -DSAMPLES=$SAMPLES -DERAS=$ERAS -DSCOPES=$TWO_FRIENDS_SCOPES -DSHIFTS=$FRIEND_SHIFTS -DOPTIMIZED=false -DQUANTITIESMAP="dummy" + run: cd ${{github.workspace}}/build && cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DANALYSIS=$ANALYSIS -DCONFIG=$TWO_FRIENDS_CONFIG -DSAMPLES=$SAMPLES -DERAS=$ERAS -DSCOPES=$TWO_FRIENDS_SCOPES -DSHIFTS=$FRIEND_SHIFTS -DOPTIMIZED=false -DQUANTITIESMAP="dummy" -DONNXRUNTIME_INCLUDE_DIR=/opt/onnxruntime - name: Build shell: bash @@ -157,9 +129,9 @@ jobs: python_format: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 container: - image: rootproject/root:6.26.00-ubuntu20.04 + image: ghcr.io/kit-cms/crown-crown-unittest:pr-260 options: --user 0 # run as root steps: @@ -167,7 +139,7 @@ jobs: run: apt-get -y update - name: Install missing software - run: apt-get install -y git python3-pip && pip install black + run: apt-get install -y git python3-pip && pip install black==23.3.0 - uses: actions/checkout@v2 @@ -176,9 +148,9 @@ jobs: run: cd $GITHUB_WORKSPACE && bash checks/python-formatting.sh 
cpp_format: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 container: - image: rootproject/root:6.26.00-ubuntu20.04 + image: ghcr.io/kit-cms/crown-crown-unittest:pr-260 options: --user 0 # run as root steps: @@ -195,9 +167,9 @@ jobs: run: cd $GITHUB_WORKSPACE && bash checks/cpp-formatting.sh docs: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 container: - image: rootproject/root:6.26.00-ubuntu20.04 + image: ghcr.io/kit-cms/crown-crown-unittest:pr-260 options: --user 0 # run as root steps: diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 6801a0a4..53eb14b2 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -5,12 +5,18 @@ # Required version: 2 +# Set the OS, Python version and other tools you might need + +build: + os: ubuntu-22.04 + tools: + python: "3.10" + # Build documentation in the docs/ directory with Sphinx sphinx: configuration: docs/sphinx_source/conf.py # Optionally set the version of Python and requirements required to build your docs python: - version: 3.8 install: - requirements: docs/sphinx_source/requirements.txt diff --git a/CMakeLists.txt b/CMakeLists.txt index 43861cb1..80224639 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,213 +3,18 @@ cmake_minimum_required(VERSION 3.20) unset(CMAKE_BUILD_TYPE CACHE) unset(CMAKE_CXX_FLAGS_RELEASE CACHE) -# read debug and optimized flags from command line -option(BUILD_CROWNLIB_ONLY "Build only the CROWNLIB library" OFF) -set(REBUILD_CROWN_LIB "false") # used for non-production mode - -if (NOT DEFINED DEBUG) - message(STATUS "No Debug mode set, activate with -DDEBUG=true --> compile with debug symbols and run code generation with debug output") - set(DEBUG "false") -endif() - -if (NOT DEFINED OPTIMIZED) - message(STATUS "No Optimization not set, building with -DOPTIMIZED=true --> slower build times but faster runtimes") - set(OPTIMIZED "true") -endif() -# Convert args to lowercase -string( TOLOWER "${DEBUG}" DEBUG_PARSED) -string( TOLOWER "${OPTIMIZED}" OPTIMIZED_PARSED) -if(DEBUG_PARSED 
STREQUAL "true") - message(STATUS "Debug mode") - set(CMAKE_BUILD_TYPE "Debug" CACHE STRING "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel.") - set(CMAKE_CXX_FLAGS_DEBUG "-g" CACHE STRING "Set default compiler flags for build type Debug") -else() - set(DEBUG_PARSED "false") - if(OPTIMIZED_PARSED STREQUAL "true") - message(STATUS "Optimized mode") - set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel.") - set(CMAKE_CXX_FLAGS_RELEASE "-O3 -DNDEBUG" CACHE STRING "Set default compiler flags for build type Release") - else() - message(STATUS "Unoptimized mode") - set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel.") - set(CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG" CACHE STRING "Set default compiler flags for build type Release") - endif() -endif() -# Only parse additional args if not building only the CROWNLIB library -if(NOT BUILD_CROWNLIB_ONLY) - - if (NOT DEFINED ANALYSIS) - message(FATAL_ERROR "Please specify the Analysis to be used with -DANALYSIS=my_analysis_name") - endif() - # if analysis is set, check the folder to find any potential payload files to be used - file(GLOB PAYLOADS ${CMAKE_SOURCE_DIR}/analysis_configurations/${ANALYSIS}/payloads/*) - if (NOT PAYLOADS) - message(STATUS "No payload files found in ${CMAKE_SOURCE_DIR}/analysis_configurations/${ANALYSIS}/payloads/ for analysis ${ANALYSIS}") - else() - message(STATUS "Found payload files in ${CMAKE_SOURCE_DIR}/analysis_configurations/${ANALYSIS}/payloads/ for analysis ${ANALYSIS}") - endif() - - if (NOT DEFINED CONFIG) - message(FATAL_ERROR "Please specify the config to be used with -DCONFIG=my_config_name") - endif() - - if (NOT DEFINED SCOPES) - message(FATAL_ERROR "No scope specificed, set the scopes via comma seperated list e.g. 
-DSCOPES=et,mt,tt,em") - endif() - - if (NOT DEFINED SHIFTS) - message(STATUS "No shifts specificed, using -DSHIFTS=all. If you want to run nominal only, use -DSHIFTS=none") - set(SHIFTS "all") - endif() - - if (NOT DEFINED QUANTITIESMAP) - message(STATUS "No quantities map specified, none will be used. If you want to produce friends, you have to specify quantities maps for all friend files e.g. -DQUANTITIESMAP=quantities_map_1.json,quantities_map_2.json. The input can be a comma-separated list of JSON files and/or root files (for debugging purposes).") - set(FRIENDS "false") - set(QUANTITIESMAP "none") - else() - set(FRIENDS "true") - endif() - - if (NOT DEFINED SAMPLES) - message(FATAL_ERROR "Please specify the samples to be used with -DSAMPLES=samples") - endif() - - if (NOT DEFINED ERAS) - message(FATAL_ERROR "Please specify the eras to be used with -DERAS=eras") - endif() - - if (NOT DEFINED PRODUCTION) - message(STATUS "No production mode set --> will rebuild the CROWNLIB library if necessary") - set(REBUILD_CROWN_LIB "true") - endif() - if (NOT DEFINED THREADS) - message(STATUS "No threads set, using single threaded mode with -DTHREADS=1") - set(THREADS "1") - endif() - string (REPLACE "," ";" ERAS "${ERAS}") - string (REPLACE "," ";" SAMPLES "${SAMPLES}") - message(STATUS "---------------------------------------------") - message(STATUS "|> Set up analysis for scopes ${SCOPES}.") - message(STATUS "|> Set up analysis for ${ANALYSIS}.") - message(STATUS "|> Set up analysis for config ${CONFIG}.") - message(STATUS "|> Set up analysis for samples ${SAMPLES}.") - message(STATUS "|> Set up analysis for eras ${ERAS}.") - message(STATUS "|> Set up analysis for shifts ${SHIFTS}.") - message(STATUS "|> Set up analysis with ${THREADS} threads.") - message(STATUS "|> Set up analysis with debug mode : ${DEBUG_PARSED}.") - message(STATUS "|> Set up analysis with optimization mode : ${OPTIMIZED_PARSED}.") - message(STATUS "|> generator is set to ${CMAKE_GENERATOR}") - # 
Define the default compiler flags for different build types, if different from the cmake defaults - # The build type should be set so that the correct compiler flags are chosen - message(STATUS "|> Code generation arguments: --analysis ${ANALYSIS} --config ${CONFIG} --scopes ${SCOPES} --shifts ${SHIFTS} --samples ${SAMPLES} --eras ${ERAS} --threads ${THREADS} --debug ${DEBUG_PARSED} --friends ${FRIENDS} --quantities_map ${QUANTITIESMAP}") - message(STATUS "---------------------------------------------") -else() # if building only the CROWNLIB library - message(STATUS "Building only the CROWNLIB library") - message(STATUS "No additional arguments parsed") -endif() +# include cmake modules +list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") +# parse args +include(ParseArguments) # Set the default install directory to the build directory -set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR} CACHE STRING "Set default install prefix to the build directory") +set(CMAKE_INSTALL_PREFIX + ${CMAKE_BINARY_DIR} + CACHE STRING "Set default install prefix to the build directory") message(STATUS "Creating Project") # Create the project project(CROWN CXX) -message(STATUS "Finding Packages") -# Find ROOT and print details -find_package(ROOT 6.28 REQUIRED COMPONENTS ROOTVecOps ROOTDataFrame RooFit GenVector ROOTTMVASofieParser PyMVA) -# add OpenMP and MPI -find_package(OpenMP) -find_package(MPI) -# add nlohmann json -find_package(nlohmann_json) - -# Find Python 3 -find_package(Python 3.9 REQUIRED COMPONENTS Interpreter) - -# Find ONNXRuntime -# first check if we have an LCG stack via LCG_VERSION environment variable -if (DEFINED ENV{LCG_VERSION}) -string(REPLACE ":" ";" RUNTIME_PATH "$ENV{LD_LIBRARY_PATH}") - message(STATUS "Found LCG stack, using it to find ONNXRuntime") - find_library(ONNX_RUNTIME_LIB_PATH - NAMES onnxruntime - HINTS ${RUNTIME_PATH} - ) - if (ONNX_RUNTIME_LIB_PATH) - # get the real path of the library to find the include directory - 
get_filename_component(ONNX_RUNTIME_LIB_PATH ${ONNX_RUNTIME_LIB_PATH} REALPATH) - get_filename_component(ONNX_RUNTIME_INCLUDE_PATH ${ONNX_RUNTIME_LIB_PATH}/../../include REALPATH) - message(STATUS "ONNXRuntime include path: ${ONNX_RUNTIME_INCLUDE_PATH}") - include_directories("${ONNX_RUNTIME_INCLUDE_PATH}/core/session") - endif() - - message(STATUS "ONNXRuntime library path: ${ONNX_RUNTIME_LIB_PATH}") -else() - message(STATUS "No LCG stack found, not adding ONNXRuntime") -endif() - -message(STATUS "") -message(STATUS "Found ROOT with following settings:") -message(STATUS " Version: ${ROOT_VERSION}") -message(STATUS " ROOT executable: ${ROOT_EXECUTABLE}") -message(STATUS " Include directories: ${ROOT_INCLUDE_DIRS}") -message(STATUS " Compiler flags: ${ROOT_CXX_FLAGS}") -message(STATUS "") - -# Add ROOT flags to compile options, e.g. we have to use the same C++ standard -# Note that the flags from the build type, e.g. CMAKE_CXX_FLAGS_RELEASE, are -# automatically appended. You can check this during build time by enabling -# the verbose make output with "VERBOSE=1 make". -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ROOT_CXX_FLAGS}") - -# Use -fconcepts with g++ to silence following warning: -# warning: use of 'auto' in parameter declaration only available with '-fconcepts -if (CMAKE_CXX_COMPILER_ID STREQUAL GNU) - message(STATUS "Attach -fconcepts to the compiler flags to silence warnings.") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fconcepts") -endif() - -# Find the C++ standard from ROOT and set it as the standard of this project -# We require the C++ standard 17 or 20 and don't want to fall back to lower versions. 
-set(CMAKE_CXX_STANDARD_REQUIRED ON) -if (${ROOT_CXX_FLAGS} MATCHES "\\-std\\=c\\+\\+17") - message(STATUS "Set c++17 as the C++ standard.") - set(CMAKE_CXX_STANDARD 17) -elseif (${ROOT_CXX_FLAGS} MATCHES "\\-std\\=c\\+\\+20") - message(STATUS "Set c++20 as the C++ standard.") - set(CMAKE_CXX_STANDARD 20) -elseif (${ROOT_CXX_FLAGS} MATCHES "\\-std\\=c\\+\\+14") - message(STATUS "c++14 found, setting c++17 as the C++ standard.") - set(CMAKE_CXX_STANDARD 17) -else () - message(FATAL_ERROR "The standard c++17 or higher is required but not found in the ROOT flags: ${ROOT_CXX_FLAGS}") -endif() - -message(STATUS "Including spdlog.") -# Build the logging library -include(ExternalProject) -ExternalProject_Add(spdlog - PREFIX spdlog - GIT_REPOSITORY https://github.com/gabime/spdlog.git - GIT_SHALLOW 1 - GIT_TAG v1.8.5 - CMAKE_ARGS -DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD} - -DCMAKE_BUILD_TYPE=Release - -DCMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR} - -DCMAKE_CXX_FLAGS=-fpic - LOG_DOWNLOAD 1 LOG_CONFIGURE 1 LOG_BUILD 1 LOG_INSTALL 1 - BUILD_BYPRODUCTS ${CMAKE_INSTALL_PREFIX}/lib64/libspdlog.a - BUILD_BYPRODUCTS ${CMAKE_INSTALL_PREFIX}/lib/libspdlog.a -) - -message(STATUS "Configuring spdlog.") -# Make an imported target out of the build logging library -add_library(logging STATIC IMPORTED) -file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/include") # required because the include dir must be existent for INTERFACE_INCLUDE_DIRECTORIES -include(GNUInstallDirs) # required to populate CMAKE_INSTALL_LIBDIR with lib or lib64 required for the destination of libspdlog.a -set_target_properties(logging PROPERTIES - IMPORTED_LOCATION "${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}/libspdlog.a" - INTERFACE_INCLUDE_DIRECTORIES "${CMAKE_BINARY_DIR}/include") -add_dependencies(logging spdlog) # enforces to build spdlog before making the imported target # Print settings of the executable string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UPPER) @@ -218,206 +23,43 @@ message(STATUS "The executable is 
configured with following flags:") message(STATUS " Build type: ${CMAKE_BUILD_TYPE}") message(STATUS " C++ compiler: ${CMAKE_CXX_COMPILER}") message(STATUS " Base compiler flags: ${CMAKE_CXX_FLAGS}") -message(STATUS " Compiler flags from build type: ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") +message( + STATUS + " Compiler flags from build type: ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}" +) message(STATUS "") -# detect virtualenv and set Pip args accordingly -if(DEFINED ENV{VIRTUAL_ENV} OR DEFINED ENV{CONDA_PREFIX}) - set(_pip_args) -else() - set(_pip_args "--user") -endif() - -function(find_python_package PYPINAME NAME MIN_VERSION) - execute_process(COMMAND "${Python_EXECUTABLE}" "-c" "import ${NAME}; print(${NAME}.__version__)" - RESULT_VARIABLE PACKAGE_NOT_FOUND - OUTPUT_VARIABLE PACKAGE_VERSION - OUTPUT_STRIP_TRAILING_WHITESPACE) - if(${PACKAGE_NOT_FOUND} EQUAL 1) - execute_process(COMMAND ${Python_EXECUTABLE} -m pip install ${PYPINAME} ${_pip_args}) - execute_process(COMMAND "${Python_EXECUTABLE}" "-c" "import ${NAME}; print(${NAME}.__version__)" - RESULT_VARIABLE PACKAGE_NOT_FOUND - OUTPUT_VARIABLE PACKAGE_VERSION - OUTPUT_STRIP_TRAILING_WHITESPACE) - if(${PACKAGE_NOT_FOUND} EQUAL 1) - message(FATAL_ERROR "Failed to import ${PYPINAME} or get version.") - endif() - endif() - if(PACKAGE_VERSION VERSION_LESS MIN_VERSION) - message(FATAL_ERROR "The version of Python package ${PYPINAME} is too old (found ${PACKAGE_VERSION}, require at least ${MIN_VERSION}).") - endif() - message(STATUS "Found Python package ${PYPINAME} (require ${MIN_VERSION}, found ${PACKAGE_VERSION})") -endfunction() - -function(install_correctionlib) - execute_process(COMMAND "${Python_EXECUTABLE}" "-c" "import correctionlib; print(correctionlib.__version__)" - RESULT_VARIABLE PACKAGE_NOT_FOUND - OUTPUT_VARIABLE PACKAGE_VERSION - OUTPUT_STRIP_TRAILING_WHITESPACE) - if(${PACKAGE_NOT_FOUND} EQUAL 1) - execute_process(COMMAND ${Python_EXECUTABLE} -m pip install ${_pip_args} 
git+https://github.com/cms-nanoAOD/correctionlib.git) - endif() - message(STATUS "Found correctionlib !") -endfunction() - -# Adding correctionlib for scale factor evaluation -# for now the official pip package has some problem -# in the future "find_python_package(correctionlib correctionlib X.X)" should hopefully work -install_correctionlib() -message(STATUS "Setting up correctionlib ...") -execute_process(COMMAND correction config --cmake - OUTPUT_VARIABLE CORRECTION_LIB_ARGS - OUTPUT_STRIP_TRAILING_WHITESPACE) -string(REPLACE -Dcorrectionlib_DIR= "" CORRECTIONLIBPATH ${CORRECTION_LIB_ARGS}) -# if correctionlib comes from cvmfs, change the correctionlibpath accordingly -if (${CORRECTIONLIBPATH} MATCHES "^/cvmfs/") - message(STATUS "Setting up correctionlib from cvmfs ...") - set(USING_CVMFS TRUE) - find_package(correctionlib) - find_library(CORRECTION_LIB_PATH correctionlib) -else() - message(STATUS "Setting up correctionlib from local setup ...") - set(USING_CVMFS FALSE) - find_package(correctionlib REQUIRED PATHS ${CORRECTIONLIBPATH}) - set(CORRECTION_LIB_PATH "${CORRECTIONLIBPATH}/../lib/libcorrectionlib.so") -endif() -set(THREADS_PREFER_PTHREAD_FLAG ON) -find_package(Threads) -find_package(ZLIB) -set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) -message(STATUS "Correctionlib library path: ${CORRECTION_LIB_PATH}") - -if (NOT DEFINED INSTALLDIR) - message(STATUS "No -DINSTALLDIR specified, using default: ${CMAKE_CURRENT_BINARY_DIR}/bin") - set(INSTALLDIR ${CMAKE_CURRENT_BINARY_DIR}/bin) +message(STATUS "Finding Packages") +# add the different dependencies +include(AddBaseDependencies) +include(AddRoot) +include(AddLogging) +include(AddCorrectionlib) +include(AddOnnxruntime) +include(ConfigurePython) + +# installdir +if(NOT DEFINED INSTALLDIR) + message( + STATUS + "No -DINSTALLDIR specified, using default: ${CMAKE_CURRENT_BINARY_DIR}/bin" + ) + set(INSTALLDIR ${CMAKE_CURRENT_BINARY_DIR}/bin) endif() set(GENERATE_CPP_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) - 
-# build a shared lib from all CROWN functions -include_directories(${CMAKE_SOURCE_DIR}/src) -include_directories(${CMAKE_SOURCE_DIR}/include) -file(GLOB SOURCES_1 ${CMAKE_SOURCE_DIR}/src/*.cxx) -file(GLOB SOURCES_2 - ${CMAKE_SOURCE_DIR}/src/utility/*.cxx - ${CMAKE_SOURCE_DIR}/src/RecoilCorrections/*.cxx - ${CMAKE_SOURCE_DIR}/src/SVFit/*.cxx - ${CMAKE_SOURCE_DIR}/src/HHKinFit/*.cxx) -set(SOURCES ${SOURCES_1} ${SOURCES_2}) - - -if(BUILD_CROWNLIB_ONLY) - message(STATUS "Building only the CROWNLIB library") - add_library(CROWNLIB SHARED ${SOURCES}) - target_include_directories(CROWNLIB PRIVATE ${CMAKE_SOURCE_DIR} ${ROOT_INCLUDE_DIRS}) - target_link_libraries(CROWNLIB ROOT::ROOTVecOps ROOT::ROOTDataFrame ROOT::RooFit ROOT::GenVector ROOT::PyMVA ROOT::ROOTTMVASofieParser logging correctionlib nlohmann_json::nlohmann_json ${ONNX_RUNTIME_LIB_PATH}) - install(TARGETS CROWNLIB DESTINATION ${INSTALLDIR}/lib ) - return() -endif() -# check if CROWNLIB is already installed -find_library(CROWNLIB_FOUND CROWNLIB HINTS ${INSTALLDIR}/lib ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR}/lib) -if(NOT CROWNLIB_FOUND OR REBUILD_CROWN_LIB) - message(STATUS "CROWNLIB not found, building it") - # CROWNLIB not found, build it - add_library(CROWNLIB SHARED ${SOURCES}) - target_include_directories(CROWNLIB PRIVATE ${CMAKE_SOURCE_DIR} ${ROOT_INCLUDE_DIRS}) - target_link_libraries(CROWNLIB ROOT::ROOTVecOps ROOT::ROOTDataFrame ROOT::RooFit ROOT::GenVector ROOT::PyMVA ROOT::ROOTTMVASofieParser logging correctionlib nlohmann_json::nlohmann_json ${ONNX_RUNTIME_LIB_PATH}) - install(TARGETS CROWNLIB DESTINATION ${INSTALLDIR}/lib) -else() - message(STATUS "Found CROWNLIB in ${CROWNLIB_FOUND}") - install(FILES ${CROWNLIB_FOUND} DESTINATION ${INSTALLDIR}/lib) - link_directories(${INSTALLDIR}/lib ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR}/lib) -endif() - -# Generate the C++ code -if (FRIENDS) - set(GENERATE_CPP_INPUT_TEMPLATE 
"${CMAKE_SOURCE_DIR}/code_generation/analysis_template_friends.cxx") -else() - set(GENERATE_CPP_INPUT_TEMPLATE "${CMAKE_SOURCE_DIR}/code_generation/analysis_template.cxx") +# crownlib setup +include(ConfigureCrownlib) +if (BUILD_CROWNLIB_ONLY) + # exit if only crownlib is being built + return() endif() -set(GENERATE_CPP_SUBSET_TEMPLATE "${CMAKE_SOURCE_DIR}/code_generation/subset_template.cxx") - -message(STATUS "") -message(STATUS "Generate C++ code with following settings:") -message(STATUS " Output directory: ${GENERATE_CPP_OUTPUT_DIRECTORY}") -message(STATUS " Install directory: ${INSTALLDIR}") -message(STATUS " Template: ${GENERATE_CPP_INPUT_TEMPLATE}") -message(STATUS " Subset template: ${GENERATE_CPP_SUBSET_TEMPLATE}") -message(STATUS " Analysis: ${ANALYSIS}") -message(STATUS " Config: ${CONFIG}") -message(STATUS " Channels: ${SCOPES}") -message(STATUS " Shifts: ${SHIFTS}") -message(STATUS " Samples: ${SAMPLES}") -message(STATUS " Eras: ${ERAS}") -message(STATUS "") - -file(MAKE_DIRECTORY ${GENERATE_CPP_OUTPUT_DIRECTORY}) -# loop over all samples and eras and generate code for each one of them -foreach (ERA IN LISTS ERAS) - foreach (SAMPLE IN LISTS SAMPLES) - execute_process( - COMMAND ${Python_EXECUTABLE} ${CMAKE_SOURCE_DIR}/generate.py --template ${GENERATE_CPP_INPUT_TEMPLATE} --subset-template ${GENERATE_CPP_SUBSET_TEMPLATE} --output ${GENERATE_CPP_OUTPUT_DIRECTORY} --analysis ${ANALYSIS} --config ${CONFIG} --scopes ${SCOPES} --shifts ${SHIFTS} --sample ${SAMPLE} --era ${ERA} --threads ${THREADS} --debug ${DEBUG_PARSED} --friends ${FRIENDS} --quantities-map ${QUANTITIESMAP} RESULT_VARIABLE ret) - if(ret EQUAL "1") - message( FATAL_ERROR "Code Generation Failed - Exiting !") - endif() - endforeach() -endforeach() - - -set(GENERATE_CPP_OUTPUT_FILELIST "${GENERATE_CPP_OUTPUT_DIRECTORY}/files.txt") -if(NOT EXISTS ${GENERATE_CPP_OUTPUT_FILELIST}) - message(FATAL_ERROR "List of generated C++ files in ${GENERATE_CPP_OUTPUT_FILELIST} does not exist.") -endif() - - 
-# Iterate over files from output filelist and add build and install targets -FILE(READ ${GENERATE_CPP_OUTPUT_FILELIST} FILELIST) -STRING(REGEX REPLACE "\n" ";" FILELIST ${FILELIST}) -set(TARGET_NAMES "") -# copy all correction files into the install location -install(DIRECTORY data/ DESTINATION ${INSTALLDIR}/data) -if (PAYLOADS) - install(DIRECTORY ${CMAKE_SOURCE_DIR}/analysis_configurations/${ANALYSIS}/payloads DESTINATION ${INSTALLDIR}) -endif() - -# also copy inish script needed for job tarball -install(FILES init.sh DESTINATION ${INSTALLDIR}) -foreach(FILENAME ${FILELIST}) - # STRING(REGEX REPLACE ".cxx" "" TARGET_NAME ${FILENAME}) - cmake_path(GET FILENAME RELATIVE_PART RELATIVE_PATH) - cmake_path(GET FILENAME FILENAME TARGET_FILENAMENAME) - STRING(REGEX REPLACE ".cxx" "" TARGET_NAME ${TARGET_FILENAMENAME}) - STRING(REGEX REPLACE "/${TARGET_FILENAMENAME}" "" GENERATED_CODEBASE ${RELATIVE_PATH}) - - list(APPEND TARGET_NAMES ${TARGET_NAME}) - set(FULL_PATH "${GENERATE_CPP_OUTPUT_DIRECTORY}/${FILENAME}") - - # Add build target - message(STATUS "Add build target for file ${FILENAME}.") - - # message(STATUS "FULL_PATH: ${FULL_PATH} / TARGET_NAME: ${TARGET_NAME}") - # message(STATUS "Adding header files from ${GENERATE_CPP_OUTPUT_DIRECTORY}/${GENERATED_CODEBASE}/include/*") - file(GLOB GENERATED_HEADERS LIST_DIRECTORIES true "${GENERATE_CPP_OUTPUT_DIRECTORY}/${GENERATED_CODEBASE}/include/*") - file(GLOB GENERATED_CXX_FILES "${GENERATE_CPP_OUTPUT_DIRECTORY}/${GENERATED_CODEBASE}/src/*/*.cxx") - # message(STATUS "GENERATED_HEADERS ${GENERATED_HEADERS}") - add_executable(${TARGET_NAME} ${FULL_PATH} ${GENERATED_CXX_FILES}) - # Adds a pre-build event to the Target copying the correctionlib.so file into the /lib folder in the install directory - target_include_directories(${TARGET_NAME} PRIVATE ${CMAKE_SOURCE_DIR} ${ROOT_INCLUDE_DIRS} $ORIGIN/lib/ lib/) - target_link_libraries(${TARGET_NAME} ROOT::ROOTVecOps ROOT::ROOTDataFrame ROOT::RooFit ROOT::GenVector ROOT::PyMVA 
ROOT::ROOTTMVASofieParser logging correctionlib nlohmann_json::nlohmann_json CROWNLIB ${ONNX_RUNTIME_LIB_PATH}) - add_custom_command(TARGET ${TARGET_NAME} PRE_BUILD - COMMAND ${CMAKE_COMMAND} -E copy_if_different - "${CORRECTION_LIB_PATH}" - ${INSTALLDIR}/lib/libcorrectionlib.so) - # Find shared libraries next to the executable in the /lib folder - set_target_properties(${TARGET_NAME} PROPERTIES - BUILD_WITH_INSTALL_RPATH FALSE - LINK_FLAGS "-Wl,-rpath,$ORIGIN/lib") - # Add install target, basically just copying the executable around relative to CMAKE_INSTALL_PREFIX - install(TARGETS ${TARGET_NAME} DESTINATION ${INSTALLDIR}) - install(CODE "execute_process(COMMAND ${CMAKE_SOURCE_DIR}/checks/get-diff.sh ${CMAKE_SOURCE_DIR} ${ANALYSIS} ${INSTALLDIR}/diff )") +# run the code generation +include(CodeGeneration) -endforeach() +# run the build +include(Build) -# Include tests +# Include tests enable_testing() add_subdirectory(tests) diff --git a/analysis_configurations/unittest/producers/genparticles.py b/analysis_configurations/unittest/producers/genparticles.py index f8d9147b..60eb6c14 100644 --- a/analysis_configurations/unittest/producers/genparticles.py +++ b/analysis_configurations/unittest/producers/genparticles.py @@ -197,7 +197,7 @@ name="gen_match_2", call="quantities::tau::genmatch({df}, {output}, 1, {input})", input=[q.dileptonpair, nanoAOD.Tau_genMatch], - output=[q.gen_match_2], + output=[q.tau_gen_match_2], scopes=["mt", "et", "tt"], ) gen_taujet_pt_1 = Producer( diff --git a/analysis_configurations/unittest/producers/pairquantities.py b/analysis_configurations/unittest/producers/pairquantities.py index 5e6bfb31..5eafe375 100644 --- a/analysis_configurations/unittest/producers/pairquantities.py +++ b/analysis_configurations/unittest/producers/pairquantities.py @@ -265,7 +265,7 @@ name="gen_match_1", call="quantities::tau::genmatch({df}, {output}, 0, {input})", input=[q.dileptonpair, nanoAOD.Tau_genMatch], - output=[q.gen_match_1], + 
output=[q.tau_gen_match_1], scopes=["tt"], ) taujet_pt_1 = Producer( @@ -318,7 +318,7 @@ name="taugen_match_2", call="quantities::tau::genmatch({df}, {output}, 1, {input})", input=[q.dileptonpair, nanoAOD.Tau_genMatch], - output=[q.gen_match_2], + output=[q.tau_gen_match_2], scopes=["mt", "et", "tt"], ) taujet_pt_2 = Producer( diff --git a/analysis_configurations/unittest/producers/scalefactors.py b/analysis_configurations/unittest/producers/scalefactors.py index e5d8abc4..e4d2bdf2 100644 --- a/analysis_configurations/unittest/producers/scalefactors.py +++ b/analysis_configurations/unittest/producers/scalefactors.py @@ -111,14 +111,15 @@ }, ) + ############################ # Tau ID/ISO SF # The readout is done via correctionlib ############################ Tau_1_VsJetTauID_SF = ExtendedVectorProducer( name="Tau_1_VsJetTauID_SF", - call='scalefactor::tau::id_vsJet_tt({df}, {input}, {vec_open}{tau_dms}{vec_close}, "{vsjet_tau_id_WP}", "{tau_sf_vsjet_tauDM0}", "{tau_sf_vsjet_tauDM1}", "{tau_sf_vsjet_tauDM10}", "{tau_sf_vsjet_tauDM11}", "{tau_vsjet_sf_dependence}", {output}, "{tau_sf_file}", "{tau_id_discriminator}")', - input=[q.pt_1, q.tau_decaymode_1, q.gen_match_1], + call='scalefactor::tau::id_vsJet_tt({df}, {input}, {vec_open}{tau_dms}{vec_close}, "{vsjet_tau_id_WP}", "{tau_sf_vsjet_tauDM0}", "{tau_sf_vsjet_tauDM1}", "{tau_sf_vsjet_tauDM10}", "{tau_sf_vsjet_tauDM11}", "{tau_vsjet_sf_dependence}", "{tau_vsjet_vseleWP}", {output}, "{tau_sf_file}", "{tau_id_discriminator}")', + input=[q.pt_1, q.tau_decaymode_1, q.tau_gen_match_1], output="tau_1_vsjet_sf_outputname", scope=["tt"], vec_config="vsjet_tau_id", @@ -126,7 +127,7 @@ Tau_1_VsEleTauID_SF = ExtendedVectorProducer( name="Tau_1_VsEleTauID_SF", call='scalefactor::tau::id_vsEle({df}, {input}, {vec_open}{tau_dms}{vec_close}, "{vsele_tau_id_WP}", "{tau_sf_vsele_barrel}", "{tau_sf_vsele_endcap}", {output}, "{tau_sf_file}", "{tau_id_discriminator}")', - input=[q.eta_1, q.tau_decaymode_1, q.gen_match_1], + 
input=[q.eta_1, q.tau_decaymode_1, q.tau_gen_match_1], output="tau_1_vsele_sf_outputname", scope=["tt"], vec_config="vsele_tau_id", @@ -134,23 +135,23 @@ Tau_1_VsMuTauID_SF = ExtendedVectorProducer( name="Tau_1_VsMuTauID_SF", call='scalefactor::tau::id_vsMu({df}, {input}, {vec_open}{tau_dms}{vec_close}, "{vsmu_tau_id_WP}", "{tau_sf_vsmu_wheel1}", "{tau_sf_vsmu_wheel2}", "{tau_sf_vsmu_wheel3}", "{tau_sf_vsmu_wheel4}", "{tau_sf_vsmu_wheel5}", {output}, "{tau_sf_file}", "{tau_id_discriminator}")', - input=[q.eta_1, q.tau_decaymode_1, q.gen_match_1], + input=[q.eta_1, q.tau_decaymode_1, q.tau_gen_match_1], output="tau_1_vsmu_sf_outputname", scope=["tt"], vec_config="vsmu_tau_id", ) Tau_2_VsJetTauID_lt_SF = ExtendedVectorProducer( name="Tau_2_VsJetTauID_lt_SF", - call='scalefactor::tau::id_vsJet_lt({df}, {input}, {vec_open}{tau_dms}{vec_close}, "{vsjet_tau_id_WP}", "{tau_sf_vsjet_tau30to35}", "{tau_sf_vsjet_tau35to40}", "{tau_sf_vsjet_tau40to500}", "{tau_sf_vsjet_tau500to1000}", "{tau_sf_vsjet_tau1000toinf}", "{tau_vsjet_sf_dependence}", {output}, "{tau_sf_file}", "{tau_id_discriminator}")', - input=[q.pt_2, q.tau_decaymode_2, q.gen_match_2], + call='scalefactor::tau::id_vsJet_lt({df}, {input}, {vec_open}{tau_dms}{vec_close}, "{vsjet_tau_id_WP}", "{tau_sf_vsjet_tau30to35}", "{tau_sf_vsjet_tau35to40}", "{tau_sf_vsjet_tau40to500}", "{tau_sf_vsjet_tau500to1000}", "{tau_sf_vsjet_tau1000toinf}", "{tau_vsjet_sf_dependence}", "{tau_vsjet_vseleWP}", {output}, "{tau_sf_file}", "{tau_id_discriminator}")', + input=[q.pt_2, q.tau_decaymode_2, q.tau_gen_match_2], output="tau_2_vsjet_sf_outputname", scope=["et", "mt"], vec_config="vsjet_tau_id", ) Tau_2_VsJetTauID_tt_SF = ExtendedVectorProducer( name="Tau_2_VsJetTauID_tt_SF", - call='scalefactor::tau::id_vsJet_tt({df}, {input}, {vec_open}{tau_dms}{vec_close}, "{vsjet_tau_id_WP}", "{tau_sf_vsjet_tauDM0}", "{tau_sf_vsjet_tauDM1}", "{tau_sf_vsjet_tauDM10}", "{tau_sf_vsjet_tauDM11}", "{tau_vsjet_sf_dependence}", {output}, 
"{tau_sf_file}", "{tau_id_discriminator}")', - input=[q.pt_2, q.tau_decaymode_2, q.gen_match_2], + call='scalefactor::tau::id_vsJet_tt({df}, {input}, {vec_open}{tau_dms}{vec_close}, "{vsjet_tau_id_WP}", "{tau_sf_vsjet_tauDM0}", "{tau_sf_vsjet_tauDM1}", "{tau_sf_vsjet_tauDM10}", "{tau_sf_vsjet_tauDM11}", "{tau_vsjet_sf_dependence}", "{tau_vsjet_vseleWP}", {output}, "{tau_sf_file}", "{tau_id_discriminator}")', + input=[q.pt_2, q.tau_decaymode_2, q.tau_gen_match_2], output="tau_2_vsjet_sf_outputname", scope=["tt"], vec_config="vsjet_tau_id", @@ -158,7 +159,7 @@ Tau_2_VsEleTauID_SF = ExtendedVectorProducer( name="Tau_2_VsEleTauID_SF", call='scalefactor::tau::id_vsEle({df}, {input}, {vec_open}{tau_dms}{vec_close}, "{vsele_tau_id_WP}", "{tau_sf_vsele_barrel}", "{tau_sf_vsele_endcap}", {output}, "{tau_sf_file}", "{tau_id_discriminator}")', - input=[q.eta_2, q.tau_decaymode_2, q.gen_match_2], + input=[q.eta_2, q.tau_decaymode_2, q.tau_gen_match_2], output="tau_2_vsele_sf_outputname", scope=["et", "mt", "tt"], vec_config="vsele_tau_id", @@ -166,7 +167,7 @@ Tau_2_VsMuTauID_SF = ExtendedVectorProducer( name="Tau_2_VsMuTauID_SF", call='scalefactor::tau::id_vsMu({df}, {input}, {vec_open}{tau_dms}{vec_close}, "{vsmu_tau_id_WP}", "{tau_sf_vsmu_wheel1}", "{tau_sf_vsmu_wheel2}", "{tau_sf_vsmu_wheel3}", "{tau_sf_vsmu_wheel4}", "{tau_sf_vsmu_wheel5}", {output}, "{tau_sf_file}", "{tau_id_discriminator}")', - input=[q.eta_2, q.tau_decaymode_2, q.gen_match_2], + input=[q.eta_2, q.tau_decaymode_2, q.tau_gen_match_2], output="tau_2_vsmu_sf_outputname", scope=["et", "mt", "tt"], vec_config="vsmu_tau_id", diff --git a/analysis_configurations/unittest/quantities/output.py b/analysis_configurations/unittest/quantities/output.py index 63bf3c4d..d339b645 100644 --- a/analysis_configurations/unittest/quantities/output.py +++ b/analysis_configurations/unittest/quantities/output.py @@ -58,8 +58,8 @@ iso_2 = Quantity("iso_2") decaymode_1 = Quantity("decaymode_1") decaymode_2 = 
Quantity("decaymode_2") -gen_match_1 = Quantity("gen_match_1") -gen_match_2 = Quantity("gen_match_2") +tau_gen_match_1 = Quantity("tau_gen_match_1") +tau_gen_match_2 = Quantity("tau_gen_match_2") gen_tau_pt_1 = Quantity("gen_tau_pt_1") gen_tau_pt_2 = Quantity("gen_tau_pt_2") gen_tau_eta_1 = Quantity("gen_tau_eta_1") diff --git a/analysis_configurations/unittest/unittest_config.py b/analysis_configurations/unittest/unittest_config.py index 37728bc9..313d18f7 100644 --- a/analysis_configurations/unittest/unittest_config.py +++ b/analysis_configurations/unittest/unittest_config.py @@ -205,14 +205,10 @@ def build_config( "vsjet_tau_id_WPbit": bit, } for wp, bit in { - "VVVLoose": 1, - "VVLoose": 2, - "VLoose": 3, "Loose": 4, "Medium": 5, "Tight": 6, "VTight": 7, - "VVTight": 8, }.items() ], "vsele_tau_id": [ @@ -275,6 +271,7 @@ def build_config( "tau_sf_vsjet_tau500to1000": "nom", "tau_sf_vsjet_tau1000toinf": "nom", "tau_vsjet_sf_dependence": "pt", # or "dm", "eta" + "tau_vsjet_vseleWP": "VVLoose", }, ) # TT tau id sf variations @@ -286,6 +283,7 @@ def build_config( "tau_sf_vsjet_tauDM10": "nom", "tau_sf_vsjet_tauDM11": "nom", "tau_vsjet_sf_dependence": "dm", # or "dm", "eta" + "tau_vsjet_vseleWP": "VVLoose", }, ) # MT / ET tau selection @@ -568,19 +566,19 @@ def build_config( # "global", # RemoveProducer( # producers=[event.PUweights, event.npartons], - # samples=["data", "emb", "emb_mc"], + # samples=["data", "embedding", "embedding_mc"], # ), # ) configuration.add_modification_rule( scopes, AppendProducer( producers=[event.GGH_NNLO_Reweighting, event.GGH_WG1_Uncertainties], - samples="ggh", + samples="ggh_htautau", ), ) configuration.add_modification_rule( scopes, - AppendProducer(producers=event.QQH_WG1_Uncertainties, samples="qqh"), + AppendProducer(producers=event.QQH_WG1_Uncertainties, samples="vbf_htautau"), ) configuration.add_modification_rule( scopes, @@ -588,24 +586,25 @@ def build_config( ) configuration.add_modification_rule( scopes, - 
AppendProducer(producers=event.ZPtMassReweighting, samples="dy"), + AppendProducer(producers=event.ZPtMassReweighting, samples="dyjets"), ) # changes needed for data # global scope configuration.add_modification_rule( "global", AppendProducer( - producers=jets.RenameJetsData, samples=["data", "emb", "emb_mc"] + producers=jets.RenameJetsData, samples=["data", "embedding", "embedding_mc"] ), ) configuration.add_modification_rule( "global", - AppendProducer(producers=event.JSONFilter, samples=["data", "emb"]), + AppendProducer(producers=event.JSONFilter, samples=["data", "embedding"]), ) configuration.add_modification_rule( "global", RemoveProducer( - producers=jets.JetEnergyCorrection, samples=["data", "emb", "emb_mc"] + producers=jets.JetEnergyCorrection, + samples=["data", "embedding", "embedding_mc"], ), ) # scope specific @@ -740,7 +739,7 @@ def build_config( pairquantities.VsMuTauIDFlag_2.output_group, q.taujet_pt_2, q.gen_taujet_pt_2, - q.gen_match_2, + q.tau_gen_match_2, q.muon_veto_flag, q.dimuon_veto, q.electron_veto_flag, @@ -761,7 +760,7 @@ def build_config( pairquantities.VsMuTauIDFlag_2.output_group, q.taujet_pt_2, q.gen_taujet_pt_2, - q.gen_match_2, + q.tau_gen_match_2, q.muon_veto_flag, q.dimuon_veto, q.electron_veto_flag, @@ -786,8 +785,8 @@ def build_config( q.taujet_pt_1, q.taujet_pt_2, q.decaymode_1, - q.gen_match_1, - q.gen_match_2, + q.tau_gen_match_1, + q.tau_gen_match_2, ], ) @@ -798,7 +797,7 @@ def build_config( ], ) # not available in nanoAOD test sample - # if "data" not in sample and "emb" not in sample: + # if "data" not in sample and "embedding" not in sample: # configuration.add_outputs( # scopes, # [ @@ -894,11 +893,7 @@ def build_config( }, scopes=["global"], ), - samples=[ - sample - for sample in available_sample_types - if sample not in ["data", "emb", "emb_mc"] - ], + exclude_samples=["data", "embedding", "embedding_mc"], ) configuration.add_shift( SystematicShiftByQuantity( @@ -909,11 +904,7 @@ def build_config( }, 
scopes=["global"], ), - samples=[ - sample - for sample in available_sample_types - if sample not in ["data", "emb", "emb_mc"] - ], + exclude_samples=["data", "embedding", "embedding_mc"], ) ######################### # Jet energy resolution @@ -934,11 +925,7 @@ def build_config( } }, ), - samples=[ - sample - for sample in available_sample_types - if sample not in ["data", "embedding", "embedding_mc"] - ], + exclude_samples=["data", "embedding", "embedding_mc"], ) configuration.add_shift( SystematicShift( @@ -955,11 +942,7 @@ def build_config( } }, ), - samples=[ - sample - for sample in available_sample_types - if sample not in ["data", "embedding", "embedding_mc"] - ], + exclude_samples=["data", "embedding", "embedding_mc"], ) ######################### diff --git a/cmake/AddBaseDependencies.cmake b/cmake/AddBaseDependencies.cmake new file mode 100644 index 00000000..1da5a90c --- /dev/null +++ b/cmake/AddBaseDependencies.cmake @@ -0,0 +1,5 @@ +# add OpenMP and MPI +find_package(OpenMP) +find_package(MPI) +# add nlohmann json +find_package(nlohmann_json) diff --git a/cmake/AddCorrectionlib.cmake b/cmake/AddCorrectionlib.cmake new file mode 100644 index 00000000..b71ebdad --- /dev/null +++ b/cmake/AddCorrectionlib.cmake @@ -0,0 +1,42 @@ +function(install_correctionlib) + execute_process( + COMMAND "${Python_EXECUTABLE}" "-c" + "import correctionlib; print(correctionlib.__version__)" + RESULT_VARIABLE PACKAGE_NOT_FOUND + OUTPUT_VARIABLE PACKAGE_VERSION + OUTPUT_STRIP_TRAILING_WHITESPACE) + if(${PACKAGE_NOT_FOUND} EQUAL 1) + execute_process( + COMMAND ${Python_EXECUTABLE} -m pip install ${_pip_args} + git+https://github.com/cms-nanoAOD/correctionlib.git) + endif() + message(STATUS "Found correctionlib !") +endfunction() + +# Adding correctionlib for scale factor evaluation for now the official pip +# package has some problem in the future "find_python_package(correctionlib +# correctionlib X.X)" should hopefully work +install_correctionlib() +message(STATUS "Setting 
up correctionlib ...") +execute_process( + COMMAND correction config --cmake + OUTPUT_VARIABLE CORRECTION_LIB_ARGS + OUTPUT_STRIP_TRAILING_WHITESPACE) +string(REPLACE -Dcorrectionlib_DIR= "" CORRECTIONLIBPATH ${CORRECTION_LIB_ARGS}) +# if correctionlib comes from cvmfs, change the correctionlibpath accordingly +if(${CORRECTIONLIBPATH} MATCHES "^/cvmfs/") + message(STATUS "Setting up correctionlib from cvmfs ...") + set(USING_CVMFS TRUE) + find_package(correctionlib) + find_library(CORRECTION_LIB_PATH correctionlib) +else() + message(STATUS "Setting up correctionlib from local setup ...") + set(USING_CVMFS FALSE) + find_package(correctionlib REQUIRED PATHS ${CORRECTIONLIBPATH}) + set(CORRECTION_LIB_PATH "${CORRECTIONLIBPATH}/../lib/libcorrectionlib.so") +endif() +set(THREADS_PREFER_PTHREAD_FLAG ON) +find_package(Threads) +find_package(ZLIB) +set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) +message(STATUS "Correctionlib library path: ${CORRECTION_LIB_PATH}") diff --git a/cmake/AddLogging.cmake b/cmake/AddLogging.cmake new file mode 100644 index 00000000..84999e3e --- /dev/null +++ b/cmake/AddLogging.cmake @@ -0,0 +1,34 @@ +message(STATUS "Including spdlog.") +# Build the logging library +include(ExternalProject) +ExternalProject_Add( + spdlog + PREFIX spdlog + GIT_REPOSITORY https://github.com/gabime/spdlog.git + GIT_SHALLOW 1 + GIT_TAG v1.8.5 + CMAKE_ARGS -DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD} + -DCMAKE_BUILD_TYPE=Release + -DCMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR} -DCMAKE_CXX_FLAGS=-fpic + LOG_DOWNLOAD 1 + LOG_CONFIGURE 1 + LOG_BUILD 1 + LOG_INSTALL 1 + BUILD_BYPRODUCTS ${CMAKE_INSTALL_PREFIX}/lib64/libspdlog.a + BUILD_BYPRODUCTS ${CMAKE_INSTALL_PREFIX}/lib/libspdlog.a) + +message(STATUS "Configuring spdlog.") +# Make an imported target out of the build logging library +add_library(logging STATIC IMPORTED) +file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/include" +)# required because the include dir must be existent for + # INTERFACE_INCLUDE_DIRECTORIES 
+include(GNUInstallDirs) # required to populate CMAKE_INSTALL_LIBDIR with lib or + # lib64 required for the destination of libspdlog.a +set_target_properties( + logging + PROPERTIES IMPORTED_LOCATION + "${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}/libspdlog.a" + INTERFACE_INCLUDE_DIRECTORIES "${CMAKE_BINARY_DIR}/include") +add_dependencies(logging spdlog) # enforces to build spdlog before making the + # imported target diff --git a/cmake/AddOnnxruntime.cmake b/cmake/AddOnnxruntime.cmake new file mode 100644 index 00000000..a4ea7c94 --- /dev/null +++ b/cmake/AddOnnxruntime.cmake @@ -0,0 +1,34 @@ +# Find ONNXRuntime first check if we have an LCG stack via LCG_VERSION +# environment variable +if(DEFINED ENV{LCG_VERSION}) + string(REPLACE ":" ";" RUNTIME_PATH "$ENV{LD_LIBRARY_PATH}") + message(STATUS "Found LCG stack, using it to find ONNXRuntime") + find_library( + ONNX_RUNTIME_LIB_PATH + NAMES onnxruntime + HINTS ${RUNTIME_PATH}) + if(ONNX_RUNTIME_LIB_PATH) + # get the real path of the library to find the include directory + get_filename_component(ONNX_RUNTIME_LIB_PATH ${ONNX_RUNTIME_LIB_PATH} + REALPATH) + get_filename_component(ONNX_RUNTIME_INCLUDE_PATH + ${ONNX_RUNTIME_LIB_PATH}/../../include REALPATH) + message(STATUS "ONNXRuntime include path: ${ONNX_RUNTIME_INCLUDE_PATH}/core/session") + include_directories("${ONNX_RUNTIME_INCLUDE_PATH}/core/session") + endif() + message(STATUS "ONNXRuntime library path: ${ONNX_RUNTIME_LIB_PATH}") +else() + set(ONNXRUNTIME_INCLUDE_DIR "" CACHE FILEPATH "Path to ONNXRUNTIME includes") + message(STATUS "Running in CI, take Onnxruntime from pre-build") + if(NOT EXISTS ${ONNXRUNTIME_INCLUDE_DIR}/include/onnxruntime/core/session/onnxruntime_cxx_api.h) + message(SEND_ERROR "Can't find onnxruntime_cxx_api.h in ${ONNXRUNTIME_INCLUDE_DIR}/include/onnxruntime/core/session") + else() + message(STATUS "ONNXRuntime include path: ${ONNXRUNTIME_INCLUDE_DIR}/include/onnxruntime/core/session") + 
include_directories("${ONNXRUNTIME_INCLUDE_DIR}/include/onnxruntime/core/session") + # lib file is found in ${ONNXRUNTIME_INCLUDE_DIR}/build/Linux/Release + find_library( + ONNX_RUNTIME_LIB_PATH + NAMES onnxruntime + HINTS ${ONNXRUNTIME_INCLUDE_DIR}/build/Linux/Release) + endif() +endif() diff --git a/cmake/AddRoot.cmake b/cmake/AddRoot.cmake new file mode 100644 index 00000000..7403cb75 --- /dev/null +++ b/cmake/AddRoot.cmake @@ -0,0 +1,43 @@ +find_package(ROOT 6.26 REQUIRED COMPONENTS ROOTVecOps ROOTDataFrame RooFit + GenVector) + +message(STATUS "") +message(STATUS "Found ROOT with following settings:") +message(STATUS " Version: ${ROOT_VERSION}") +message(STATUS " ROOT executable: ${ROOT_EXECUTABLE}") +message(STATUS " Include directories: ${ROOT_INCLUDE_DIRS}") +message(STATUS " Compiler flags: ${ROOT_CXX_FLAGS}") +message(STATUS "") + +# Add ROOT flags to compile options, e.g. we have to use the same C++ standard +# Note that the flags from the build type, e.g. CMAKE_CXX_FLAGS_RELEASE, are +# automatically appended. You can check this during build time by enabling the +# verbose make output with "VERBOSE=1 make". +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ROOT_CXX_FLAGS}") + +# Use -fconcepts with g++ to silence following warning: warning: use of 'auto' +# in parameter declaration only available with '-fconcepts +if(CMAKE_CXX_COMPILER_ID STREQUAL GNU) + message(STATUS "Attach -fconcepts to the compiler flags to silence warnings.") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fconcepts") +endif() + +# Find the C++ standard from ROOT and set it as the standard of this project We +# require the C++ standard 17 or 20 and don't want to fall back to lower +# versions. 
+set(CMAKE_CXX_STANDARD_REQUIRED ON) +if(${ROOT_CXX_FLAGS} MATCHES "\\-std\\=c\\+\\+17") + message(STATUS "Set c++17 as the C++ standard.") + set(CMAKE_CXX_STANDARD 17) +elseif(${ROOT_CXX_FLAGS} MATCHES "\\-std\\=c\\+\\+20") + message(STATUS "Set c++20 as the C++ standard.") + set(CMAKE_CXX_STANDARD 20) +elseif(${ROOT_CXX_FLAGS} MATCHES "\\-std\\=c\\+\\+14") + message(STATUS "c++14 found, setting c++17 as the C++ standard.") + set(CMAKE_CXX_STANDARD 17) +else() + message( + FATAL_ERROR + "The standard c++17 or higher is required but not found in the ROOT flags: ${ROOT_CXX_FLAGS}" + ) +endif() diff --git a/cmake/Build.cmake b/cmake/Build.cmake new file mode 100644 index 00000000..8a64e242 --- /dev/null +++ b/cmake/Build.cmake @@ -0,0 +1,80 @@ +set(GENERATE_CPP_OUTPUT_FILELIST "${GENERATE_CPP_OUTPUT_DIRECTORY}/files.txt") +if(NOT EXISTS ${GENERATE_CPP_OUTPUT_FILELIST}) + message( + FATAL_ERROR + "List of generated C++ files in ${GENERATE_CPP_OUTPUT_FILELIST} does not exist." + ) +endif() + +# Iterate over files from output filelist and add build and install targets +file(READ ${GENERATE_CPP_OUTPUT_FILELIST} FILELIST) +string(REGEX REPLACE "\n" ";" FILELIST ${FILELIST}) +set(TARGET_NAMES "") +# copy all correction files into the install location +install(DIRECTORY data/ DESTINATION ${INSTALLDIR}/data) +if(PAYLOADS) + install( + DIRECTORY ${CMAKE_SOURCE_DIR}/analysis_configurations/${ANALYSIS}/payloads + DESTINATION ${INSTALLDIR}) +endif() + +# also copy inish script needed for job tarball +install(FILES init.sh DESTINATION ${INSTALLDIR}) +foreach(FILENAME ${FILELIST}) + # STRING(REGEX REPLACE ".cxx" "" TARGET_NAME ${FILENAME}) + cmake_path(GET FILENAME RELATIVE_PART RELATIVE_PATH) + cmake_path(GET FILENAME FILENAME TARGET_FILENAMENAME) + string(REGEX REPLACE ".cxx" "" TARGET_NAME ${TARGET_FILENAMENAME}) + string(REGEX REPLACE "/${TARGET_FILENAMENAME}" "" GENERATED_CODEBASE + ${RELATIVE_PATH}) + + list(APPEND TARGET_NAMES ${TARGET_NAME}) + set(FULL_PATH 
"${GENERATE_CPP_OUTPUT_DIRECTORY}/${FILENAME}") + + # Add build target + message(STATUS "Add build target for file ${FILENAME}.") + + # message(STATUS "FULL_PATH: ${FULL_PATH} / TARGET_NAME: ${TARGET_NAME}") + # message(STATUS "Adding header files from + # ${GENERATE_CPP_OUTPUT_DIRECTORY}/${GENERATED_CODEBASE}/include/*") + file( + GLOB GENERATED_HEADERS + LIST_DIRECTORIES true + "${GENERATE_CPP_OUTPUT_DIRECTORY}/${GENERATED_CODEBASE}/include/*") + file(GLOB GENERATED_CXX_FILES + "${GENERATE_CPP_OUTPUT_DIRECTORY}/${GENERATED_CODEBASE}/src/*/*.cxx") + # message(STATUS "GENERATED_HEADERS ${GENERATED_HEADERS}") + add_executable(${TARGET_NAME} ${FULL_PATH} ${GENERATED_CXX_FILES}) + # Adds a pre-build event to the Target copying the correctionlib.so file into + # the /lib folder in the install directory + target_include_directories( + ${TARGET_NAME} PRIVATE ${CMAKE_SOURCE_DIR} ${ROOT_INCLUDE_DIRS} + $ORIGIN/lib/ lib/) + target_link_libraries( + ${TARGET_NAME} + ROOT::ROOTVecOps + ROOT::ROOTDataFrame + ROOT::RooFit + ROOT::GenVector + logging + correctionlib + nlohmann_json::nlohmann_json + CROWNLIB + ${ONNX_RUNTIME_LIB_PATH}) + add_custom_command( + TARGET ${TARGET_NAME} + PRE_BUILD + COMMAND ${CMAKE_COMMAND} -E copy_if_different "${CORRECTION_LIB_PATH}" + ${INSTALLDIR}/lib/libcorrectionlib.so) + # Find shared libraries next to the executable in the /lib folder + set_target_properties( + ${TARGET_NAME} PROPERTIES BUILD_WITH_INSTALL_RPATH FALSE + LINK_FLAGS "-Wl,-rpath,$ORIGIN/lib") + # Add install target, basically just copying the executable around relative to + # CMAKE_INSTALL_PREFIX + install(TARGETS ${TARGET_NAME} DESTINATION ${INSTALLDIR}) + install( + CODE "execute_process(COMMAND ${CMAKE_SOURCE_DIR}/checks/get-diff.sh ${CMAKE_SOURCE_DIR} ${ANALYSIS} ${INSTALLDIR}/diff )" + ) + +endforeach() diff --git a/cmake/CodeGeneration.cmake b/cmake/CodeGeneration.cmake new file mode 100644 index 00000000..411c3418 --- /dev/null +++ b/cmake/CodeGeneration.cmake @@ -0,0 +1,49 
@@ +# Generate the C++ code +if(FRIENDS) + set(GENERATE_CPP_INPUT_TEMPLATE + "${CMAKE_SOURCE_DIR}/code_generation/analysis_template_friends.cxx") +else() + set(GENERATE_CPP_INPUT_TEMPLATE + "${CMAKE_SOURCE_DIR}/code_generation/analysis_template.cxx") +endif() +set(GENERATE_CPP_SUBSET_TEMPLATE + "${CMAKE_SOURCE_DIR}/code_generation/subset_template.cxx") + +message(STATUS "") +message(STATUS "Generate C++ code with following settings:") +message(STATUS " Output directory: ${GENERATE_CPP_OUTPUT_DIRECTORY}") +message(STATUS " Install directory: ${INSTALLDIR}") +message(STATUS " Template: ${GENERATE_CPP_INPUT_TEMPLATE}") +message(STATUS " Subset template: ${GENERATE_CPP_SUBSET_TEMPLATE}") +message(STATUS " Analysis: ${ANALYSIS}") +message(STATUS " Config: ${CONFIG}") +message(STATUS " Channels: ${SCOPES}") +message(STATUS " Shifts: ${SHIFTS}") +message(STATUS " Samples: ${SAMPLES}") +message(STATUS " Eras: ${ERAS}") +message(STATUS "") + +file(MAKE_DIRECTORY ${GENERATE_CPP_OUTPUT_DIRECTORY}) +# loop over all samples and eras and generate code for each one of them +foreach(ERA IN LISTS ERAS) + foreach(SAMPLE IN LISTS SAMPLES) + message("Generating code for sample ${SAMPLE} and era ${ERA}") + message( + "Running command: ${Python_EXECUTABLE} ${CMAKE_SOURCE_DIR}/generate.py --template ${GENERATE_CPP_INPUT_TEMPLATE} --subset-template ${GENERATE_CPP_SUBSET_TEMPLATE} --output ${GENERATE_CPP_OUTPUT_DIRECTORY} --analysis ${ANALYSIS} --config ${CONFIG} --scopes ${SCOPES} --shifts ${SHIFTS} --sample ${SAMPLE} --era ${ERA} --threads ${THREADS} --debug ${DEBUG_PARSED} --friends ${FRIENDS} --quantities-map ${QUANTITIESMAP}" + ) + execute_process( + COMMAND + ${Python_EXECUTABLE} ${CMAKE_SOURCE_DIR}/generate.py --template + ${GENERATE_CPP_INPUT_TEMPLATE} --subset-template + ${GENERATE_CPP_SUBSET_TEMPLATE} --output + ${GENERATE_CPP_OUTPUT_DIRECTORY} --analysis ${ANALYSIS} --config + ${CONFIG} --scopes ${SCOPES} --shifts ${SHIFTS} --sample ${SAMPLE} --era + ${ERA} --threads ${THREADS} 
--debug ${DEBUG_PARSED} --friends ${FRIENDS} + --quantities-map ${QUANTITIESMAP} + RESULT_VARIABLE ret + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}) + if(ret EQUAL "1") + message(FATAL_ERROR "Code Generation Failed - Exiting !") + endif() + endforeach() +endforeach() diff --git a/cmake/ConfigureCrownlib.cmake b/cmake/ConfigureCrownlib.cmake new file mode 100644 index 00000000..134eb1ce --- /dev/null +++ b/cmake/ConfigureCrownlib.cmake @@ -0,0 +1,54 @@ +# build a shared lib from all CROWN functions +include_directories(${CMAKE_SOURCE_DIR}/src) +include_directories(${CMAKE_SOURCE_DIR}/include) +file(GLOB SOURCES_1 ${CMAKE_SOURCE_DIR}/src/*.cxx) +file(GLOB SOURCES_2 ${CMAKE_SOURCE_DIR}/src/utility/*.cxx + ${CMAKE_SOURCE_DIR}/src/RecoilCorrections/*.cxx + ${CMAKE_SOURCE_DIR}/src/SVFit/*.cxx) +set(SOURCES ${SOURCES_1} ${SOURCES_2}) + +if(BUILD_CROWNLIB_ONLY) + message(STATUS "Building only the CROWNLIB library") + add_library(CROWNLIB SHARED ${SOURCES}) + target_include_directories(CROWNLIB PRIVATE ${CMAKE_SOURCE_DIR} + ${ROOT_INCLUDE_DIRS}) + target_link_libraries( + CROWNLIB + ROOT::ROOTVecOps + ROOT::ROOTDataFrame + ROOT::RooFit + ROOT::GenVector + logging + correctionlib + nlohmann_json::nlohmann_json + ${ONNX_RUNTIME_LIB_PATH}) + install(TARGETS CROWNLIB DESTINATION ${INSTALLDIR}/lib) + return() +endif() +# check if CROWNLIB is already installed +find_library( + CROWNLIB_FOUND CROWNLIB HINTS ${INSTALLDIR}/lib ${CMAKE_CURRENT_BINARY_DIR} + ${CMAKE_CURRENT_BINARY_DIR}/lib) +if(NOT CROWNLIB_FOUND OR REBUILD_CROWN_LIB) + message(STATUS "CROWNLIB not found, building it") + # CROWNLIB not found, build it + add_library(CROWNLIB SHARED ${SOURCES}) + target_include_directories(CROWNLIB PRIVATE ${CMAKE_SOURCE_DIR} + ${ROOT_INCLUDE_DIRS}) + target_link_libraries( + CROWNLIB + ROOT::ROOTVecOps + ROOT::ROOTDataFrame + ROOT::RooFit + ROOT::GenVector + logging + correctionlib + nlohmann_json::nlohmann_json + ${ONNX_RUNTIME_LIB_PATH}) + install(TARGETS CROWNLIB DESTINATION 
${INSTALLDIR}/lib) +else() + message(STATUS "Found CROWNLIB in ${CROWNLIB_FOUND}") + install(FILES ${CROWNLIB_FOUND} DESTINATION ${INSTALLDIR}/lib) + link_directories(${INSTALLDIR}/lib ${CMAKE_CURRENT_BINARY_DIR} + ${CMAKE_CURRENT_BINARY_DIR}/lib) +endif() diff --git a/cmake/ConfigurePython.cmake b/cmake/ConfigurePython.cmake new file mode 100644 index 00000000..b381f700 --- /dev/null +++ b/cmake/ConfigurePython.cmake @@ -0,0 +1,41 @@ +# Find Python 3 +find_package(Python 3.9 REQUIRED COMPONENTS Interpreter) + +# detect virtualenv and set Pip args accordingly +if(DEFINED ENV{VIRTUAL_ENV} OR DEFINED ENV{CONDA_PREFIX}) + set(_pip_args) +else() + set(_pip_args "--user") +endif() + +function(find_python_package PYPINAME NAME MIN_VERSION) + execute_process( + COMMAND "${Python_EXECUTABLE}" "-c" + "import ${NAME}; print(${NAME}.__version__)" + RESULT_VARIABLE PACKAGE_NOT_FOUND + OUTPUT_VARIABLE PACKAGE_VERSION + OUTPUT_STRIP_TRAILING_WHITESPACE) + if(${PACKAGE_NOT_FOUND} EQUAL 1) + execute_process(COMMAND ${Python_EXECUTABLE} -m pip install ${PYPINAME} + ${_pip_args}) + execute_process( + COMMAND "${Python_EXECUTABLE}" "-c" + "import ${NAME}; print(${NAME}.__version__)" + RESULT_VARIABLE PACKAGE_NOT_FOUND + OUTPUT_VARIABLE PACKAGE_VERSION + OUTPUT_STRIP_TRAILING_WHITESPACE) + if(${PACKAGE_NOT_FOUND} EQUAL 1) + message(FATAL_ERROR "Failed to import ${PYPINAME} or get version.") + endif() + endif() + if(PACKAGE_VERSION VERSION_LESS MIN_VERSION) + message( + FATAL_ERROR + "The version of Python package ${PYPINAME} is too old (found ${PACKAGE_VERSION}, require at least ${MIN_VERSION})." 
+ ) + endif() + message( + STATUS + "Found Python package ${PYPINAME} (require ${MIN_VERSION}, found ${PACKAGE_VERSION})" + ) +endfunction() diff --git a/cmake/ParseArguments.cmake b/cmake/ParseArguments.cmake new file mode 100644 index 00000000..b135eeaa --- /dev/null +++ b/cmake/ParseArguments.cmake @@ -0,0 +1,169 @@ +# read debug and optimized flags from command line +option(BUILD_CROWNLIB_ONLY "Build only the CROWNLIB library" OFF) +set(REBUILD_CROWN_LIB "false") # used for non-production mode + +if(NOT DEFINED DEBUG) + message( + STATUS + "No Debug mode set, activate with -DDEBUG=true --> compile with debug symbols and run code generation with debug output" + ) + set(DEBUG "false") +endif() + +if(NOT DEFINED OPTIMIZED) + message( + STATUS + "No Optimization not set, building with -DOPTIMIZED=true --> slower build times but faster runtimes" + ) + set(OPTIMIZED "true") +endif() +# Convert args to lowercase +string(TOLOWER "${DEBUG}" DEBUG_PARSED) +string(TOLOWER "${OPTIMIZED}" OPTIMIZED_PARSED) +if(DEBUG_PARSED STREQUAL "true") + message(STATUS "Debug mode") + set(CMAKE_BUILD_TYPE + "Debug" + CACHE + STRING + "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel." + ) + set(CMAKE_CXX_FLAGS_DEBUG + "-g" + CACHE STRING "Set default compiler flags for build type Debug") +else() + set(DEBUG_PARSED "false") + if(OPTIMIZED_PARSED STREQUAL "true") + message(STATUS "Optimized mode") + set(CMAKE_BUILD_TYPE + "Release" + CACHE + STRING + "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel." 
+ ) + set(CMAKE_CXX_FLAGS_RELEASE + "-O3 -DNDEBUG" + CACHE STRING "Set default compiler flags for build type Release") + find_program(CCACHE_FOUND ccache) + if(CCACHE_FOUND) + message(STATUS "ccache found at ${CCACHE_FOUND}, using it for compilation") + set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${CCACHE_FOUND}") + endif() + else() + message(STATUS "Unoptimized mode") + set(CMAKE_BUILD_TYPE + "Release" + CACHE + STRING + "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel." + ) + set(CMAKE_CXX_FLAGS_RELEASE + "-DNDEBUG" + CACHE STRING "Set default compiler flags for build type Release") + endif() +endif() +# Only parse additional args if not building only the CROWNLIB library +if(NOT BUILD_CROWNLIB_ONLY) + + if(NOT DEFINED ANALYSIS) + message( + FATAL_ERROR + "Please specify the Analysis to be used with -DANALYSIS=my_analysis_name" + ) + endif() + # if analysis is set, check the folder to find any potential payload files to + # be used + file(GLOB PAYLOADS + ${CMAKE_SOURCE_DIR}/analysis_configurations/${ANALYSIS}/payloads/*) + if(NOT PAYLOADS) + message( + STATUS + "No payload files found in ${CMAKE_SOURCE_DIR}/analysis_configurations/${ANALYSIS}/payloads/ for analysis ${ANALYSIS}" + ) + else() + message( + STATUS + "Found payload files in ${CMAKE_SOURCE_DIR}/analysis_configurations/${ANALYSIS}/payloads/ for analysis ${ANALYSIS}" + ) + endif() + + if(NOT DEFINED CONFIG) + message( + FATAL_ERROR + "Please specify the config to be used with -DCONFIG=my_config_name") + endif() + + if(NOT DEFINED SCOPES) + message( + FATAL_ERROR + "No scope specificed, set the scopes via comma seperated list e.g. -DSCOPES=et,mt,tt,em" + ) + endif() + + if(NOT DEFINED SHIFTS) + message( + STATUS + "No shifts specificed, using -DSHIFTS=all. If you want to run nominal only, use -DSHIFTS=none" + ) + set(SHIFTS "all") + endif() + + if(NOT DEFINED QUANTITIESMAP) + message( + STATUS + "No quantities map specified, none will be used. 
If you want to produce friends, you have to specify quantities maps for all friend files e.g. -DQUANTITIESMAP=quantities_map_1.json,quantities_map_2.json. The input can be a comma-separated list of JSON files and/or root files (for debugging purposes)." + ) + set(FRIENDS "false") + set(QUANTITIESMAP "none") + else() + set(FRIENDS "true") + endif() + + if(NOT DEFINED SAMPLES) + message( + FATAL_ERROR "Please specify the samples to be used with -DSAMPLES=samples" + ) + endif() + + if(NOT DEFINED ERAS) + message(FATAL_ERROR "Please specify the eras to be used with -DERAS=eras") + endif() + + if(NOT DEFINED PRODUCTION) + message( + STATUS + "No production mode set --> will rebuild the CROWNLIB library if necessary" + ) + set(REBUILD_CROWN_LIB "true") + endif() + if(NOT DEFINED THREADS) + message( + STATUS "No threads set, using single threaded mode with -DTHREADS=1") + set(THREADS "1") + endif() + string(REPLACE "," ";" ERAS "${ERAS}") + string(REPLACE "," ";" SAMPLES "${SAMPLES}") + message(STATUS "---------------------------------------------") + message(STATUS "|> Set up analysis for scopes ${SCOPES}.") + message(STATUS "|> Set up analysis for ${ANALYSIS}.") + message(STATUS "|> Set up analysis for config ${CONFIG}.") + message(STATUS "|> Set up analysis for samples ${SAMPLES}.") + message(STATUS "|> Set up analysis for eras ${ERAS}.") + message(STATUS "|> Set up analysis for shifts ${SHIFTS}.") + message(STATUS "|> Set up analysis with ${THREADS} threads.") + message(STATUS "|> Set up analysis with debug mode : ${DEBUG_PARSED}.") + message( + STATUS "|> Set up analysis with optimization mode : ${OPTIMIZED_PARSED}.") + message(STATUS "|> generator is set to ${CMAKE_GENERATOR}") + # Define the default compiler flags for different build types, if different + # from the cmake defaults The build type should be set so that the correct + # compiler flags are chosen + message( + STATUS + "|> Code generation arguments: --analysis ${ANALYSIS} --config ${CONFIG} --scopes 
${SCOPES} --shifts ${SHIFTS} --samples ${SAMPLES} --eras ${ERAS} --threads ${THREADS} --debug ${DEBUG_PARSED} --friends ${FRIENDS} --quantities_map ${QUANTITIESMAP}" + ) + message(STATUS "---------------------------------------------") +else() # if building only the CROWNLIB library + message(STATUS "Building only the CROWNLIB library") + message(STATUS "No additional arguments parsed") +endif() diff --git a/code_generation/analysis_template.cxx b/code_generation/analysis_template.cxx index 9b560d79..348afc3d 100644 --- a/code_generation/analysis_template.cxx +++ b/code_generation/analysis_template.cxx @@ -27,6 +27,7 @@ #include #include #include +#include "onnxruntime_cxx_api.h" #include #include // {INCLUDES} @@ -112,7 +113,7 @@ int main(int argc, char *argv[]) { // initialize df ROOT::RDataFrame df0(basetree, input_files); - // ROOT::RDF::Experimental::AddProgressBar(df0); + // ROOT::RDF::Experimental::AddProgressBar(df0); ROOT 6.30 not available for CS8 on lcg Logger::get("main")->info("Starting Setup of Dataframe with {} events", nevents); std::vector> cutReports; @@ -178,6 +179,11 @@ int main(int argc, char *argv[]) { cutflow.SetTitle("cutflow"); // iterate through the cutflow vector and fill the histogram with the // .GetPass() values + if (scope_counter >= cutReports.size()) { + Logger::get("main")->critical( + "Cutflow vector is too small, this should not happen"); + return 1; + } for (auto cut = cutReports[scope_counter].begin(); cut != cutReports[scope_counter].end(); cut++) { cutflow.SetBinContent( diff --git a/code_generation/analysis_template_friends.cxx b/code_generation/analysis_template_friends.cxx index 62a2c441..eab05050 100644 --- a/code_generation/analysis_template_friends.cxx +++ b/code_generation/analysis_template_friends.cxx @@ -27,6 +27,7 @@ #include #include #include +#include "onnxruntime_cxx_api.h" #include #include #include @@ -155,7 +156,7 @@ int main(int argc, char *argv[]) { } // initialize df ROOT::RDataFrame df0(dataset); - // 
ROOT::RDF::Experimental::AddProgressBar(df0); + // ROOT::RDF::Experimental::AddProgressBar(df0); ROOT 6.30 not available for CS8 on lcg // print all available branches to the log Logger::get("main")->debug("Available branches:"); for (auto const &branch : df0.GetColumnNames()) { diff --git a/code_generation/code_generation.py b/code_generation/code_generation.py index d9599762..70038a18 100644 --- a/code_generation/code_generation.py +++ b/code_generation/code_generation.py @@ -347,6 +347,7 @@ def write_code(self, calls: str, includes: str, run_commands: str) -> None: ) .replace("{ANALYSISTAG}", '"Analysis={}"'.format(self.analysis_name)) .replace("{CONFIGTAG}", '"Config={}"'.format(self.config_name)) + .replace("{PROGRESS_CALLBACK}", self.set_process_tracking()) .replace("{OUTPUT_QUANTITIES}", self.set_output_quantities()) .replace("{SHIFT_QUANTITIES_MAP}", self.set_shift_quantities_map()) .replace("{QUANTITIES_SHIFT_MAP}", self.set_quantities_shift_map()) @@ -547,6 +548,8 @@ def generate_run_commands(self) -> str: outputname=self._outputfiles_generated[scope], outputstring=outputstring, ) + # add code for tracking the progress + runcommands += self.set_process_tracking() # add code for the time taken for the dataframe setup runcommands += self.set_setup_printout() # add trigger of dataframe execution, for nonempty scopes @@ -681,6 +684,37 @@ def zero_events_fallback(self) -> str: return printout + def set_process_tracking(self) -> str: + """This function replaces the template placeholder for the process tracking with the correct process tracking. 
+ + Returns: + The code to be added to the template + """ + tracking = "" + scope = self.scopes[-1] + tracking += " ULong64_t {scope}_processed = 0;\n".format(scope=scope) + tracking += " std::mutex {scope}_bar_mutex;\n".format(scope=scope) + tracking += " auto c_{scope} = df{counter}_{scope}.Count();\n".format( + counter=self.main_counter[scope], scope=scope + ) + tracking += " c_{scope}.OnPartialResultSlot(quantile, [&{scope}_bar_mutex, &{scope}_processed, &quantile, &nevents](unsigned int /*slot*/, ULong64_t /*_c*/) {{".format( + scope=scope + ) + tracking += ( + "\n std::lock_guard lg({scope}_bar_mutex);\n".format( + scope=scope + ) + ) + tracking += " {scope}_processed += quantile;\n".format(scope=scope) + tracking += " float percentage = 100 * (float){scope}_processed / (float)nevents;\n".format( + scope=scope + ) + tracking += ' Logger::get("main")->info("{{0:d}} / {{1:d}} ({{2:.2f}} %) Events processed ...", {scope}_processed, nevents, percentage);\n'.format( + scope=scope + ) + tracking += " });\n" + return tracking + def set_shift_quantities_map(self) -> str: """ This function is used to generate a mapping of all quantities and the shifts, @@ -764,7 +798,7 @@ def set_quantities_shift_map(self) -> str: global_commands = [] outputset = list(set(self.output_commands[scope] + global_commands)) # now split by __ and get a set of all the shifts per variable - for i, output in enumerate(outputset): + for output in outputset: try: quantity, shift = output.split("__") except ValueError: diff --git a/code_generation/configuration.py b/code_generation/configuration.py index c6d30552..39f88c14 100644 --- a/code_generation/configuration.py +++ b/code_generation/configuration.py @@ -86,7 +86,7 @@ def __init__( self.available_sample_types = set(available_sample_types) self.available_eras = set(available_eras) self.available_scopes = set(available_scopes) - self.available_outputs: QuantitiesStore = {} + self.available_outputs: Dict[str, QuantitiesStore] = {} 
self.available_shifts: Dict[str, Set[str]] = {} self.global_scope = "global" @@ -184,7 +184,9 @@ def setup_defaults(self) -> None: self.unpacked_producers[scope] = {} self.outputs[scope] = set() self.shifts[scope] = {} - self.available_outputs[scope] = set() + self.available_outputs[scope] = {} + for sampletype in self.available_sample_types: + self.available_outputs[scope][sampletype] = set() self.config_parameters[scope] = {} self.available_shifts[scope] = set() self._set_sample_parameters() @@ -229,9 +231,10 @@ def add_producers( producers = [producers] for scope in scopes: self.producers[scope].extend(producers) - self.available_outputs[scope].update( - CollectProducersOutput(producers, scope) - ) + for sampletype in self.available_sample_types: + self.available_outputs[scope][sampletype].update( + CollectProducersOutput(producers, scope) + ) self.unpack_producergroups(scope, producers) def unpack_producergroups( @@ -316,6 +319,7 @@ def add_shift( self, shift: Union[SystematicShift, SystematicShiftByQuantity], samples: Union[str, List[str], None] = None, + exclude_samples: Union[str, List[str], None] = None, ) -> None: """ Function used to add a systematics shift to the configuration. During this step, the shift is validated and applied. @@ -324,10 +328,32 @@ def add_shift( shift: The shift to be added. This must be a SystematicShift object. samples: The samples to which the shift should be applied. This can be a list of samples or a single sample. If ths option is not set, the shift is applied, regardless of the sample type. + exclude_samples: The samples to which the shift should not be applied. This can be a list of samples or a single sample. if exclude_samples is set, samples cannot be used. 
Returns: None """ + if exclude_samples is not None and samples is not None: + raise ConfigurationError( + f"You cannot use samples and exclude_samples at the same time -> Shift {shift}, samples {samples}, exclude_samples {exclude_samples}" + ) + if samples is not None: + if isinstance(samples, str): + samples = [samples] + for sample in samples: + if sample not in self.available_sample_types: + raise ConfigurationError( + f"Sampletype {sample} is not available -> Shift {shift}, available_sample_types {self.available_sample_types}, sample_types {samples}" + ) + if exclude_samples is not None: + if isinstance(exclude_samples, str): + exclude_samples = [exclude_samples] + for excluded_sample in exclude_samples: + if excluded_sample not in self.available_sample_types: + raise ConfigurationError( + f"Sampletype {excluded_sample} is not available for exclusion -> Shift {shift}, available_sample_types {self.available_sample_types}, excluded_sample_types {exclude_samples}" + ) + samples = list(self.available_sample_types - set(exclude_samples)) if self._is_valid_shift(shift): log.debug("Shift {} is valid".format(shift.shiftname)) if not isinstance(shift, SystematicShift): @@ -428,6 +454,7 @@ def add_modification_rule( raise TypeError("Rule must be of type ProducerRule") if not isinstance(scopes, list): scopes = [scopes] + rule.set_available_sampletypes(self.available_sample_types) rule.set_scopes(scopes) rule.set_global_scope(self.global_scope) self.rules.add(rule) @@ -495,7 +522,8 @@ def _remove_empty_scopes(self) -> None: del self.outputs[scope] del self.shifts[scope] del self.config_parameters[scope] - del self.available_outputs[scope] + for sampletype in self.available_sample_types: + del self.available_outputs[scope][sampletype] def _apply_rules(self) -> None: """ @@ -507,16 +535,17 @@ def _apply_rules(self) -> None: rule.apply( self.sample, self.producers, self.unpacked_producers, self.outputs ) - # also update the set of available outputs - for scope in 
rule.affected_scopes(): - if isinstance(rule, RemoveProducer): - self.available_outputs[scope] - CollectProducersOutput( - rule.affected_producers(), scope - ) - else: - self.available_outputs[scope].update( - CollectProducersOutput(rule.affected_producers(), scope) - ) + # also update the set of available outputs if the affected sample is the current sample + if self.sample in rule.samples: + for scope in rule.affected_scopes(): + if isinstance(rule, RemoveProducer): + self.available_outputs[scope][ + self.sample + ] -= CollectProducersOutput(rule.affected_producers(), scope) + else: + self.available_outputs[scope][self.sample].update( + CollectProducersOutput(rule.affected_producers(), scope) + ) def optimize(self) -> None: """ @@ -563,8 +592,8 @@ def _validate_outputs(self) -> None: ) # merge the two sets of outputs provided_outputs = ( - self.available_outputs[scope] - | self.available_outputs[self.global_scope] + self.available_outputs[scope][self.sample] + | self.available_outputs[self.global_scope][self.sample] ) missing_outputs = required_outputs - provided_outputs if len(missing_outputs) > 0: @@ -668,9 +697,21 @@ def _validate_parameters(self) -> None: required_parameters = {} for scope in self.scopes: + log.debug(" Collecting required parameters for scope {}".format(scope)) required_parameters[scope] = set() for producer in self.producers[scope]: - required_parameters[scope] |= producer.parameters[scope] + log.debug(" Collecting parameters for producer {}".format(producer)) + try: + required_parameters[scope] |= producer.parameters[scope] + except KeyError: + log.error( + f"Producer {producer} is not correctly setup for scope {scope}" + ) + raise ConfigurationError( + "Producer {} is not correctly setup for scope {}".format( + producer, scope + ) + ) # now check, which parameters are set in the configuration, but not used by any producer for scope in self.scopes: log.info("------------------------------------") diff --git a/code_generation/exceptions.py 
b/code_generation/exceptions.py index 0ffd579d..9695a522 100644 --- a/code_generation/exceptions.py +++ b/code_generation/exceptions.py @@ -61,6 +61,23 @@ def __init__(self, sample: str, available_samples: Union[Set[str], List[str]]): super().__init__(self.message) +class SampleRuleConfigurationError(ConfigurationError): + """ + Exception raised when the sample type used for a Rule provided by the user is not valid. + """ + + def __init__( + self, + sample: str, + rule, + available_samples: Union[Set[str], List[str]], + ): + self.message = "Sampletype {} cannot be used in Rule {} since the type is not defined. Available samples types are {}".format( + sample, rule, available_samples + ) + super().__init__(self.message) + + class EraConfigurationError(ConfigurationError): """ Exception raised when the era configuration provided by the user is not valid. diff --git a/code_generation/friend_trees.py b/code_generation/friend_trees.py index 320d7e1c..4300a4a4 100644 --- a/code_generation/friend_trees.py +++ b/code_generation/friend_trees.py @@ -303,9 +303,12 @@ def _shift_producer_inputs( # only shift if necessary if shift in self.input_quantities_mapping[scope].keys(): inputs_to_shift = [] - for input in inputs: - if input.name in self.input_quantities_mapping[scope][shift]: - inputs_to_shift.append(input) + for input_quantity in inputs: + if ( + input_quantity.name + in self.input_quantities_mapping[scope][shift] + ): + inputs_to_shift.append(input_quantity) if len(inputs_to_shift) > 0: log.debug("Adding shift %s to producer %s", shift, producer) producer.shift(shiftname, scope) @@ -335,7 +338,7 @@ def _validate_outputs(self) -> None: for scope in [scope for scope in self.scopes]: required_outputs = set(output for output in self.outputs[scope]) # merge the two sets of outputs - provided_outputs = self.available_outputs[scope] + provided_outputs = self.available_outputs[scope][self.sample] missing_outputs = required_outputs - provided_outputs if len(missing_outputs) > 
0: raise InvalidOutputError(scope, missing_outputs) @@ -396,6 +399,7 @@ def add_modification_rule( raise TypeError("Rule must be of type ProducerRule") if not isinstance(scopes, list): scopes = [scopes] + rule.set_available_sampletypes(self.available_sample_types) rule.set_scopes(scopes) # TODO Check if this works without a global scope if self.global_scope is not None: @@ -414,6 +418,10 @@ def expanded_configuration(self) -> Configuration: expanded_configuration[scope] = {} if self.run_nominal: log.debug("Adding nominal in scope {}".format(scope)) + if scope not in self.config_parameters.keys(): + raise ConfigurationError( + "Scope {} not found in configuration parameters".format(scope) + ) expanded_configuration[scope]["nominal"] = self.config_parameters[scope] if len(self.shifts[scope]) > 0: for shift in self.shifts[scope]: diff --git a/code_generation/rules.py b/code_generation/rules.py index 8db71a56..0a0e5cfe 100644 --- a/code_generation/rules.py +++ b/code_generation/rules.py @@ -9,6 +9,7 @@ TProducerStore, ) from code_generation.quantity import QuantitiesStore +from code_generation.exceptions import SampleRuleConfigurationError log = logging.getLogger(__name__) @@ -17,7 +18,8 @@ class ProducerRule: def __init__( self, producers: TProducerInput, - samples: Union[str, List[str]], + samples: Union[str, List[str]] = [], + exclude_samples: Union[str, List[str]] = [], scopes: Union[str, List[str]] = "global", invert: bool = False, update_output: bool = True, @@ -26,7 +28,8 @@ def __init__( Args: producers: A list of producers or producer groups to be modified. - samples: A list of samples, for which the rule should be applied. + samples: A list of samples, for which the rule should be applied. Only one of samples and exclude_samples can be defined. + exclude_samples: A list of samples, for which the rule should not be applied. Only one of samples and exclude_samples can be defined. scopes: The scopes, in which the rule should be applied. Defaults to "global". 
invert: If set, the invert of the rule is applied. Defaults to False. update_output: If set, the output quantities are updated. Defaults to True. @@ -34,15 +37,42 @@ if isinstance(producers, ProducerGroup) or isinstance(producers, Producer): producers = [producers] self.producers = producers - if isinstance(samples, str): - samples = [samples] - self.samples = samples if isinstance(scopes, str): scopes = [scopes] self.scopes = scopes self.invert = invert self.update_output = update_output self.global_scope = "global" + if isinstance(exclude_samples, str): + self.exclude_samples = [exclude_samples] + else: + self.exclude_samples = exclude_samples + if isinstance(samples, str): + self.samples = [samples] + else: + self.samples = samples + + def set_available_sampletypes(self, available_samples) -> None: + # sanitize input + if isinstance(available_samples, str): + self.available_samples = [available_samples] + else: + self.available_samples = available_samples + # make sure that either samples or exclude_samples are defined + if self.exclude_samples == [] and self.samples == []: + raise ValueError( + f"ProducerRule: Either samples or exclude_samples have to be defined!: (Rule: {self}, Samples: {self.samples}, Excluded Samples: {self.exclude_samples})" + ) + if self.exclude_samples != [] and self.samples != []: + raise ValueError( + f"ProducerRule: Both samples and are exclude_samples are defined, pick one!: (Rule: {self}, Samples: {self.samples}, Excluded Samples: {self.exclude_samples})" + ) + # make sure that the sampletypes are valid + self.validate_sampletypes(self.samples) + self.validate_sampletypes(self.exclude_samples) + # if exclude_samples are defined, we have to construct the list of samples from the list of available samples + if self.exclude_samples != []: + self.samples = list(set(self.available_samples) - set(self.exclude_samples)) def set_scopes(self, scopes: List[str]) -> None: if isinstance(scopes, str): scopes = [scopes] @@ -58,6 +88,19 @@ def 
affected_producers(self) -> List[Union[Producer, ProducerGroup]]: def set_global_scope(self, global_scope: str) -> None: self.global_scope = global_scope + def validate_sampletypes(self, sampletypes: List[str]) -> None: + """Check that every given sample type is available, raising an error for any unknown type. + + Args: + sampletypes (List[str]): List of sample types to validate. + + Returns: + None + """ + for sample in sampletypes: + if sample not in self.available_samples: + raise SampleRuleConfigurationError(sample, self, self.available_samples) + # Evaluate whether modification should be applied depending on sample and inversion flag def is_applicable(self, sample: str) -> bool: applicable = sample in self.samples @@ -66,7 +109,11 @@ def is_applicable(self, sample: str) -> bool: return applicable # Placeholder for the actual operation on a list. To be overwritten by inheriting classes - def update_producers(self, producers_to_be_updated: TProducerStore) -> None: + def update_producers( + self, + producers_to_be_updated: TProducerStore, + unpacked_producers: TProducerStore, + ) -> None: log.error("Operation not implemented for ProducerRule base class!") def update_outputs(self, outputs_to_be_updated: QuantitiesStore) -> None: @@ -80,6 +127,7 @@ def apply( outputs_to_be_updated: QuantitiesStore, ) -> None: if self.is_applicable(sample): + log.critical(f"Applying rule {self} for sample {sample}") log.debug("For sample {}, applying >> {} ".format(sample, self)) self.update_producers(producers_to_be_updated, unpacked_producers) self.update_outputs(outputs_to_be_updated) diff --git a/code_generation/subset_template.cxx b/code_generation/subset_template.cxx index 525edfb2..6425d10e 100644 --- a/code_generation/subset_template.cxx +++ b/code_generation/subset_template.cxx @@ -24,6 +24,7 @@ #include "include/utility/Logger.hxx" #include "include/utility/OnnxSessionManager.hxx" #include +#include "onnxruntime_cxx_api.h" #include #include #include diff --git 
a/data/embedding/eleES_2016postVFPUL.json.gz b/data/embedding/eleES_2016postVFPUL.json.gz new file mode 100644 index 00000000..22501ed4 Binary files /dev/null and b/data/embedding/eleES_2016postVFPUL.json.gz differ diff --git a/data/embedding/eleES_2016preVFPUL.json.gz b/data/embedding/eleES_2016preVFPUL.json.gz new file mode 100644 index 00000000..7f207c3a Binary files /dev/null and b/data/embedding/eleES_2016preVFPUL.json.gz differ diff --git a/data/embedding/eleES_2017UL.json.gz b/data/embedding/eleES_2017UL.json.gz new file mode 100644 index 00000000..f659f75e Binary files /dev/null and b/data/embedding/eleES_2017UL.json.gz differ diff --git a/data/embedding/eleES_2018UL.json.gz b/data/embedding/eleES_2018UL.json.gz new file mode 100644 index 00000000..0f4ffb1c Binary files /dev/null and b/data/embedding/eleES_2018UL.json.gz differ diff --git a/data/embedding/embeddingselection_2016postVFPUL.json.gz b/data/embedding/embeddingselection_2016postVFPUL.json.gz index 7905c88a..3b4686a0 100644 Binary files a/data/embedding/embeddingselection_2016postVFPUL.json.gz and b/data/embedding/embeddingselection_2016postVFPUL.json.gz differ diff --git a/data/embedding/embeddingselection_2016preVFPUL.json.gz b/data/embedding/embeddingselection_2016preVFPUL.json.gz index 468fc7e8..1b75c7a8 100644 Binary files a/data/embedding/embeddingselection_2016preVFPUL.json.gz and b/data/embedding/embeddingselection_2016preVFPUL.json.gz differ diff --git a/data/embedding/embeddingselection_2017UL.json.gz b/data/embedding/embeddingselection_2017UL.json.gz new file mode 100644 index 00000000..dba9b9fd Binary files /dev/null and b/data/embedding/embeddingselection_2017UL.json.gz differ diff --git a/data/embedding/embeddingselection_2018UL.json.gz b/data/embedding/embeddingselection_2018UL.json.gz new file mode 100644 index 00000000..cf8c0c8c Binary files /dev/null and b/data/embedding/embeddingselection_2018UL.json.gz differ diff --git a/data/embedding/muon_2016postVFPUL.json.gz 
b/data/embedding/muon_2016postVFPUL.json.gz index fde53ed5..76bd9ad2 100644 Binary files a/data/embedding/muon_2016postVFPUL.json.gz and b/data/embedding/muon_2016postVFPUL.json.gz differ diff --git a/data/embedding/muon_2016preVFPUL.json.gz b/data/embedding/muon_2016preVFPUL.json.gz index 448f97d3..dc003c13 100644 Binary files a/data/embedding/muon_2016preVFPUL.json.gz and b/data/embedding/muon_2016preVFPUL.json.gz differ diff --git a/data/embedding/muon_2017UL.json.gz b/data/embedding/muon_2017UL.json.gz index 8620a6b7..558da347 100644 Binary files a/data/embedding/muon_2017UL.json.gz and b/data/embedding/muon_2017UL.json.gz differ diff --git a/data/embedding/muon_2018UL.json.gz b/data/embedding/muon_2018UL.json.gz index d0750023..2f3cdff7 100644 Binary files a/data/embedding/muon_2018UL.json.gz and b/data/embedding/muon_2018UL.json.gz differ diff --git a/data/jsonpog-integration b/data/jsonpog-integration index e13b3122..c45f3174 160000 --- a/data/jsonpog-integration +++ b/data/jsonpog-integration @@ -1 +1 @@ -Subproject commit e13b3122138d0abc0f52e129184772a38805d231 +Subproject commit c45f3174f9815640d9a6ef22bc5fc091e64f155f diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 00000000..fee2bf2d --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,23 @@ +# start from rootproject/root:6.30.04-ubuntu22.04 +FROM rootproject/root:6.30.04-ubuntu22.04 + +# Install necessary packages +RUN apt-get update -y +RUN apt-get install -y ca-certificates gpg wget git make python3-pip openmpi-bin libopenmpi-dev libboost-all-dev openssh-client nlohmann-json3-dev + +RUN python3 -m pip install GitPython && python3 -m pip install git+https://github.com/cms-nanoAOD/correctionlib.git + +# update cmake +RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /usr/share/keyrings/kitware-archive-keyring.gpg >/dev/null && \ +echo 'deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] 
https://apt.kitware.com/ubuntu/ jammy main' | tee /etc/apt/sources.list.d/kitware.list >/dev/null && \ +apt-get update -y && \ +apt-get install -y cmake + +# Install onnxruntime +RUN git clone --recursive https://github.com/microsoft/onnxruntime.git +RUN cd onnxruntime && ./build.sh --config Release --update --build --build_shared_lib --parallel --allow_running_as_root + +# Install tini +RUN apt-get install -y tini + +ENTRYPOINT ["/usr/bin/tini", "--"] \ No newline at end of file diff --git a/docs/sphinx_source/requirements.txt b/docs/sphinx_source/requirements.txt index 188f51e6..75f09270 100644 --- a/docs/sphinx_source/requirements.txt +++ b/docs/sphinx_source/requirements.txt @@ -1 +1,2 @@ -breathe \ No newline at end of file +breathe +sphinx-rtd-theme \ No newline at end of file diff --git a/include/basefunctions.hxx b/include/basefunctions.hxx index a3a65725..5ec94511 100644 --- a/include/basefunctions.hxx +++ b/include/basefunctions.hxx @@ -349,6 +349,19 @@ inline auto FilterAbsMin(const float &cut) { }; } +/// Function to apply an exact filter requirement to an integer quantity. 
+/// Returns true if the value is equal to the given value +/// +/// \param cut The value of the filter +/// +/// \returns a lambda function to be used in RDF Define +inline auto FilterEqualInt(const int &cut) { + return [cut](const ROOT::RVec &values) { + ROOT::RVec mask = values == cut; + return mask; + }; +} + /// Function to combine two RVec Masks by multiplying the two RVec elementwise /// /// \param mask_1 The first mask diff --git a/include/pairselection.hxx b/include/pairselection.hxx index e1c9b2e8..9f54ae48 100644 --- a/include/pairselection.hxx +++ b/include/pairselection.hxx @@ -53,7 +53,9 @@ auto PairSelectionAlgo(const float &mindeltaR); namespace leptonic { auto ElMuPairSelectionAlgo(const float &mindeltaR); auto PairSelectionAlgo(const float &mindeltaR); +auto PairSelectionAlgoOSPreferred(const float &mindeltaR); auto ZBosonPairSelectionAlgo(const float &mindeltaR); +auto ZBosonPairSelectionAlgoOSPreferred(const float &mindeltaR); } // end namespace leptonic namespace mutau { @@ -95,10 +97,19 @@ ROOT::RDF::RNode PairSelection(ROOT::RDF::RNode df, const std::string &pairname, const float &mindeltaR); +ROOT::RDF::RNode PairSelectionOSPreferred(ROOT::RDF::RNode df, + const std::vector &input_vector, + const std::string &pairname, const float &mindeltaR); + ROOT::RDF::RNode ZBosonPairSelection(ROOT::RDF::RNode df, const std::vector &input_vector, const std::string &pairname, const float &mindeltaR); + +ROOT::RDF::RNode ZBosonPairSelectionOSPreferred( + ROOT::RDF::RNode df, const std::vector &input_vector, + const std::string &pairname, const float &mindeltaR); + } // end namespace mumu namespace elel { @@ -154,4 +165,4 @@ ROOT::RDF::RNode PairSelection(ROOT::RDF::RNode df, const float &mindeltaR, const float &btag_WP_value); } // end namespace bb_pairselection -#endif /* GUARD_PAIRSELECTION_H */ \ No newline at end of file +#endif /* GUARD_PAIRSELECTION_H */ diff --git a/include/physicsobjects.hxx b/include/physicsobjects.hxx index 527a4d45..d947bcd9 
100644 --- a/include/physicsobjects.hxx +++ b/include/physicsobjects.hxx @@ -79,6 +79,8 @@ ROOT::RDF::RNode CheckForDiLeptonPairs( const std::string &leptons_phi, const std::string &leptons_mass, const std::string &leptons_charge, const std::string &leptons_mask, const float dR_cut); +ROOT::RDF::RNode SelectInt(ROOT::RDF::RNode df, const std::string &maskname, + const std::string &nameID, const int &IDvalue); namespace muon { ROOT::RDF::RNode CutID(ROOT::RDF::RNode df, const std::string &maskname, const std::string &nameID); @@ -150,7 +152,10 @@ ROOT::RDF::RNode PtCorrection_byValue(ROOT::RDF::RNode df, const std::string &corrected_pt, const std::string &pt, const std::string &eta, const float &sf_barrel, const float &sf_endcap); - +ROOT::RDF::RNode PtCorrection(ROOT::RDF::RNode df, const std::string &corrected_pt, + const std::string &pt, const std::string &eta, + const std::string &sf_barrel, const std::string &sf_endcap, + const std::string &sf_file, const std::string &jsonESname); ROOT::RDF::RNode CutID(ROOT::RDF::RNode df, const std::string &maskname, const std::string &nameID); ROOT::RDF::RNode CutCBID(ROOT::RDF::RNode df, const std::string &maskname, diff --git a/include/scalefactors.hxx b/include/scalefactors.hxx index 602ffed2..aec1e378 100644 --- a/include/scalefactors.hxx +++ b/include/scalefactors.hxx @@ -39,21 +39,20 @@ ROOT::RDF::RNode trigger(ROOT::RDF::RNode df, const std::string &pt, } // namespace muon namespace tau { -ROOT::RDF::RNode -id_vsJet_lt(ROOT::RDF::RNode df, const std::string &pt, - const std::string &decayMode, const std::string &genMatch, - const std::vector &selectedDMs, const std::string &wp, - const std::string &vsEle_wp, const std::string &sf_vsjet_tau30to35, - const std::string &sf_vsjet_tau35to40, - const std::string &sf_vsjet_tau40to500, - const std::string &sf_vsjet_tau500to1000, - const std::string &sf_vsjet_tau1000toinf, - const std::string &sf_dependence, const std::string &id_output, - const std::string &sf_file, const 
std::string &idAlgorithm); +ROOT::RDF::RNode id_vsJet_lt( + ROOT::RDF::RNode df, const std::string &pt, const std::string &decayMode, + const std::string &genMatch, const std::vector &selectedDMs, + const std::string &wp, const std::string &sf_vsjet_tau30to35, + const std::string &sf_vsjet_tau35to40, + const std::string &sf_vsjet_tau40to500, + const std::string &sf_vsjet_tau500to1000, + const std::string &sf_vsjet_tau1000toinf, const std::string &sf_dependence, + const std::string &vsele_wp, const std::string &id_output, + const std::string &sf_file, const std::string &idAlgorithm); ROOT::RDF::RNode id_vsJet_lt_embedding( ROOT::RDF::RNode df, const std::string &pt, const std::string &decayMode, const std::string &genMatch, const std::vector &selectedDMs, - const std::string &wp, const std::string &vsEle_wp, + const std::string &wp, const std::string &vsele_wp, const std::string &sf_vsjet_tau20to25, const std::string &sf_vsjet_tau25to30, const std::string &sf_vsjet_tau30to35, @@ -78,10 +77,10 @@ ROOT::RDF::RNode id_vsJet_lt_embedding( ROOT::RDF::RNode id_vsJet_tt( ROOT::RDF::RNode df, const std::string &pt, const std::string &decayMode, const std::string &genMatch, const std::vector &selectedDMs, - const std::string &wp, const std::string &vsEle_wp, - const std::string &sf_vsjet_tauDM0, const std::string &sf_vsjet_tauDM1, - const std::string &sf_vsjet_tauDM10, const std::string &sf_vsjet_tauDM11, - const std::string &sf_dependence, const std::string &id_output, + const std::string &wp, const std::string &sf_vsjet_tauDM0, + const std::string &sf_vsjet_tauDM1, const std::string &sf_vsjet_tauDM10, + const std::string &sf_vsjet_tauDM11, const std::string &sf_dependence, + const std::string &id_output, const std::string &vsele_wp, const std::string &sf_file, const std::string &idAlgorithm); ROOT::RDF::RNode id_vsEle(ROOT::RDF::RNode df, const std::string &eta, diff --git a/include/tripleselection.hxx b/include/tripleselection.hxx index 6e60cba6..cbb5a152 100644 --- 
a/include/tripleselection.hxx +++ b/include/tripleselection.hxx @@ -35,7 +35,7 @@ namespace three_flavor { auto TripleSelectionAlgo(const float &mindeltaR_leptau, const float &mindeltaR_leplep, const std::string &triple); } // end namespace three_flavor namespace two_flavor { -auto TripleSelectionAlgo(const float &mindeltaR_leptau, const float &mindeltaR_leplep); +auto TripleSelectionAlgo(const float &mindeltaR_leptau, const float &mindeltaR_leplep, const std::string &ss_or_os); } // end namespace two_flavor namespace lep_tautau { auto TripleSelectionAlgo(const float &mindeltaR_leptau, const float &mindeltaR_tautau); @@ -63,7 +63,7 @@ namespace mumutau { ROOT::RDF::RNode TripleSelection(ROOT::RDF::RNode df, const std::vector &input_vector, const std::string &triplename, - const float &mindeltaR_leptau, const float &mindeltaR_leplep); + const float &mindeltaR_leptau, const float &mindeltaR_leplep, const std::string &ss_or_os); } namespace mu_tautau { ROOT::RDF::RNode TripleSelection(ROOT::RDF::RNode df, diff --git a/init.sh b/init.sh index c79de9a8..fc4ecfdc 100644 --- a/init.sh +++ b/init.sh @@ -16,6 +16,9 @@ else distro=$(lsb_release -i | cut -f2) os_version=$(lsb_release -r | cut -f2) fi +distro=${distro//[[:space:]]/} +distro="${distro//Linux/}" +distro="${distro//linux/}" echo "Setting up CROWN for $distro Version $os_version" # check if the distro is centos if [[ "$distro" == "CentOS" ]]; then @@ -29,7 +32,7 @@ if [[ "$distro" == "CentOS" ]]; then echo "Unsupported CentOS version, exiting..." 
return 0 fi -elif [[ "$distro" == "RedHatEnterprise" || "$distro" == "AlmaLinux" || "$distro" == "RockyLinux" ]]; then +elif [[ "$distro" == "RedHatEnterprise" || "$distro" == "Alma" || "$distro" == "Rocky" ]]; then if [[ ${os_version:0:1} == "8" ]]; then # elif uname -a | grep -E 'el8' -q # no lcg 103 available for centOS 8 source /cvmfs/sft.cern.ch/lcg/views/LCG_102/x86_64-centos8-gcc11-opt/setup.sh diff --git a/src/pairselection.cxx b/src/pairselection.cxx index fbd38c29..e31aa053 100644 --- a/src/pairselection.cxx +++ b/src/pairselection.cxx @@ -871,6 +871,135 @@ auto PairSelectionAlgo(const float &mindeltaR) { return selected_pair; }; } +/** + * @brief Lambda function containing the algorithm to select the pair of leptons + * with the highest pt giving a preference to OS pairs first + * + * @param mindeltaR the separation between the two leptons has to be larger + than this value + * + * @return vector with two entries, the first entry is the leading lepton + index, the second entry is the trailing lepton index + */ +auto PairSelectionAlgoOSPreferred(const float &mindeltaR) { + Logger::get("PairSelectionOSPreferred")->debug("Setting up algorithm"); + return [mindeltaR](const ROOT::RVec &lepton_pt, + const ROOT::RVec &lepton_eta, + const ROOT::RVec &lepton_phi, + const ROOT::RVec &lepton_mass, + const ROOT::RVec &lepton_charge, + const ROOT::RVec &lepton_mask) { + // first entry is the leading lepton index, + // second entry is the trailing lepton index + ROOT::RVec selected_pair = {-1, -1}; + const auto original_lepton_indices = ROOT::VecOps::Nonzero(lepton_mask); + // we need at least two fitting leptons + if (original_lepton_indices.size() < 2) { + return selected_pair; + } + const auto good_pts = + ROOT::VecOps::Take(lepton_pt, original_lepton_indices); + const auto good_etas = + ROOT::VecOps::Take(lepton_eta, original_lepton_indices); + const auto good_phis = + ROOT::VecOps::Take(lepton_phi, original_lepton_indices); + const auto good_masses = + 
ROOT::VecOps::Take(lepton_mass, original_lepton_indices); + const auto good_charges = + ROOT::VecOps::Take(lepton_charge, original_lepton_indices); + auto fourVecs = ROOT::VecOps::Construct( + good_pts, good_etas, good_phis, good_masses); + auto selected_lepton_indices = std::vector{-1, -1}; + auto selected_pts = std::vector{-1, -1}; + auto combinations = + ROOT::VecOps::Combinations(original_lepton_indices, 2); + if (original_lepton_indices.size() > 2) { + Logger::get("leptonic::PairSelectionAlgoOSPreferred") + ->debug("More than two suitable leptons found, printing " + "combinations.... "); + for (auto &comb : combinations) { + Logger::get("leptonic::PairSelectionAlgoOSPreferred") + ->debug("index: {}", comb); + }; + Logger::get("leptonic::PairSelectionAlgoOSPreferred") + ->debug("---------------------"); + } + + bool os_pair_found = false; + float total_et = -1.0; + float largest_total_et = -1.0; + + for (int n = 0; n < combinations[0].size(); n++) { + auto lepton_1 = fourVecs[combinations[0][n]]; + auto lepton_2 = fourVecs[combinations[1][n]]; + auto deltaR = ROOT::Math::VectorUtil::DeltaR(lepton_1, lepton_2); + auto os = (good_charges[combinations[0][n]] * + good_charges[combinations[1][n]]) < 0; + total_et = (lepton_1 + lepton_2).Et(); + Logger::get("leptonic::PairSelectionAlgoOSPreferred") + ->debug("deltaR check: {}", deltaR); + if (deltaR > mindeltaR) { + if (lepton_1.Pt() >= selected_pts[0] && + lepton_2.Pt() >= selected_pts[1]) { + if (os && (total_et > largest_total_et) || largest_total_et < 0) { + os_pair_found = true; + largest_total_et = total_et; + selected_pts[0] = lepton_1.Pt(); + selected_pts[1] = lepton_2.Pt(); + selected_lepton_indices[0] = + original_lepton_indices[combinations[0][n]]; + selected_lepton_indices[1] = + original_lepton_indices[combinations[1][n]]; + } + } + } + } + if (!os_pair_found) { + + Logger::get("PairSelectionAlgoOSPreferred") + ->debug("No suitable OS pair found, trying SS pairs"); + + for (int n = 0; n < 
combinations[0].size(); n++) { + auto lepton_1 = fourVecs[combinations[0][n]]; + auto lepton_2 = fourVecs[combinations[1][n]]; + auto deltaR = + ROOT::Math::VectorUtil::DeltaR(lepton_1, lepton_2); + total_et = (lepton_1 + lepton_2).Et(); + Logger::get("leptonic::PairSelectionAlgoOSPreferred") + ->debug("deltaR check: {}", deltaR); + if (deltaR > mindeltaR) { + if (lepton_1.Pt() >= selected_pts[0] && + lepton_2.Pt() >= selected_pts[1]) { + if (total_et > largest_total_et || largest_total_et < 0) { + largest_total_et = total_et; + selected_pts[0] = lepton_1.Pt(); + selected_pts[1] = lepton_2.Pt(); + selected_lepton_indices[0] = + original_lepton_indices[combinations[0][n]]; + selected_lepton_indices[1] = + original_lepton_indices[combinations[1][n]]; + } + } + } + } + }; + + if (good_pts[selected_lepton_indices[0]] < + good_pts[selected_lepton_indices[1]]) { + std::swap(selected_lepton_indices[0], selected_lepton_indices[1]); + } + Logger::get("leptonic::PairSelectionAlgoOSPreferred") + ->debug("good pts: {}", good_pts); + Logger::get("leptonic::PairSelectionAlgoOSPreferred") + ->debug("selected_lepton_indices: {}, {}", + selected_lepton_indices[0], selected_lepton_indices[1]); + + selected_pair = {static_cast(selected_lepton_indices[0]), + static_cast(selected_lepton_indices[1])}; + + return selected_pair; + }; +}; /** * @brief Lambda function containg the algorithm to select the pair * of leptons closest to the Z mass @@ -969,7 +1098,163 @@ auto ZBosonPairSelectionAlgo(const float &mindeltaR) { return selected_pair; }; } +/** + * @brief Lambda function containing the algorithm to select the pair + * of leptons closest to the Z mass giving a preference to OS pairs first + * + * @param mindeltaR the separation between the two leptons has to be larger + * than this value + * @return vector with two entries, the first entry is the leading lepton + * index, the second entry is the trailing lepton index + */ +auto ZBosonPairSelectionAlgoOSPreferred(const float &mindeltaR) 
{ + Logger::get("PairSelection")->debug("Setting up algorithm"); + return [mindeltaR](const ROOT::RVec &lepton_pt, + const ROOT::RVec &lepton_eta, + const ROOT::RVec &lepton_phi, + const ROOT::RVec &lepton_mass, + const ROOT::RVec &lepton_charge, + const ROOT::RVec &lepton_mask) { + // first entry is the leading lepton index, + // second entry is the trailing lepton index + ROOT::RVec selected_pair = {-1, -1}; + const auto original_lepton_indices = ROOT::VecOps::Nonzero(lepton_mask); + // we need at least two fitting leptons + if (original_lepton_indices.size() < 2) { + return selected_pair; + } + Logger::get("ZBosonPairSelectionAlgoOSPreferred") + ->debug("Running algorithm on good leptons"); + const auto good_pts = + ROOT::VecOps::Take(lepton_pt, original_lepton_indices); + const auto good_etas = + ROOT::VecOps::Take(lepton_eta, original_lepton_indices); + const auto good_phis = + ROOT::VecOps::Take(lepton_phi, original_lepton_indices); + const auto good_masses = + ROOT::VecOps::Take(lepton_mass, original_lepton_indices); + const auto good_charges = + ROOT::VecOps::Take(lepton_charge, original_lepton_indices); + auto fourVecs = ROOT::VecOps::Construct( + good_pts, good_etas, good_phis, good_masses); + + // --- + + auto selected_lepton_indices = std::vector{-1, -1}; + if (original_lepton_indices.size() > 2) { + Logger::get("ZBosonPairSelectionAlgoOSPreferred") + ->debug("More than two potential leptons found. running " + "algorithm to find Z Boson lepton pairs"); + Logger::get("ZBosonPairSelectionAlgoOSPreferred") + ->debug("original_lepton_indices: {}", original_lepton_indices); + for (auto &fourVec : fourVecs) { + Logger::get("ZBosonPairSelectionAlgoOSPreferred") + ->debug("fourVec: {}", fourVec); + } + } + auto combinations = + ROOT::VecOps::Combinations(original_lepton_indices, 2); + Logger::get("ZBosonPairSelectionAlgoOSPreferred") + ->debug("printing combinations.... 
"); + for (auto &comb : combinations) { + Logger::get("ZBosonPairSelectionAlgoOSPreferred") + ->debug("index: {}", comb); + }; + Logger::get("ZBosonPairSelectionAlgoOSPreferred") + ->debug("---------------------"); + + // --- + + float mass_difference = -1.0; + float zmass_candidate = -1.0; + bool os_pair_found = false; + float nominal_z_mass = 91.2; + + for (int n = 0; n < combinations[0].size(); n++) { + auto lepton_1 = fourVecs[combinations[0][n]]; + auto lepton_2 = fourVecs[combinations[1][n]]; + auto deltaR = ROOT::Math::VectorUtil::DeltaR(lepton_1, lepton_2); + auto os = (good_charges[combinations[0][n]] * + good_charges[combinations[1][n]]) < 0; + zmass_candidate = (lepton_1 + lepton_2).M(); + Logger::get("ZBosonPairSelectionAlgoOSPreferred") + ->debug("eta_1 {} / pt_1 {} ", lepton_1.Eta(), lepton_1.Pt()); + Logger::get("ZBosonPairSelectionAlgoOSPreferred") + ->debug("eta_2 {} / pt_2 {} ", lepton_2.Eta(), lepton_2.Pt()); + Logger::get("ZBosonPairSelectionAlgoOSPreferred") + ->debug("deltaR check: {}", deltaR); + Logger::get("ZBosonPairSelectionAlgoOSPreferred") + ->debug("mass check: {}", zmass_candidate); + if (deltaR > mindeltaR) { + if (std::abs(nominal_z_mass - zmass_candidate) < + mass_difference || + mass_difference < 0) { + if (os) { + os_pair_found = true; + mass_difference = + std::abs(nominal_z_mass - zmass_candidate); + selected_lepton_indices[0] = + original_lepton_indices[combinations[0][n]]; + selected_lepton_indices[1] = + original_lepton_indices[combinations[1][n]]; + } + } + } + } + + if (!os_pair_found) { + Logger::get("ZBosonPairSelectionAlgoOSPreferred") + ->debug("No suitable OS pair found, trying SS pairs"); + mass_difference = -1.0; + zmass_candidate = -1.0; + + for (int n = 0; n < combinations[0].size(); n++) { + auto lepton_1 = fourVecs[combinations[0][n]]; + auto lepton_2 = fourVecs[combinations[1][n]]; + auto deltaR = + ROOT::Math::VectorUtil::DeltaR(lepton_1, lepton_2); + zmass_candidate = (lepton_1 + lepton_2).M(); + 
Logger::get("ZBosonPairSelectionAlgoOSPreferred") + ->debug("eta_1 {} / pt_1 {} ", lepton_1.Eta(), + lepton_1.Pt()); + Logger::get("ZBosonPairSelectionAlgoOSPreferred") + ->debug("eta_2 {} / pt_2 {} ", lepton_2.Eta(), + lepton_2.Pt()); + Logger::get("ZBosonPairSelectionAlgoOSPreferred") + ->debug("deltaR check: {}", deltaR); + Logger::get("ZBosonPairSelectionAlgoOSPreferred") + ->debug("mass check: {}", zmass_candidate); + if (deltaR > mindeltaR) { + if (std::abs(nominal_z_mass - zmass_candidate) < + mass_difference || + mass_difference < 0) { + mass_difference = + std::abs(nominal_z_mass - zmass_candidate); + selected_lepton_indices[0] = + original_lepton_indices[combinations[0][n]]; + selected_lepton_indices[1] = + original_lepton_indices[combinations[1][n]]; + } + } + } + } + + if (good_pts[selected_lepton_indices[0]] < + good_pts[selected_lepton_indices[1]]) { + std::swap(selected_lepton_indices[0], selected_lepton_indices[1]); + } + Logger::get("ZBosonPairSelectionAlgoOSPreferred") + ->debug("good pts: {}", good_pts); + Logger::get("ZBosonPairSelectionAlgoOSPreferred") + ->debug("selected_lepton_indices: {}, {}", + selected_lepton_indices[0], selected_lepton_indices[1]); + + selected_pair = {static_cast(selected_lepton_indices[0]), + static_cast(selected_lepton_indices[1])}; + return selected_pair; + }; +}; } // namespace leptonic namespace mutau { @@ -1164,6 +1449,36 @@ ROOT::RDF::RNode PairSelection(ROOT::RDF::RNode df, input_vector); return df1; } +/** + * @brief Function used to select the pair of muons with the highest + * pt giving a preference to OS pairs first + * + * @param df the input dataframe + * @param input_vector vector of strings containing the columns + * needed for the alogrithm. 
For the muon pair selection the required + parameters are: + - muon_pt + - muon_eta + - muon_phi + - muon_mass + - muon_mask containing the flags whether the muon is a good muon or not + * @param pairname name of the new column containing the pair index + * @param mindeltaR the separation between the two muons has to be larger + than + * this value + * @return a new dataframe with the pair index column added + */ +ROOT::RDF::RNode +PairSelectionOSPreferred(ROOT::RDF::RNode df, + const std::vector &input_vector, + const std::string &pairname, const float &mindeltaR) { + Logger::get("MuMuPairSelection")->debug("Setting up mumu pair building"); + auto df1 = df.Define( + pairname, + ditau_pairselection::leptonic::PairSelectionAlgoOSPreferred(mindeltaR), + input_vector); + return df1; +} /** * @brief Function used to select the pair of muons closest to the Z * mass @@ -1194,7 +1509,36 @@ ZBosonPairSelection(ROOT::RDF::RNode df, input_vector); return df1; } - +/** + * @brief Function used to select the pair of muons closest to the Z + * mass giving a preference to OS pairs first + * + * @param df the input dataframe + * @param input_vector . 
For the Z boson muon pair selection the required + parameters are: + - muon_pt + - muon_eta + - muon_phi + - muon_mass + - muon_mask containing the flags whether the muon is a good muon or not + * @param pairname name of the new column containing the pair index + * @param mindeltaR the seperation between the two muons has to be larger + than + * this value + * @return a new dataframe with the pair index column added + */ +ROOT::RDF::RNode ZBosonPairSelectionOSPreferred( + ROOT::RDF::RNode df, const std::vector &input_vector, + const std::string &pairname, const float &mindeltaR) { + Logger::get("ZMuMuPairSelectionOSPrefered") + ->debug("Setting up Z boson mumu pair building with OS preference"); + auto df1 = df.Define( + pairname, + ditau_pairselection::leptonic::ZBosonPairSelectionAlgoOSPreferred( + mindeltaR), + input_vector); + return df1; +} // end namespace mumu } // end namespace mumu namespace elel { /** @@ -1807,4 +2151,4 @@ ROOT::RDF::RNode PairSelection(ROOT::RDF::RNode df, return df1; } } // end namespace bb_pairselection -#endif /* GUARD_PAIRSELECTION_H */ \ No newline at end of file +#endif /* GUARD_PAIRSELECTION_H */ diff --git a/src/physicsobjects.cxx b/src/physicsobjects.cxx index 0126d487..f56c4aa6 100644 --- a/src/physicsobjects.cxx +++ b/src/physicsobjects.cxx @@ -406,6 +406,21 @@ ROOT::RDF::RNode CheckForDiLeptonPairs( leptons_charge, leptons_mask}); return df1; } +/// Function to select objects based on matching a specific integer value +/// +/// \param[in] df the input dataframe +/// \param[out] maskname the name of the new mask to be added as column to +/// the dataframe +/// \param[in] nameID name of the ID column in the NanoAOD +/// \param[in] IDvalue value that has to match +/// +/// \return a dataframe containing the new mask +ROOT::RDF::RNode SelectInt(ROOT::RDF::RNode df, const std::string &maskname, + const std::string &nameID, const int &IDvalue) { + auto df1 = + df.Define(maskname, basefunctions::FilterEqualInt(IDvalue), {nameID}); + 
return df1; +} /// Muon specific functions namespace muon { @@ -968,6 +983,55 @@ PtCorrection_byValue(ROOT::RDF::RNode df, const std::string &corrected_pt, df.Define(corrected_pt, electron_pt_correction_lambda, {pt, eta}); return df1; } +/// Function to correct electron pt, based on correctionlib file +/// +/// \param[in] df the input dataframe +/// \param[out] corrected_pt name of the corrected tau pt to be calculated +/// \param[in] pt name of the raw tau pt +/// \param[in] eta name of raw tau eta +/// \param[in] sf_barrel scale factor to be applied to electrons in the barrel +/// \param[in] sf_endcap scale factor to be applied to electrons in the endcap +/// \param[in] sf_file: +/// \param[in] jsonESname name of the tau energy correction in the json file +/// +/// \return a dataframe containing the new mask +ROOT::RDF::RNode +PtCorrection(ROOT::RDF::RNode df, const std::string &corrected_pt, + const std::string &pt, const std::string &eta, + const std::string &sf_barrel, const std::string &sf_endcap, + const std::string &sf_file, const std::string &jsonESname) { + auto evaluator = + correction::CorrectionSet::from_file(sf_file)->at(jsonESname); + auto electron_pt_correction_lambda = + [evaluator, sf_barrel, sf_endcap](const ROOT::RVec &pt_values, + const ROOT::RVec &eta) { + ROOT::RVec corrected_pt_values(pt_values.size()); + for (int i = 0; i < pt_values.size(); i++) { + if (abs(eta.at(i)) <= 1.479) { + auto sf = evaluator->evaluate( + {"barrel", sf_barrel}); + corrected_pt_values[i] = pt_values.at(i) * sf; + Logger::get("eleEnergyCorrection") + ->debug("barrel: ele pt before {}, ele pt after {}, sf {}", + pt_values.at(i), corrected_pt_values.at(i), + sf); + } else if (abs(eta.at(i)) > 1.479) { + auto sf = evaluator->evaluate( + {"endcap", sf_endcap}); + corrected_pt_values[i] = pt_values.at(i) * sf; + Logger::get("eleEnergyCorrection") + ->debug("endcap: ele pt before {}, ele pt after {}, sf {}", + pt_values.at(i), corrected_pt_values.at(i), + sf); + } else { 
+ corrected_pt_values[i] = pt_values.at(i); + } + } + return corrected_pt_values; + }; + auto df1 = df.Define(corrected_pt, electron_pt_correction_lambda, {pt, eta}); + return df1; +} /// Function to cut electrons based on the electron MVA ID /// /// \param[in] df the input dataframe diff --git a/src/scalefactors.cxx b/src/scalefactors.cxx index 60257548..cf05ba61 100644 --- a/src/scalefactors.cxx +++ b/src/scalefactors.cxx @@ -320,29 +320,29 @@ for nominal for nominal * and "systup"/"systdown" the up/down variation * @param sf_dependence "pt", "dm" or "eta" based scale factors + * @param vsele_wp working point of the vsEle cut * @param id_output name of the id scale factor column * @param sf_file path to the file with the tau scale factors * @param idAlgorithm name of the tau id scale factor * @return a new dataframe containing the new column */ -ROOT::RDF::RNode -id_vsJet_lt(ROOT::RDF::RNode df, const std::string &pt, - const std::string &decayMode, const std::string &genMatch, - const std::vector &selectedDMs, const std::string &wp, - const std::string &vsEle_wp, const std::string &sf_vsjet_tau30to35, - const std::string &sf_vsjet_tau35to40, - const std::string &sf_vsjet_tau40to500, - const std::string &sf_vsjet_tau500to1000, - const std::string &sf_vsjet_tau1000toinf, - const std::string &sf_dependence, const std::string &id_output, - const std::string &sf_file, const std::string &idAlgorithm) { +ROOT::RDF::RNode id_vsJet_lt( + ROOT::RDF::RNode df, const std::string &pt, const std::string &decayMode, + const std::string &genMatch, const std::vector &selectedDMs, + const std::string &wp, const std::string &sf_vsjet_tau30to35, + const std::string &sf_vsjet_tau35to40, + const std::string &sf_vsjet_tau40to500, + const std::string &sf_vsjet_tau500to1000, + const std::string &sf_vsjet_tau1000toinf, const std::string &sf_dependence, + const std::string &vsele_wp, const std::string &id_output, + const std::string &sf_file, const std::string &idAlgorithm) { 
Logger::get("TauIDvsJet_lt_SF") ->debug("Setting up function for tau id vsJet sf"); Logger::get("TauIDvsJet_lt_SF")->debug("ID - Name {}", idAlgorithm); auto evaluator = correction::CorrectionSet::from_file(sf_file)->at(idAlgorithm); - auto idSF_calculator = [evaluator, wp, vsEle_wp, sf_vsjet_tau30to35, + auto idSF_calculator = [evaluator, wp, vsele_wp, sf_vsjet_tau30to35, sf_vsjet_tau35to40, sf_vsjet_tau40to500, sf_vsjet_tau500to1000, sf_vsjet_tau1000toinf, sf_dependence, selectedDMs, @@ -394,34 +394,34 @@ id_vsJet_lt(ROOT::RDF::RNode df, const std::string &pt, selectedDMs.end()) { Logger::get("TauIDvsJet_lt_SF") ->debug("ID {} - pt {}, decayMode {}, genMatch {}, wp {}, " - "vsEle_wp {}, " + "vsele_wp {}, " "sf_vsjet_tau30to35 {}, sf_vsjet_tau35to40 {}, " "sf_vsjet_tau40to500 {}, sf_vsjet_tau500to1000 {}, " "sf_vsjet_tau1000toinf {}, sf_dependence {}", - idAlgorithm, pt, decayMode, genMatch, wp, vsEle_wp, + idAlgorithm, pt, decayMode, genMatch, wp, vsele_wp, sf_vsjet_tau30to35, sf_vsjet_tau35to40, sf_vsjet_tau40to500, sf_vsjet_tau500to1000, sf_vsjet_tau1000toinf, sf_dependence); if (pt >= 30.0 && pt < 35.0) { sf = evaluator->evaluate( {pt, decayMode, static_cast(genMatch), wp, - vsEle_wp, sf_vsjet_tau30to35, sf_dependence}); + vsele_wp, sf_vsjet_tau30to35, sf_dependence}); } else if (pt >= 35.0 && pt < 40.0) { sf = evaluator->evaluate( {pt, decayMode, static_cast(genMatch), wp, - vsEle_wp, sf_vsjet_tau35to40, sf_dependence}); + vsele_wp, sf_vsjet_tau35to40, sf_dependence}); } else if (pt >= 40.0 && pt < 500.0) { sf = evaluator->evaluate( {pt, decayMode, static_cast(genMatch), wp, - vsEle_wp, sf_vsjet_tau40to500, sf_dependence}); + vsele_wp, sf_vsjet_tau40to500, sf_dependence}); } else if (pt >= 500.0 && pt < 1000.0) { sf = evaluator->evaluate( {pt, decayMode, static_cast(genMatch), wp, - vsEle_wp, sf_vsjet_tau500to1000, sf_dependence}); + vsele_wp, sf_vsjet_tau500to1000, sf_dependence}); } else if (pt >= 1000.0 && pt < 2000.0) { sf = evaluator->evaluate( {pt, 
decayMode, static_cast(genMatch), wp, - vsEle_wp, sf_vsjet_tau1000toinf, sf_dependence}); + vsele_wp, sf_vsjet_tau1000toinf, sf_dependence}); } else { sf = 1.; } @@ -445,7 +445,7 @@ channel with the correctionlib for tauembedded samples * @param selectedDMs list of allowed decay modes for which a scale factor * should be calculated * @param wp working point of the ID cut - * @param vsEle_wp working point of the vs electron ID + * @param vsele_wp working point of the vs electron ID * @param sf_vsjet_tau20to25 id for the variation of the scale factor "sf" for nominal * and "systup"/"systdown" the up/down variation @@ -470,7 +470,7 @@ for nominal ROOT::RDF::RNode id_vsJet_lt_embedding( ROOT::RDF::RNode df, const std::string &pt, const std::string &decayMode, const std::string &genMatch, const std::vector &selectedDMs, - const std::string &wp, const std::string &vsEle_wp, + const std::string &wp, const std::string &vsele_wp, const std::string &sf_vsjet_tau20to25, const std::string &sf_vsjet_tau25to30, const std::string &sf_vsjet_tau30to35, @@ -485,7 +485,7 @@ ROOT::RDF::RNode id_vsJet_lt_embedding( ->debug("ID - Name {}", idAlgorithm); auto evaluator = correction::CorrectionSet::from_file(sf_file)->at(idAlgorithm); - auto idSF_calculator = [evaluator, wp, vsEle_wp, sf_vsjet_tau20to25, + auto idSF_calculator = [evaluator, wp, vsele_wp, sf_vsjet_tau20to25, sf_vsjet_tau25to30, sf_vsjet_tau30to35, sf_vsjet_tau35to40, sf_vsjet_tau40toInf, sf_dependence, selectedDMs, @@ -501,33 +501,33 @@ ROOT::RDF::RNode id_vsJet_lt_embedding( selectedDMs.end()) { Logger::get("TauIDvsJet_lt_SF_embedding") ->debug("ID {} - pt {}, decayMode {}, genMatch {}, wp {}, " - "vsEle_wp {}, " + "vsele_wp {}, " "sf_vsjet_tau20to25 {}, sf_vsjet_tau25to30 {}, " "sf_vsjet_tau30to35{}, sf_vsjet_tau35to40 {}, " "sf_vsjet_tau40toInf {}, sf_dependence {}", - idAlgorithm, pt, decayMode, genMatch, wp, vsEle_wp, + idAlgorithm, pt, decayMode, genMatch, wp, vsele_wp, sf_vsjet_tau20to25, sf_vsjet_tau25to30, 
sf_vsjet_tau30to35, sf_vsjet_tau35to40, sf_vsjet_tau40toInf, sf_dependence); if (pt >= 20.0 && pt < 25.0) { sf = evaluator->evaluate( - {pt, decayMode, static_cast(genMatch), wp, vsEle_wp, + {pt, decayMode, static_cast(genMatch), wp, vsele_wp, sf_vsjet_tau20to25, sf_dependence}); } else if (pt >= 25.0 && pt < 30.0) { sf = evaluator->evaluate( - {pt, decayMode, static_cast(genMatch), wp, vsEle_wp, + {pt, decayMode, static_cast(genMatch), wp, vsele_wp, sf_vsjet_tau25to30, sf_dependence}); } else if (pt >= 30.0 && pt < 35.0) { sf = evaluator->evaluate( - {pt, decayMode, static_cast(genMatch), wp, vsEle_wp, + {pt, decayMode, static_cast(genMatch), wp, vsele_wp, sf_vsjet_tau30to35, sf_dependence}); } else if (pt >= 35.0 && pt < 40.0) { sf = evaluator->evaluate( - {pt, decayMode, static_cast(genMatch), wp, vsEle_wp, + {pt, decayMode, static_cast(genMatch), wp, vsele_wp, sf_vsjet_tau35to40, sf_dependence}); } else if (pt >= 40.0 && pt < 2000.0) { sf = evaluator->evaluate( - {pt, decayMode, static_cast(genMatch), wp, vsEle_wp, + {pt, decayMode, static_cast(genMatch), wp, vsele_wp, sf_vsjet_tau40toInf, sf_dependence}); } else { sf = 1.; @@ -723,6 +723,7 @@ nominal nominal * and "systup"/"systdown" the up/down variation * @param sf_dependence "pt", "dm" or "eta" based scale factors + * @param vsele_wp working point of the vsEle cut * @param id_output name of the id scale factor column * @param sf_file path to the file with the tau scale factors * @param idAlgorithm name of the tau id scale factor @@ -731,10 +732,10 @@ nominal ROOT::RDF::RNode id_vsJet_tt( ROOT::RDF::RNode df, const std::string &pt, const std::string &decayMode, const std::string &genMatch, const std::vector &selectedDMs, - const std::string &wp, const std::string &vsEle_wp, - const std::string &sf_vsjet_tauDM0, const std::string &sf_vsjet_tauDM1, - const std::string &sf_vsjet_tauDM10, const std::string &sf_vsjet_tauDM11, - const std::string &sf_dependence, const std::string &id_output, + const std::string 
&wp, const std::string &sf_vsjet_tauDM0, + const std::string &sf_vsjet_tauDM1, const std::string &sf_vsjet_tauDM10, + const std::string &sf_vsjet_tauDM11, const std::string &sf_dependence, + const std::string &vsele_wp, const std::string &id_output, const std::string &sf_file, const std::string &idAlgorithm) { Logger::get("TauIDvsJet_tt_SF") @@ -742,7 +743,7 @@ ROOT::RDF::RNode id_vsJet_tt( Logger::get("TauIDvsJet_tt_SF")->debug("ID - Name {}", idAlgorithm); auto evaluator = correction::CorrectionSet::from_file(sf_file)->at(idAlgorithm); - auto idSF_calculator = [evaluator, wp, vsEle_wp, sf_vsjet_tauDM0, + auto idSF_calculator = [evaluator, wp, vsele_wp, sf_vsjet_tauDM0, sf_vsjet_tauDM1, sf_vsjet_tauDM10, sf_vsjet_tauDM11, sf_dependence, selectedDMs, idAlgorithm](const float &pt, const int &decayMode, @@ -788,29 +789,29 @@ ROOT::RDF::RNode id_vsJet_tt( selectedDMs.end()) { Logger::get("TauIDvsJet_tt_SF") ->debug("ID {} - pt {}, decayMode {}, genMatch {}, wp {}, " - "vsEle_wp {}, " + "vsele_wp {}, " "sf_vsjet_tauDM0 {}, sf_vsjet_tauDM1 {}, " "sf_vsjet_tauDM1 {}, sf_vsjet_tauDM10{}, " "sf_vsjet_tauDM11 {}, sf_dependence {}", - idAlgorithm, pt, decayMode, genMatch, wp, vsEle_wp, + idAlgorithm, pt, decayMode, genMatch, wp, vsele_wp, sf_vsjet_tauDM0, sf_vsjet_tauDM1, sf_vsjet_tauDM10, sf_vsjet_tauDM11, sf_dependence); if (decayMode == 0) { sf = evaluator->evaluate( {pt, decayMode, static_cast(genMatch), wp, - vsEle_wp, sf_vsjet_tauDM0, sf_dependence}); + vsele_wp, sf_vsjet_tauDM0, sf_dependence}); } else if (decayMode == 1) { sf = evaluator->evaluate( {pt, decayMode, static_cast(genMatch), wp, - vsEle_wp, sf_vsjet_tauDM1, sf_dependence}); + vsele_wp, sf_vsjet_tauDM1, sf_dependence}); } else if (decayMode == 10) { sf = evaluator->evaluate( {pt, decayMode, static_cast(genMatch), wp, - vsEle_wp, sf_vsjet_tauDM10, sf_dependence}); + vsele_wp, sf_vsjet_tauDM10, sf_dependence}); } else if (decayMode == 11) { sf = evaluator->evaluate( {pt, decayMode, static_cast(genMatch), wp, 
- vsEle_wp, sf_vsjet_tauDM11, sf_dependence}); + vsele_wp, sf_vsjet_tauDM11, sf_dependence}); } else { sf = 1.; } diff --git a/src/triggers.cxx b/src/triggers.cxx index a7c4a287..d7c7805f 100644 --- a/src/triggers.cxx +++ b/src/triggers.cxx @@ -147,25 +147,34 @@ bool matchParticle(const ROOT::Math::PtEtaPhiMVector &particle, bool pt = particle.pt() > pt_cut; bool eta = abs(particle.eta()) < eta_cut; Logger::get("CheckTriggerMatch") - ->debug("Partice Lorentz Vector: {}, {}, {}, {}", particle.pt(), particle.eta(), particle.phi(), particle.mass()); + ->debug("Partice Lorentz Vector: {}, {}, {}, {}", particle.pt(), + particle.eta(), particle.phi(), particle.mass()); Logger::get("CheckTriggerMatch") ->debug("-------------------------------------------------------"); - Logger::get("CheckTriggerMatch")->debug("deltaR/matchDeltaR Check: {}/{}", deltaR, matchDeltaR); + Logger::get("CheckTriggerMatch") + ->debug("deltaR/matchDeltaR Check: {}/{}", deltaR, matchDeltaR); Logger::get("CheckTriggerMatch") ->debug("deltaR Value: {}", ROOT::Math::VectorUtil::DeltaR(triggerobject, particle)); - Logger::get("CheckTriggerMatch")->debug("id/trigger_particle_id_cut Check: {}/{}", id, trigger_particle_id_cut); + Logger::get("CheckTriggerMatch") + ->debug("id/trigger_particle_id_cut Check: {}/{}", id, + trigger_particle_id_cut); Logger::get("CheckTriggerMatch") ->debug("id Value: {}", triggerobject_ids[idx]); - Logger::get("CheckTriggerMatch")->debug("bit/triggerbit_cut Check: {}/{}", bit, triggerbit_cut); + Logger::get("CheckTriggerMatch") + ->debug("bit/triggerbit_cut Check: {}/{}", bit, triggerbit_cut); Logger::get("CheckTriggerMatch") ->debug("bit Value: {}", IntBits(triggerobject_bits[idx])); - Logger::get("CheckTriggerMatch")->debug("pt/pt_cut Check: {}/{}", pt, pt_cut); Logger::get("CheckTriggerMatch") - ->debug("pt Value (trg): {}, pt Value (reco): {}", triggerobject_pts[idx], particle.pt()); - Logger::get("CheckTriggerMatch")->debug("eta/eta_cut Check: {}/{}", eta, eta_cut); + 
->debug("pt/pt_cut Check: {}/{}", pt, pt_cut); + Logger::get("CheckTriggerMatch") + ->debug("pt Value (trg): {}, pt Value (reco): {}", + triggerobject_pts[idx], particle.pt()); Logger::get("CheckTriggerMatch") - ->debug("eta (trg) Value: {}, eta (reco) Value: {}", triggerobject_etas[idx], abs(particle.eta())); + ->debug("eta/eta_cut Check: {}/{}", eta, eta_cut); + Logger::get("CheckTriggerMatch") + ->debug("eta (trg) Value: {}, eta (reco) Value: {}", + triggerobject_etas[idx], abs(particle.eta())); Logger::get("CheckTriggerMatch") ->debug("-------------------------------------------------------"); if (deltaR && bit && id && pt && eta) { @@ -221,38 +230,38 @@ ROOT::RDF::RNode GenerateSingleTriggerFlag( const int &trigger_particle_id_cut, const int &triggerbit_cut, const float &DeltaR_threshold) { - auto triggermatch = - [DeltaR_threshold, pt_cut, eta_cut, trigger_particle_id_cut, - triggerbit_cut, hltpath](bool hltpath_match, - const ROOT::Math::PtEtaPhiMVector &particle_p4, - ROOT::RVec triggerobject_bits, - ROOT::RVec triggerobject_ids, - ROOT::RVec triggerobject_pts, - ROOT::RVec triggerobject_etas, - ROOT::RVec triggerobject_phis) { - Logger::get("GenerateSingleTriggerFlag")->debug("Checking Trigger"); + auto triggermatch = [DeltaR_threshold, pt_cut, eta_cut, + trigger_particle_id_cut, triggerbit_cut, hltpath]( + bool hltpath_match, + const ROOT::Math::PtEtaPhiMVector &particle_p4, + ROOT::RVec triggerobject_bits, + ROOT::RVec triggerobject_ids, + ROOT::RVec triggerobject_pts, + ROOT::RVec triggerobject_etas, + ROOT::RVec triggerobject_phis) { + Logger::get("GenerateSingleTriggerFlag")->debug("Checking Trigger"); + Logger::get("CheckTriggerMatch") + ->debug("Selected trigger: {}", hltpath); + bool result = false; + bool match_result = false; + if (hltpath_match) { Logger::get("CheckTriggerMatch") - ->debug("Selected trigger: {}", hltpath); - bool result = false; - bool match_result = false; - if (hltpath_match) { - Logger::get("CheckTriggerMatch") - 
->debug("Checking Triggerobject match with particles ...."); - match_result = matchParticle( - particle_p4, triggerobject_pts, triggerobject_etas, - triggerobject_phis, triggerobject_bits, triggerobject_ids, - DeltaR_threshold, pt_cut, eta_cut, trigger_particle_id_cut, - triggerbit_cut); - } - result = hltpath_match & match_result; - Logger::get("GenerateSingleTriggerFlag") - ->debug("---> HLT Match: {}", hltpath_match); - Logger::get("GenerateSingleTriggerFlag") - ->debug("---> Total Match: {}", match_result); - Logger::get("GenerateSingleTriggerFlag") - ->debug("--->>>> result: {}", result); - return result; - }; + ->debug("Checking Triggerobject match with particles ...."); + match_result = matchParticle( + particle_p4, triggerobject_pts, triggerobject_etas, + triggerobject_phis, triggerobject_bits, triggerobject_ids, + DeltaR_threshold, pt_cut, eta_cut, trigger_particle_id_cut, + triggerbit_cut); + } + result = hltpath_match & match_result; + Logger::get("GenerateSingleTriggerFlag") + ->debug("---> HLT Match: {}", hltpath_match); + Logger::get("GenerateSingleTriggerFlag") + ->debug("---> Total Match: {}", match_result); + Logger::get("GenerateSingleTriggerFlag") + ->debug("--->>>> result: {}", result); + return result; + }; auto available_trigger = df.GetColumnNames(); std::vector matched_trigger_names; std::regex hltpath_regex = std::regex(hltpath); @@ -367,7 +376,7 @@ ROOT::RDF::RNode GenerateDoubleTriggerFlag( ROOT::RVec triggerobject_phis) { Logger::get("GenerateDoubleTriggerFlag")->debug("Checking Trigger"); Logger::get("CheckTriggerMatch") - ->debug("Selected trigger: {}", hltpath); + ->debug("Selected trigger: {}", hltpath); bool result = false; bool match_result_p1 = false; bool match_result_p2 = false; @@ -736,25 +745,34 @@ bool matchParticle(const ROOT::Math::PtEtaPhiMVector &particle, (trigger_particle_pt_cut < 0.) 
|| (triggerobject_pts[idx] > trigger_particle_pt_cut); Logger::get("CheckTriggerMatch") - ->debug("Partice Lorentz Vector: {}, {}, {}, {}", particle.pt(), particle.eta(), particle.phi(), particle.mass()); + ->debug("Partice Lorentz Vector: {}, {}, {}, {}", particle.pt(), + particle.eta(), particle.phi(), particle.mass()); Logger::get("CheckTriggerMatch") ->debug("-------------------------------------------------------"); - Logger::get("CheckTriggerMatch")->debug("deltaR/matchDeltaR Check: {}/{}", deltaR, matchDeltaR); + Logger::get("CheckTriggerMatch") + ->debug("deltaR/matchDeltaR Check: {}/{}", deltaR, matchDeltaR); Logger::get("CheckTriggerMatch") ->debug("deltaR Value: {}", ROOT::Math::VectorUtil::DeltaR(triggerobject, particle)); - Logger::get("CheckTriggerMatch")->debug("id/trigger_particle_id_cut Check: {}/{}", id, trigger_particle_id_cut); + Logger::get("CheckTriggerMatch") + ->debug("id/trigger_particle_id_cut Check: {}/{}", id, + trigger_particle_id_cut); Logger::get("CheckTriggerMatch") ->debug("id Value: {}", triggerobject_ids[idx]); - Logger::get("CheckTriggerMatch")->debug("bit/triggerbit_cut Check: {}/{}", bit, triggerbit_cut); + Logger::get("CheckTriggerMatch") + ->debug("bit/triggerbit_cut Check: {}/{}", bit, triggerbit_cut); Logger::get("CheckTriggerMatch") ->debug("bit Value: {}", IntBits(triggerobject_bits[idx])); - Logger::get("CheckTriggerMatch")->debug("pt/pt_cut Check: {}/{}", pt, pt_cut); Logger::get("CheckTriggerMatch") - ->debug("pt Value (trg): {}, pt Value (reco): {}", triggerobject_pts[idx], particle.pt()); - Logger::get("CheckTriggerMatch")->debug("eta/eta_cut Check: {}/{}", eta, eta_cut); + ->debug("pt/pt_cut Check: {}/{}", pt, pt_cut); Logger::get("CheckTriggerMatch") - ->debug("eta (trg) Value: {}, eta (reco) Value: {}", triggerobject_etas[idx], abs(particle.eta())); + ->debug("pt Value (trg): {}, pt Value (reco): {}", + triggerobject_pts[idx], particle.pt()); + Logger::get("CheckTriggerMatch") + ->debug("eta/eta_cut Check: {}/{}", 
eta, eta_cut); + Logger::get("CheckTriggerMatch") + ->debug("eta (trg) Value: {}, eta (reco) Value: {}", + triggerobject_etas[idx], abs(particle.eta())); Logger::get("CheckTriggerMatch") ->debug("-------------------------------------------------------"); if (deltaR && bit && id && pt && eta && trigger_particle_pt) { @@ -810,7 +828,8 @@ ROOT::RDF::RNode MatchSingleTriggerObject( const float &DeltaR_threshold, const float &trigger_particle_pt_cut) { auto triggermatch = [DeltaR_threshold, pt_cut, eta_cut, - trigger_particle_id_cut, triggerbit_cut, trigger_particle_pt_cut]( + trigger_particle_id_cut, triggerbit_cut, + trigger_particle_pt_cut]( const ROOT::Math::PtEtaPhiMVector &particle_p4, ROOT::RVec triggerobject_bits, ROOT::RVec triggerobject_ids, @@ -820,11 +839,11 @@ ROOT::RDF::RNode MatchSingleTriggerObject( Logger::get("MatchSingleTriggerObject")->debug("Checking Trigger"); Logger::get("MatchSingleTriggerObject") ->debug("Checking Triggerobject match with particles ...."); - bool match_result = - matchParticle(particle_p4, triggerobject_pts, triggerobject_etas, - triggerobject_phis, triggerobject_bits, - triggerobject_ids, DeltaR_threshold, pt_cut, eta_cut, - trigger_particle_id_cut, triggerbit_cut, trigger_particle_pt_cut); + bool match_result = matchParticle( + particle_p4, triggerobject_pts, triggerobject_etas, + triggerobject_phis, triggerobject_bits, triggerobject_ids, + DeltaR_threshold, pt_cut, eta_cut, trigger_particle_id_cut, + triggerbit_cut, trigger_particle_pt_cut); Logger::get("MatchSingleTriggerObject") ->debug("--->>>> match_result: {}", match_result); return match_result; @@ -888,7 +907,7 @@ ROOT::RDF::RNode GenerateSingleTriggerFlag( ROOT::RVec triggerobject_phis) { Logger::get("GenerateSingleTriggerFlag")->debug("Checking Trigger"); Logger::get("CheckTriggerMatch") - ->debug("Selected trigger: {}", hltpath); + ->debug("Selected trigger: {}", hltpath); bool result = false; bool match_result = false; if (hltpath_match) { @@ -1149,7 +1168,7 @@ 
ROOT::RDF::RNode GetPrescaleValues(ROOT::RDF::RNode df, Logger::get("prescale") ->debug("... checking lumi {}, prescale {} ...", std::stoi(i_key), int(i_value)); - if (lumiblock > std::stoi(i_key)) { + if (lumiblock >= std::stoi(i_key)) { if (std::stoi(i_key) >= highest_lumi) { highest_lumi = std::stoi(i_key); prescale = i_value; diff --git a/src/tripleselection.cxx b/src/tripleselection.cxx index f58ee821..17df8b69 100644 --- a/src/tripleselection.cxx +++ b/src/tripleselection.cxx @@ -311,16 +311,19 @@ auto TripleSelectionAlgo(const float &mindeltaR_leptau, const ROOT::RVec &tau_phi, const ROOT::RVec &tau_mass, const ROOT::RVec &tau_iso, + const ROOT::RVec &tau_charge, const ROOT::RVec &electron_pt, const ROOT::RVec &electron_eta, const ROOT::RVec &electron_phi, const ROOT::RVec &electron_mass, const ROOT::RVec &electron_iso, + const ROOT::RVec &electron_charge, const ROOT::RVec &muon_pt, const ROOT::RVec &muon_eta, const ROOT::RVec &muon_phi, const ROOT::RVec &muon_mass, const ROOT::RVec &muon_iso, + const ROOT::RVec &muon_charge, const ROOT::RVec &good_electron_mask, const ROOT::RVec &base_electron_mask, const ROOT::RVec &good_muon_mask, @@ -413,16 +416,20 @@ auto TripleSelectionAlgo(const float &mindeltaR_leptau, ->debug("{} debuging1 {}", original_tau_indices.at(candidate_tau), sorted_tau_idx.at(candidate_tau)); + auto tauh_charge = + tau_charge.at(original_tau_indices.at(candidate_tau)); ROOT::Math::PtEtaPhiMVector tau = ROOT::Math::PtEtaPhiMVector( tau_pt.at(original_tau_indices.at(candidate_tau)), tau_eta.at(original_tau_indices.at(candidate_tau)), tau_phi.at(original_tau_indices.at(candidate_tau)), tau_mass.at(original_tau_indices.at(candidate_tau))); for (auto &candidate_mu : selected_mu_indices) { + auto mu_charge = muon_charge.at(candidate_mu); ROOT::Math::PtEtaPhiMVector muon = ROOT::Math::PtEtaPhiMVector( muon_pt.at(candidate_mu), muon_eta.at(candidate_mu), muon_phi.at(candidate_mu), muon_mass.at(candidate_mu)); for (auto &candidate_ele : 
selected_ele_indices) { + auto ele_charge = electron_charge.at(candidate_ele); ROOT::Math::PtEtaPhiMVector electron = ROOT::Math::PtEtaPhiMVector( electron_pt.at(candidate_ele), @@ -443,7 +450,9 @@ auto TripleSelectionAlgo(const float &mindeltaR_leptau, ROOT::Math::VectorUtil::DeltaR(muon, tau) > mindeltaR_leptau && ROOT::Math::VectorUtil::DeltaR(electron, muon) > - mindeltaR_leplep) { + mindeltaR_leplep && + ((mu_charge * ele_charge) > 0.0) && + ((mu_charge * tauh_charge) < 0.0)) { Logger::get("three_flavor::TripleSmuctionAlgo") ->debug("indices of selected candidates: muon {}, " "electron {}, tau {}", @@ -645,23 +654,26 @@ namespace two_flavor { /// second one beeing the lepton from the tau index and the third one the /// hadronic tau index. auto TripleSelectionAlgo(const float &mindeltaR_leptau, - const float &mindeltaR_leplep) { + const float &mindeltaR_leplep, + const std::string &ss_or_os) { Logger::get("two_flavor::TripleSelectionAlgo") ->debug("Setting up algorithm"); - return [mindeltaR_leptau, - mindeltaR_leplep](const ROOT::RVec &tau_pt, - const ROOT::RVec &tau_eta, - const ROOT::RVec &tau_phi, - const ROOT::RVec &tau_mass, - const ROOT::RVec &tau_iso, - const ROOT::RVec &muon_pt, - const ROOT::RVec &muon_eta, - const ROOT::RVec &muon_phi, - const ROOT::RVec &muon_mass, - const ROOT::RVec &muon_iso, - const ROOT::RVec &good_muon_mask, - const ROOT::RVec &base_muon_mask, - const ROOT::RVec &tau_mask) { + return [mindeltaR_leptau, mindeltaR_leplep, + ss_or_os](const ROOT::RVec &tau_pt, + const ROOT::RVec &tau_eta, + const ROOT::RVec &tau_phi, + const ROOT::RVec &tau_mass, + const ROOT::RVec &tau_iso, + const ROOT::RVec &tau_charge, + const ROOT::RVec &muon_pt, + const ROOT::RVec &muon_eta, + const ROOT::RVec &muon_phi, + const ROOT::RVec &muon_mass, + const ROOT::RVec &muon_iso, + const ROOT::RVec &muon_charge, + const ROOT::RVec &good_muon_mask, + const ROOT::RVec &base_muon_mask, + const ROOT::RVec &tau_mask) { ROOT::RVec selected_triple = {-1, -1, -1}; 
const auto original_tau_indices = ROOT::VecOps::Nonzero(tau_mask); const auto ind_good_muons = ROOT::VecOps::Nonzero(good_muon_mask); @@ -727,7 +739,12 @@ auto TripleSelectionAlgo(const float &mindeltaR_leptau, // therefore the loops over the muons have no influence on the pairing. // For the fake rate region, more muons are allowed. The algorithm // prefers triples with the most isolated muon. + // w/ charge req.: + // && + // mu1_charge*tauh_charge < 0.0 for (auto &candidate_tau : sorted_tau_idx) { + auto tauh_charge = + tau_charge.at(original_tau_indices.at(candidate_tau)); Logger::get("two_flavour::TripleSelectionAlgo") ->debug("original_tau_indices.at(candidate_tau) {}, " "sorted_tau_idx.at(candidate_tau) {}", @@ -739,6 +756,7 @@ auto TripleSelectionAlgo(const float &mindeltaR_leptau, tau_phi.at(original_tau_indices.at(candidate_tau)), tau_mass.at(original_tau_indices.at(candidate_tau))); for (int i = 0; i < selected_mu_indices.size() - 1; i = i + 1) { + auto mu1_charge = muon_charge.at(selected_mu_indices.at(i)); ROOT::Math::PtEtaPhiMVector muon_1 = ROOT::Math::PtEtaPhiMVector( muon_pt.at(selected_mu_indices.at(i)), @@ -746,6 +764,7 @@ auto TripleSelectionAlgo(const float &mindeltaR_leptau, muon_phi.at(selected_mu_indices.at(i)), muon_mass.at(selected_mu_indices.at(i))); for (int j = i + 1; j < selected_mu_indices.size(); j = j + 1) { + auto mu2_charge = muon_charge.at(selected_mu_indices.at(j)); ROOT::Math::PtEtaPhiMVector muon_2 = ROOT::Math::PtEtaPhiMVector( muon_pt.at(selected_mu_indices.at(j)), @@ -767,39 +786,49 @@ auto TripleSelectionAlgo(const float &mindeltaR_leptau, mindeltaR_leptau && ROOT::Math::VectorUtil::DeltaR(muon_1, muon_2) > mindeltaR_leplep) { - Logger::get("two_flavour::TripleSelectionAlgo") - ->debug("pt muon_1 {}, pt muon_2 {}", muon_1.Pt(), - muon_2.Pt()); - if (muon_1.Pt() > muon_2.Pt()) { + if (((ss_or_os == "os") && + (mu1_charge * mu2_charge < 0.0)) or + ((ss_or_os == "ss") && + (mu1_charge * mu2_charge > 0.0 && + ((mu1_charge * 
tauh_charge) < 0.0)))) { Logger::get("two_flavour::TripleSelectionAlgo") - ->debug("Selected original triple indices: " + ->debug("pt muon_1 {}, pt muon_2 {}", + muon_1.Pt(), muon_2.Pt()); + if (muon_1.Pt() > muon_2.Pt()) { + Logger::get("two_flavour::TripleSelectionAlgo") + ->debug( + "Selected original triple indices: " "mu_1 = {}, mu_2 = {} , tau = {}", selected_mu_indices.at(i), selected_mu_indices.at(j), original_tau_indices.at(candidate_tau)); - selected_triple = { - static_cast(selected_mu_indices.at(i)), - static_cast(selected_mu_indices.at(j)), - static_cast( - original_tau_indices.at(candidate_tau))}; - Logger::get("two_flavour::TripleSelectionAlgo") - ->debug("selected triple {}", selected_triple); - } else { - Logger::get("two_flavour::TripleSelectionAlgo") - ->debug("Selected original triple indices: " + selected_triple = { + static_cast(selected_mu_indices.at(i)), + static_cast(selected_mu_indices.at(j)), + static_cast(original_tau_indices.at( + candidate_tau))}; + Logger::get("two_flavour::TripleSelectionAlgo") + ->debug("selected triple {}", + selected_triple); + } else { + Logger::get("two_flavour::TripleSelectionAlgo") + ->debug( + "Selected original triple indices: " "mu_1 = {}, mu_2 = {} , tau = {}", selected_mu_indices.at(j), selected_mu_indices.at(i), original_tau_indices.at(candidate_tau)); - selected_triple = { - static_cast(selected_mu_indices.at(j)), - static_cast(selected_mu_indices.at(i)), - static_cast( - original_tau_indices.at(candidate_tau))}; - Logger::get("two_flavour::TripleSelectionAlgo") - ->debug("selected triple {}", selected_triple); + selected_triple = { + static_cast(selected_mu_indices.at(j)), + static_cast(selected_mu_indices.at(i)), + static_cast(original_tau_indices.at( + candidate_tau))}; + Logger::get("two_flavour::TripleSelectionAlgo") + ->debug("selected triple {}", + selected_triple); + } + return selected_triple; } - return selected_triple; } } } @@ -833,10 +862,12 @@ auto TripleSelectionAlgo(const float 
&mindeltaR_leptau, const ROOT::RVec &tau_phi, const ROOT::RVec &tau_mass, const ROOT::RVec &tau_iso, + const ROOT::RVec &tau_charge, const ROOT::RVec &lepton_pt, const ROOT::RVec &lepton_eta, const ROOT::RVec &lepton_phi, const ROOT::RVec &lepton_mass, + const ROOT::RVec &lepton_charge, const ROOT::RVec &lepton_mask, const ROOT::RVec &tau_mask) { ROOT::RVec selected_triple = {-1, -1, -1}; @@ -918,12 +949,14 @@ auto TripleSelectionAlgo(const float &mindeltaR_leptau, // deltaR and reject a pair if the candidates are too close for (auto &candidate : sorted_pairs) { auto tau_index_1 = original_tau_indices[candidate.first]; + auto tauh_charge_1 = tau_charge.at(tau_index_1); ROOT::Math::PtEtaPhiMVector tau_1 = ROOT::Math::PtEtaPhiMVector( tau_pt.at(tau_index_1), tau_eta.at(tau_index_1), tau_phi.at(tau_index_1), tau_mass.at(tau_index_1)); Logger::get("lep_tautau::TripleSelectionAlgo") ->debug("{} leadint tau vector: {}", tau_index_1, tau_1); auto tau_index_2 = original_tau_indices[candidate.second]; + auto tauh_charge_2 = tau_charge.at(tau_index_2); ROOT::Math::PtEtaPhiMVector tau_2 = ROOT::Math::PtEtaPhiMVector( tau_pt.at(tau_index_2), tau_eta.at(tau_index_2), tau_phi.at(tau_index_2), tau_mass.at(tau_index_2)); @@ -937,7 +970,8 @@ auto TripleSelectionAlgo(const float &mindeltaR_leptau, ROOT::Math::VectorUtil::DeltaR(tau_1, lepton) > mindeltaR_leptau && ROOT::Math::VectorUtil::DeltaR(tau_2, lepton) > - mindeltaR_leptau) { + mindeltaR_leptau && + tauh_charge_1 * tauh_charge_2 < 0.0) { Logger::get("lep_tautau::TripleSelectionAlgo") ->debug("Selected original pair indices: tau_1 = {} , " "tau_2 = {}", @@ -992,11 +1026,13 @@ auto TripleSelectionAlgo(const float &mindeltaR_lep1lep1, const ROOT::RVec &lep1_phi, const ROOT::RVec &lep1_mass, const ROOT::RVec &lep1_iso, + const ROOT::RVec &lep1_charge, const ROOT::RVec &lep2_pt, const ROOT::RVec &lep2_eta, const ROOT::RVec &lep2_phi, const ROOT::RVec &lep2_mass, const ROOT::RVec &lep2_iso, + const ROOT::RVec &lep2_charge, const 
ROOT::RVec &lep1_mask, const ROOT::RVec &lep2_mask) { // first 2 entries are lep1 indices, @@ -1042,12 +1078,15 @@ auto TripleSelectionAlgo(const float &mindeltaR_lep1lep1, } for (int i = 0; i < selected_lep1_indices.size() - 1; i = i + 1) { + auto lep1_1_charge = lep1_charge.at(selected_lep1_indices.at(i)); ROOT::Math::PtEtaPhiMVector lep1_1 = ROOT::Math::PtEtaPhiMVector( lep1_pt.at(selected_lep1_indices.at(i)), lep1_eta.at(selected_lep1_indices.at(i)), lep1_phi.at(selected_lep1_indices.at(i)), lep1_mass.at(selected_lep1_indices.at(i))); for (int j = i + 1; j < selected_lep1_indices.size(); j = j + 1) { + auto lep1_2_charge = + lep1_charge.at(selected_lep1_indices.at(j)); ROOT::Math::PtEtaPhiMVector lep1_2 = ROOT::Math::PtEtaPhiMVector( lep1_pt.at(selected_lep1_indices.at(j)), @@ -1075,7 +1114,8 @@ auto TripleSelectionAlgo(const float &mindeltaR_lep1lep1, ROOT::Math::VectorUtil::DeltaR(lep1_1, lep2) > mindeltaR_lep1lep2 && ROOT::Math::VectorUtil::DeltaR(lep1_2, lep2) > - mindeltaR_lep1lep2) { + mindeltaR_lep1lep2 && + ((lep1_1_charge * lep1_2_charge) < 0.0)) { Logger::get("lep1lep1_lep2::TripleSelectionAlgo") ->debug("lep1_1 Pt = {} , lep1_2 Pt = {} ", lep1_1.Pt(), lep1_2.Pt()); @@ -1289,25 +1329,25 @@ namespace mumutau { - tau_mask containing the flags whether the tau is a good tau or not - muon_mask containing the flags whether the muons are a good muons or not * @param triplename name of the new column containing the triple index - * @param mindeltaR_leptau the seperation between each lepton and the tau has to - be larger than - * this value - * @param mindeltaR_leplep the seperation between the leptons has to be larger - than + * @param mindeltaR_leptau the seperation between each lepton and the tau * this value + * @param mindeltaR_leplep the seperation between the leptons + * @param ss_or_os requirement of ss or os for the two muons (ss for analysis, + os for FF estimation) * @return a new dataframe with the triple index column added */ ROOT::RDF::RNode 
TripleSelection(ROOT::RDF::RNode df, const std::vector &input_vector, const std::string &triplename, const float &mindeltaR_leptau, - const float &mindeltaR_leplep) { + const float &mindeltaR_leplep, + const std::string &ss_or_os) { Logger::get("mumutau::TripleSelection") ->debug("Setting up MuMuTau Triple building"); auto df1 = df.Define(triplename, whtautau_tripleselection::two_flavor::TripleSelectionAlgo( - mindeltaR_leptau, mindeltaR_leplep), + mindeltaR_leptau, mindeltaR_leplep, ss_or_os), input_vector); return df1; }