diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 955cd8769bce..c2f419851336 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -226,21 +226,19 @@ jobs: }, { "name": "Linux aarch64", - "os": "ubuntu-latest", + "os": "nscloud-ubuntu-22.04-arm64-4x8", "CMAKE_OPTIONS": "-DUSE_GMP=OFF -DLEAN_INSTALL_SUFFIX=-linux_aarch64", "release": true, "check-level": 2, - "cross": true, - "cross_target": "aarch64-unknown-linux-gnu", "shell": "nix develop .#oldGlibcAArch -c bash -euxo pipefail {0}", - "llvm-url": "https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-x86_64-linux-gnu.tar.zst https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-aarch64-linux-gnu.tar.zst", - "prepare-llvm": "../script/prepare-llvm-linux.sh lean-llvm-aarch64-* lean-llvm-x86_64-*" + "llvm-url": "https://github.com/leanprover/lean-llvm/releases/download/15.0.1/lean-llvm-aarch64-linux-gnu.tar.zst", + "prepare-llvm": "../script/prepare-llvm-linux.sh lean-llvm*" }, { "name": "Linux 32bit", "os": "ubuntu-latest", // Use 32bit on stage0 and stage1 to keep oleans compatible - "CMAKE_OPTIONS": "-DSTAGE0_USE_GMP=OFF -DSTAGE0_LEAN_EXTRA_CXX_FLAGS='-m32' -DSTAGE0_LEANC_OPTS='-m32' -DSTAGE0_MMAP=OFF -DUSE_GMP=OFF -DLEAN_EXTRA_CXX_FLAGS='-m32' -DLEANC_OPTS='-m32' -DMMAP=OFF -DLEAN_INSTALL_SUFFIX=-linux_x86", + "CMAKE_OPTIONS": "-DSTAGE0_USE_GMP=OFF -DSTAGE0_LEAN_EXTRA_CXX_FLAGS='-m32' -DSTAGE0_LEANC_OPTS='-m32' -DSTAGE0_MMAP=OFF -DUSE_GMP=OFF -DLEAN_EXTRA_CXX_FLAGS='-m32' -DLEANC_OPTS='-m32' -DMMAP=OFF -DLEAN_INSTALL_SUFFIX=-linux_x86 -DCMAKE_LIBRARY_PATH=/usr/lib/i386-linux-gnu/", "cmultilib": true, "release": true, "check-level": 2, @@ -259,7 +257,7 @@ jobs: "cross": true, "shell": "bash -euxo pipefail {0}", // Just a few selected tests because wasm is slow - "CTEST_OPTIONS": "-R \"leantest_1007\\.lean|leantest_Format\\.lean|leanruntest\\_1037.lean|leanruntest_ac_rfl\\.lean\"" + "CTEST_OPTIONS": "-R \"leantest_1007\\.lean|leantest_Format\\.lean|leanruntest\\_1037.lean|leanruntest_ac_rfl\\.lean|leanruntest_libuv\\.lean\"" } ]; console.log(`matrix:\n${JSON.stringify(matrix, null, 2)}`) @@ -299,11 +297,11 @@ jobs: with: msystem: clang64 # `:` means do not prefix with msystem - pacboy: "make: python: cmake clang ccache gmp git: zip: unzip: diffutils: binutils: tree: zstd tar:" + pacboy: "make: python: cmake clang ccache gmp libuv git: zip: unzip: diffutils: binutils: tree: zstd tar:" if: runner.os == 'Windows' - name: Install Brew Packages run: | - brew install ccache tree zstd coreutils gmp + brew install ccache tree zstd coreutils gmp libuv if: runner.os == 'macOS' - name: Checkout uses: actions/checkout@v4 @@ -327,17 +325,19 @@ jobs: if: matrix.wasm - name: Install 32bit c libs run: | + sudo dpkg --add-architecture i386 sudo apt-get update - sudo apt-get install -y gcc-multilib g++-multilib ccache + sudo apt-get install -y gcc-multilib g++-multilib ccache libuv1-dev:i386 if: matrix.cmultilib - name: Cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: .ccache key: ${{ matrix.name }}-build-v3-${{ github.event.pull_request.head.sha }} # fall back to (latest) previous cache restore-keys: | ${{ matrix.name }}-build-v3 + save-always: true # open nix-shell once for initial setup - name: Setup run: | @@ -382,6 +382,12 @@ jobs: make -C build install - name: Check Binaries run: ${{ matrix.binary-check }} lean-*/bin/* || true + - name: Count binary symbols + run: | + for f in lean-*/bin/*; do + echo "$f: $(nm $f | grep " T " | wc -l) exported symbols" 
+ done + if: matrix.name == 'Windows' - name: List Install Tree run: | # omit contents of Init/, ... diff --git a/.github/workflows/nix-ci.yml b/.github/workflows/nix-ci.yml index 9db7eea0538d..ac8b8c6ada7e 100644 --- a/.github/workflows/nix-ci.yml +++ b/.github/workflows/nix-ci.yml @@ -55,13 +55,14 @@ jobs: # the default is to use a virtual merge commit between the PR and master: just use the PR ref: ${{ github.event.pull_request.head.sha }} - name: Set Up Nix Cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: nix-store-cache key: ${{ matrix.name }}-nix-store-cache-${{ github.sha }} # fall back to (latest) previous cache restore-keys: | ${{ matrix.name }}-nix-store-cache + save-always: true - name: Further Set Up Nix Cache shell: bash -euxo pipefail {0} run: | @@ -78,13 +79,14 @@ jobs: sudo mkdir -m0770 -p /nix/var/cache/ccache sudo chown -R $USER /nix/var/cache/ccache - name: Setup CCache Cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: /nix/var/cache/ccache key: ${{ matrix.name }}-nix-ccache-${{ github.sha }} # fall back to (latest) previous cache restore-keys: | ${{ matrix.name }}-nix-ccache + save-always: true - name: Further Set Up CCache Cache run: | sudo chown -R root:nixbld /nix/var/cache diff --git a/.github/workflows/pr-release.yml b/.github/workflows/pr-release.yml index 77109a339461..efa37e3c684b 100644 --- a/.github/workflows/pr-release.yml +++ b/.github/workflows/pr-release.yml @@ -328,7 +328,7 @@ jobs: git switch -c lean-pr-testing-${{ steps.workflow-info.outputs.pullRequestNumber }} "$BASE" echo "leanprover/lean4-pr-releases:pr-release-${{ steps.workflow-info.outputs.pullRequestNumber }}" > lean-toolchain git add lean-toolchain - sed -i 's,require "leanprover-community" / "batteries" @ ".\+",require "leanprover-community" / "batteries" @ "git#nightly-testing-'"${MOST_RECENT_NIGHTLY}"'",' lakefile.lean + sed -i 's,require "leanprover-community" / "batteries" @ git ".\+",require "leanprover-community" / "batteries" @ git "nightly-testing-'"${MOST_RECENT_NIGHTLY}"'",' lakefile.lean lake update batteries git add lakefile.lean lake-manifest.json git commit -m "Update lean-toolchain for testing https://github.com/leanprover/lean4/pull/${{ steps.workflow-info.outputs.pullRequestNumber }}" diff --git a/.github/workflows/update-stage0.yml b/.github/workflows/update-stage0.yml index fdeda92066a0..332a88309ff1 100644 --- a/.github/workflows/update-stage0.yml +++ b/.github/workflows/update-stage0.yml @@ -47,7 +47,7 @@ jobs: # uses: DeterminateSystems/magic-nix-cache-action@v2 - if: env.should_update_stage0 == 'yes' name: Restore Build Cache - uses: actions/cache/restore@v3 + uses: actions/cache/restore@v4 with: path: nix-store-cache key: Nix Linux-nix-store-cache-${{ github.sha }} diff --git a/RELEASES.md b/RELEASES.md index 6e610a987fa2..7d8a0c41b121 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -8,10 +8,14 @@ This file contains work-in-progress notes for the upcoming release, as well as p Please check the [releases](https://github.com/leanprover/lean4/releases) page for the current status of each version. -v4.11.0 +v4.12.0 ---------- Development in progress. +v4.11.0 +---------- +Release candidate, release notes will be copied from the branch `releases/v4.11.0` once completed. 
+ v4.10.0 ---------- diff --git a/doc/dev/release_checklist.md b/doc/dev/release_checklist.md index e095f3d067ef..27e039499b04 100644 --- a/doc/dev/release_checklist.md +++ b/doc/dev/release_checklist.md @@ -192,7 +192,7 @@ We'll use `v4.7.0-rc1` as the intended release version in this example. finalized release notes from the `releases/v4.6.0` branch. - Replaces the "development in progress" in the `v4.7.0` section of `RELEASES.md` with ``` - Release candidate, release notes will be copied from `branch releases/v4.7.0` once completed. + Release candidate, release notes will be copied from the branch `releases/v4.7.0` once completed. ``` and inserts the following section before that section: ``` @@ -201,6 +201,8 @@ We'll use `v4.7.0-rc1` as the intended release version in this example. Development in progress. ``` - Removes all the entries from the `./releases_drafts/` folder. + - Titled "chore: begin development cycle for v4.8.0" + ## Time estimates: Slightly longer than the corresponding steps for a stable release. diff --git a/doc/make/index.md b/doc/make/index.md index 8772bdb431dc..1bf663760ce8 100644 --- a/doc/make/index.md +++ b/doc/make/index.md @@ -8,6 +8,7 @@ Requirements - C++14 compatible compiler - [CMake](http://www.cmake.org) - [GMP (GNU multiprecision library)](http://gmplib.org/) +- [LibUV](https://libuv.org/) Platform-Specific Setup ----------------------- diff --git a/doc/make/msys2.md b/doc/make/msys2.md index fa3d28dfde0a..952db856d70e 100644 --- a/doc/make/msys2.md +++ b/doc/make/msys2.md @@ -25,7 +25,7 @@ MSYS2 has a package management system, [pacman][pacman], which is used in Arch L Here are the commands to install all dependencies needed to compile Lean on your machine. ```bash -pacman -S make python mingw-w64-x86_64-cmake mingw-w64-x86_64-clang mingw-w64-x86_64-ccache git unzip diffutils binutils +pacman -S make python mingw-w64-x86_64-cmake mingw-w64-x86_64-clang mingw-w64-x86_64-ccache mingw-w64-x86_64-libuv mingw-w64-x86_64-gmp git unzip diffutils binutils ``` You should now be able to run these commands: @@ -64,6 +64,7 @@ they are installed in your MSYS setup: - libgcc_s_seh-1.dll - libstdc++-6.dll - libgmp-10.dll +- libuv-1.dll - libwinpthread-1.dll The following linux command will do that: diff --git a/doc/make/osx-10.9.md b/doc/make/osx-10.9.md index 0563aa6bbfa7..ca63b4723091 100644 --- a/doc/make/osx-10.9.md +++ b/doc/make/osx-10.9.md @@ -37,10 +37,11 @@ cmake -DCMAKE_CXX_COMPILER=g++ ... ```bash brew install cmake brew install gmp +brew install libuv ``` ## Recommended Packages: CCache ```bash brew install ccache -``` \ No newline at end of file +``` diff --git a/doc/make/ubuntu.md b/doc/make/ubuntu.md index 15a19145f449..abeb1a127f7a 100644 --- a/doc/make/ubuntu.md +++ b/doc/make/ubuntu.md @@ -8,5 +8,5 @@ follow the [generic build instructions](index.md). 
## Basic packages ```bash -sudo apt-get install git libgmp-dev cmake ccache clang +sudo apt-get install git libgmp-dev libuv1-dev cmake ccache clang ``` diff --git a/flake.nix b/flake.nix index cceb1de454cb..3319db4c45de 100644 --- a/flake.nix +++ b/flake.nix @@ -21,7 +21,7 @@ stdenv = pkgs.overrideCC pkgs.stdenv lean-packages.llvmPackages.clang; } ({ buildInputs = with pkgs; [ - cmake gmp ccache + cmake gmp libuv ccache lean-packages.llvmPackages.llvm # llvm-symbolizer for asan/lsan gdb # TODO: only add when proven to not affect the flakification @@ -34,6 +34,7 @@ CTEST_OUTPUT_ON_FAILURE = 1; } // pkgs.lib.optionalAttrs pkgs.stdenv.isLinux { GMP = pkgsDist.gmp.override { withStatic = true; }; + LIBUV = pkgsDist.libuv.overrideAttrs (attrs: { configureFlags = ["--enable-static"]; }); GLIBC = pkgsDist.glibc; GLIBC_DEV = pkgsDist.glibc.dev; GCC_LIB = pkgsDist.gcc.cc.lib; diff --git a/nix/bootstrap.nix b/nix/bootstrap.nix index 4a31516fdc43..033ee7cd28d7 100644 --- a/nix/bootstrap.nix +++ b/nix/bootstrap.nix @@ -1,5 +1,5 @@ { src, debug ? false, stage0debug ? false, extraCMakeFlags ? [], - stdenv, lib, cmake, gmp, git, gnumake, bash, buildLeanPackage, writeShellScriptBin, runCommand, symlinkJoin, lndir, perl, gnused, darwin, llvmPackages, linkFarmFromDrvs, + stdenv, lib, cmake, gmp, libuv, git, gnumake, bash, buildLeanPackage, writeShellScriptBin, runCommand, symlinkJoin, lndir, perl, gnused, darwin, llvmPackages, linkFarmFromDrvs, ... } @ args: with builtins; lib.warn "The Nix-based build is deprecated" rec { @@ -7,7 +7,7 @@ lib.warn "The Nix-based build is deprecated" rec { sourceByRegex = p: rs: lib.sourceByRegex p (map (r: "(/src/)?${r}") rs); buildCMake = args: stdenv.mkDerivation ({ nativeBuildInputs = [ cmake ]; - buildInputs = [ gmp llvmPackages.llvm ]; + buildInputs = [ gmp libuv llvmPackages.llvm ]; # https://github.com/NixOS/nixpkgs/issues/60919 hardeningDisable = [ "all" ]; dontStrip = (args.debug or debug); @@ -26,11 +26,7 @@ lib.warn "The Nix-based build is deprecated" rec { lean-bin-tools-unwrapped = buildCMake { name = "lean-bin-tools"; outputs = [ "out" "leanc_src" ]; - realSrc = sourceByRegex (src + "/src") [ "CMakeLists\.txt" "cmake.*" "bin.*" "include.*" ".*\.in" "Leanc\.lean" ]; - preConfigure = '' - touch empty.cpp - sed -i 's/add_subdirectory.*//;s/set(LEAN_OBJS.*/set(LEAN_OBJS empty.cpp)/' CMakeLists.txt - ''; + realSrc = sourceByRegex (src + "/src") [ "CMakeLists\.txt" "[a-z].*" ".*\.in" "Leanc\.lean" ]; dontBuild = true; installPhase = '' mkdir $out $leanc_src @@ -45,11 +41,10 @@ lib.warn "The Nix-based build is deprecated" rec { leancpp = buildCMake { name = "leancpp"; src = src + "/src"; - buildFlags = [ "leancpp" "leanrt" "leanrt_initial-exec" "shell" ]; + buildFlags = [ "leancpp" "leanrt" "leanrt_initial-exec" "leanshell" "leanmain" ]; installPhase = '' mkdir -p $out mv lib/ $out/ - mv shell/CMakeFiles/shell.dir/lean.cpp.o $out/lib mv runtime/libleanrt_initial-exec.a $out/lib ''; }; @@ -122,12 +117,15 @@ lib.warn "The Nix-based build is deprecated" rec { touch empty.c ${stdenv.cc}/bin/cc -shared -o $out/$libName empty.c ''; + leanshared_1 = runCommand "leanshared_1" { buildInputs = [ stdenv.cc ]; libName = "leanshared_1${stdenv.hostPlatform.extensions.sharedLibrary}"; } '' + mkdir $out + touch empty.c + ${stdenv.cc}/bin/cc -shared -o $out/$libName empty.c + ''; leanshared = runCommand "leanshared" { buildInputs = [ stdenv.cc ]; libName = "libleanshared${stdenv.hostPlatform.extensions.sharedLibrary}"; } '' mkdir $out LEAN_CC=${stdenv.cc}/bin/cc 
${lean-bin-tools-unwrapped}/bin/leanc -shared ${lib.optionalString stdenv.isLinux "-Wl,-Bsymbolic"} \ - ${if stdenv.isDarwin - then "-Wl,-force_load,${Init.staticLib}/libInit.a -Wl,-force_load,${Std.staticLib}/libStd.a -Wl,-force_load,${Lean.staticLib}/libLean.a -Wl,-force_load,${leancpp}/lib/lean/libleancpp.a ${leancpp}/lib/libleanrt_initial-exec.a -lc++" - else "-Wl,--whole-archive -lInit -lStd -lLean -lleancpp ${leancpp}/lib/libleanrt_initial-exec.a -Wl,--no-whole-archive -lstdc++"} \ + -Wl,--whole-archive ${leancpp}/lib/temp/libleanshell.a -lInit -lStd -lLean -lleancpp ${leancpp}/lib/libleanrt_initial-exec.a -Wl,--no-whole-archive -lstdc++ \ -lm ${stdlibLinkFlags} \ $(${llvmPackages.libllvm.dev}/bin/llvm-config --ldflags --libs) \ -o $out/$libName @@ -135,18 +133,18 @@ lib.warn "The Nix-based build is deprecated" rec { mods = foldl' (mods: pkg: mods // pkg.mods) {} stdlib; print-paths = Lean.makePrintPathsFor [] mods; leanc = writeShellScriptBin "leanc" '' - LEAN_CC=${stdenv.cc}/bin/cc ${Leanc.executable}/bin/leanc -I${lean-bin-tools-unwrapped}/include ${stdlibLinkFlags} -L${libInit_shared} -L${leanshared} "$@" + LEAN_CC=${stdenv.cc}/bin/cc ${Leanc.executable}/bin/leanc -I${lean-bin-tools-unwrapped}/include ${stdlibLinkFlags} -L${libInit_shared} -L${leanshared_1} -L${leanshared} "$@" ''; lean = runCommand "lean" { buildInputs = lib.optional stdenv.isDarwin darwin.cctools; } '' mkdir -p $out/bin - ${leanc}/bin/leanc ${leancpp}/lib/lean.cpp.o ${libInit_shared}/* ${leanshared}/* -o $out/bin/lean + ${leanc}/bin/leanc ${leancpp}/lib/temp/libleanmain.a ${libInit_shared}/* ${leanshared_1}/* ${leanshared}/* -o $out/bin/lean ''; # derivation following the directory layout of the "basic" setup, mostly useful for running tests lean-all = stdenv.mkDerivation { name = "lean-${desc}"; buildCommand = '' mkdir -p $out/bin $out/lib/lean - ln -sf ${leancpp}/lib/lean/* ${lib.concatMapStringsSep " " (l: "${l.modRoot}/* ${l.staticLib}/*") (lib.reverseList stdlib)} ${libInit_shared}/* ${leanshared}/* $out/lib/lean/ + ln -sf ${leancpp}/lib/lean/* ${lib.concatMapStringsSep " " (l: "${l.modRoot}/* ${l.staticLib}/*") (lib.reverseList stdlib)} ${libInit_shared}/* ${leanshared_1}/* ${leanshared}/* $out/lib/lean/ # put everything in a single final derivation so `IO.appDir` references work cp ${lean}/bin/lean ${leanc}/bin/leanc ${Lake-Main.executable}/bin/lake $out/bin # NOTE: `lndir` will not override existing `bin/leanc` @@ -160,7 +158,7 @@ lib.warn "The Nix-based build is deprecated" rec { test = buildCMake { name = "lean-test-${desc}"; realSrc = lib.sourceByRegex src [ "src.*" "tests.*" ]; - buildInputs = [ gmp perl git ]; + buildInputs = [ gmp libuv perl git ]; preConfigure = '' cd src ''; @@ -171,7 +169,7 @@ lib.warn "The Nix-based build is deprecated" rec { ln -sf ${lean-all}/* . ''; buildPhase = '' - ctest --output-junit test-results.xml --output-on-failure -E 'leancomptest_(doc_example|foreign)' -j$NIX_BUILD_CORES + ctest --output-junit test-results.xml --output-on-failure -E 'leancomptest_(doc_example|foreign)|leanlaketest_reverse-ffi' -j$NIX_BUILD_CORES ''; installPhase = '' mkdir $out diff --git a/releases_drafts/mutualStructural.md b/releases_drafts/mutualStructural.md deleted file mode 100644 index a1efee9f5d97..000000000000 --- a/releases_drafts/mutualStructural.md +++ /dev/null @@ -1,65 +0,0 @@ -* Structural recursion can now be explicitly requested using - ``` - termination_by structural x - ``` - in analogy to the existing `termination_by x` syntax that causes well-founded recursion to be used. 
- (#4542) - -* The `termination_by?` syntax no longer forces the use of well-founded recursion, and when structural - recursion is inferred, will print the result using the `termination_by` syntax. - -* Mutual structural recursion is supported now. This supports both mutual recursion over a non-mutual - data type, as well as recursion over mutual or nested data types: - - ```lean - mutual - def Even : Nat → Prop - | 0 => True - | n+1 => Odd n - - def Odd : Nat → Prop - | 0 => False - | n+1 => Even n - end - - mutual - inductive A - | other : B → A - | empty - inductive B - | other : A → B - | empty - end - - mutual - def A.size : A → Nat - | .other b => b.size + 1 - | .empty => 0 - - def B.size : B → Nat - | .other a => a.size + 1 - | .empty => 0 - end - - inductive Tree where | node : List Tree → Tree - - mutual - def Tree.size : Tree → Nat - | node ts => Tree.list_size ts - - def Tree.list_size : List Tree → Nat - | [] => 0 - | t::ts => Tree.size t + Tree.list_size ts - end - ``` - - Functional induction principles are generated for these functions as well (`A.size.induct`, `A.size.mutual_induct`). - - Nested structural recursion is still not supported. - - PRs #4639, #4715, #4642, #4656, #4684, #4715, #4728, #4575, #4731, #4658, #4734, #4738, #4718, - #4733, #4787, #4788, #4789, #4807, #4772 - -* A bugfix in the structural recursion code may in some cases break existing code, when a parameter - of the type of the recursive argument is bound behind indices of that type. This can usually be - fixed by reordering the parameters of the function (PR #4672) diff --git a/script/prepare-llvm-linux.sh b/script/prepare-llvm-linux.sh index 429725b82a9f..40c9889788af 100755 --- a/script/prepare-llvm-linux.sh +++ b/script/prepare-llvm-linux.sh @@ -38,7 +38,7 @@ $CP $GLIBC/lib/*crt* llvm/lib/ $CP $GLIBC/lib/*crt* stage1/lib/ # runtime (cd llvm; $CP --parents lib/clang/*/lib/*/{clang_rt.*.o,libclang_rt.builtins*} ../stage1) -$CP llvm/lib/*/lib{c++,c++abi,unwind}.* $GMP/lib/libgmp.a stage1/lib/ +$CP llvm/lib/*/lib{c++,c++abi,unwind}.* $GMP/lib/libgmp.a $LIBUV/lib/libuv.a stage1/lib/ # LLVM 15 appears to ship the dependencies in 'llvm/lib//' and 'llvm/include//' # but clang-15 that we use to compile is linked against 'llvm/lib/' and 'llvm/include' # https://github.com/llvm/llvm-project/issues/54955 @@ -62,8 +62,8 @@ fi # use `-nostdinc` to make sure headers are not visible by default (in particular, not to `#include_next` in the clang headers), # but do not change sysroot so users can still link against system libs echo -n " -DLEANC_INTERNAL_FLAGS='-nostdinc -isystem ROOT/include/clang' -DLEANC_CC=ROOT/bin/clang" -echo -n " -DLEANC_INTERNAL_LINKER_FLAGS='-L ROOT/lib -L ROOT/lib/glibc ROOT/lib/glibc/libc_nonshared.a -Wl,--as-needed -Wl,-Bstatic -lgmp -lunwind -Wl,-Bdynamic -Wl,--no-as-needed -fuse-ld=lld'" +echo -n " -DLEANC_INTERNAL_LINKER_FLAGS='-L ROOT/lib -L ROOT/lib/glibc ROOT/lib/glibc/libc_nonshared.a -Wl,--as-needed -Wl,-Bstatic -lgmp -lunwind -luv -Wl,-Bdynamic -Wl,--no-as-needed -fuse-ld=lld'" # when not using the above flags, link GMP dynamically/as usual -echo -n " -DLEAN_EXTRA_LINKER_FLAGS='-Wl,--as-needed -lgmp -Wl,--no-as-needed'" +echo -n " -DLEAN_EXTRA_LINKER_FLAGS='-Wl,--as-needed -lgmp -luv -Wl,--no-as-needed'" # do not set `LEAN_CC` for tests echo -n " -DLEAN_TEST_VARS=''" diff --git a/script/prepare-llvm-macos.sh b/script/prepare-llvm-macos.sh index ab81c210ff28..d3cdc61b3f2e 100755 --- a/script/prepare-llvm-macos.sh +++ b/script/prepare-llvm-macos.sh @@ -9,6 +9,7 @@ set -uxo 
pipefail # use full LLVM release for compiling C++ code, but subset for compiling C code and distribution GMP=${GMP:-$(brew --prefix)} +LIBUV=${LIBUV:-$(brew --prefix)} [[ -d llvm ]] || (mkdir llvm; gtar xf $1 --strip-components 1 --directory llvm) [[ -d llvm-host ]] || if [[ "$#" -gt 1 ]]; then @@ -46,8 +47,9 @@ echo -n " -DLEAN_EXTRA_CXX_FLAGS='${EXTRA_FLAGS:-}'" if [[ -L llvm-host ]]; then echo -n " -DCMAKE_C_COMPILER=$PWD/stage1/bin/clang" gcp $GMP/lib/libgmp.a stage1/lib/ + gcp $LIBUV/lib/libuv.a stage1/lib/ echo -n " -DLEANC_INTERNAL_LINKER_FLAGS='-L ROOT/lib -L ROOT/lib/libc -fuse-ld=lld'" - echo -n " -DLEAN_EXTRA_LINKER_FLAGS='-lgmp'" + echo -n " -DLEAN_EXTRA_LINKER_FLAGS='-lgmp -luv'" else echo -n " -DCMAKE_C_COMPILER=$PWD/llvm-host/bin/clang -DLEANC_OPTS='--sysroot $PWD/stage1 -resource-dir $PWD/stage1/lib/clang/15.0.1 ${EXTRA_FLAGS:-}'" echo -n " -DLEANC_INTERNAL_LINKER_FLAGS='-L ROOT/lib -L ROOT/lib/libc -fuse-ld=lld'" diff --git a/script/prepare-llvm-mingw.sh b/script/prepare-llvm-mingw.sh index b30bcd648802..749fc9d9fac4 100644 --- a/script/prepare-llvm-mingw.sh +++ b/script/prepare-llvm-mingw.sh @@ -31,15 +31,15 @@ cp /clang64/lib/{crtbegin,crtend,crt2,dllcrt2}.o stage1/lib/ # runtime (cd llvm; cp --parents lib/clang/*/lib/*/libclang_rt.builtins* ../stage1) # further dependencies -cp /clang64/lib/lib{m,bcrypt,mingw32,moldname,mingwex,msvcrt,pthread,advapi32,shell32,user32,kernel32,ucrtbase}.* /clang64/lib/libgmp.a llvm/lib/lib{c++,c++abi,unwind}.a stage1/lib/ +cp /clang64/lib/lib{m,bcrypt,mingw32,moldname,mingwex,msvcrt,pthread,advapi32,shell32,user32,kernel32,ucrtbase}.* /clang64/lib/libgmp.a /clang64/lib/libuv.a llvm/lib/lib{c++,c++abi,unwind}.a stage1/lib/ echo -n " -DLEAN_STANDALONE=ON" echo -n " -DCMAKE_C_COMPILER=$PWD/stage1/bin/clang.exe -DCMAKE_C_COMPILER_WORKS=1 -DCMAKE_CXX_COMPILER=$PWD/llvm/bin/clang++.exe -DCMAKE_CXX_COMPILER_WORKS=1 -DLEAN_CXX_STDLIB='-lc++ -lc++abi'" echo -n " -DSTAGE0_CMAKE_C_COMPILER=clang -DSTAGE0_CMAKE_CXX_COMPILER=clang++" echo -n " -DLEAN_EXTRA_CXX_FLAGS='--sysroot $PWD/llvm -idirafter /clang64/include/'" echo -n " -DLEANC_INTERNAL_FLAGS='--sysroot ROOT -nostdinc -isystem ROOT/include/clang' -DLEANC_CC=ROOT/bin/clang.exe" -echo -n " -DLEANC_INTERNAL_LINKER_FLAGS='-L ROOT/lib -static-libgcc -Wl,-Bstatic -lgmp -lunwind -Wl,-Bdynamic -fuse-ld=lld'" +echo -n " -DLEANC_INTERNAL_LINKER_FLAGS='-L ROOT/lib -static-libgcc -Wl,-Bstatic -lgmp -luv -lunwind -Wl,-Bdynamic -fuse-ld=lld'" # when not using the above flags, link GMP dynamically/as usual -echo -n " -DLEAN_EXTRA_LINKER_FLAGS='-lgmp -lucrtbase'" +echo -n " -DLEAN_EXTRA_LINKER_FLAGS='-lgmp -luv -lucrtbase'" # do not set `LEAN_CC` for tests echo -n " -DAUTO_THREAD_FINALIZATION=OFF -DSTAGE0_AUTO_THREAD_FINALIZATION=OFF" echo -n " -DLEAN_TEST_VARS=''" diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 06991773883e..c8dc6a277deb 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -10,7 +10,7 @@ endif() include(ExternalProject) project(LEAN CXX C) set(LEAN_VERSION_MAJOR 4) -set(LEAN_VERSION_MINOR 11) +set(LEAN_VERSION_MINOR 12) set(LEAN_VERSION_PATCH 0) set(LEAN_VERSION_IS_RELEASE 0) # This number is 1 in the release revision, and 0 otherwise. 
set(LEAN_SPECIAL_VERSION_DESC "" CACHE STRING "Additional version description like 'nightly-2018-03-11'") @@ -243,6 +243,15 @@ if("${USE_GMP}" MATCHES "ON") endif() endif() +if(NOT "${CMAKE_SYSTEM_NAME}" MATCHES "Emscripten") + # LibUV + find_package(LibUV 1.0.0 REQUIRED) + include_directories(${LIBUV_INCLUDE_DIR}) +endif() +if(NOT LEAN_STANDALONE) + string(APPEND LEAN_EXTRA_LINKER_FLAGS " ${LIBUV_LIBRARIES}") +endif() + # ccache if(CCACHE AND NOT CMAKE_CXX_COMPILER_LAUNCHER AND NOT CMAKE_C_COMPILER_LAUNCHER) find_program(CCACHE_PATH ccache) @@ -402,8 +411,8 @@ endif() # executable or `leanshared`, plugins would try to look them up at load time (even though they # are already loaded) and probably fail unless we set up LD_LIBRARY_PATH. if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") - # import library created by the `leanshared` target - string(APPEND LEANC_SHARED_LINKER_FLAGS " -lInit_shared -lleanshared") + # import libraries created by the stdlib.make targets + string(APPEND LEANC_SHARED_LINKER_FLAGS " -lInit_shared -lleanshared_1 -lleanshared") elseif("${CMAKE_SYSTEM_NAME}" MATCHES "Darwin") string(APPEND LEANC_SHARED_LINKER_FLAGS " -Wl,-undefined,dynamic_lookup") endif() @@ -460,6 +469,22 @@ if(CMAKE_OSX_SYSROOT AND NOT LEAN_STANDALONE) string(APPEND LEANC_EXTRA_FLAGS " ${CMAKE_CXX_SYSROOT_FLAG}${CMAKE_OSX_SYSROOT}") endif() +add_subdirectory(initialize) +add_subdirectory(shell) +# to be included in `leanshared` but not the smaller `leanshared_1` (as it would pull +# in the world) +add_library(leaninitialize STATIC $) +set_target_properties(leaninitialize PROPERTIES + ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib/temp + OUTPUT_NAME leaninitialize) +add_library(leanshell STATIC util/shell.cpp) +set_target_properties(leanshell PROPERTIES + ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib/temp + OUTPUT_NAME leanshell) +if (${CMAKE_SYSTEM_NAME} MATCHES "Windows") + string(APPEND CMAKE_EXE_LINKER_FLAGS " -Wl,--whole-archive -lleanmanifest -Wl,--no-whole-archive") +endif() + if(${STAGE} GREATER 1) # reuse C++ parts, which don't change add_library(leanrt_initial-exec STATIC IMPORTED) @@ -468,13 +493,17 @@ if(${STAGE} GREATER 1) add_library(leanrt STATIC IMPORTED) set_target_properties(leanrt PROPERTIES IMPORTED_LOCATION "${CMAKE_BINARY_DIR}/lib/lean/libleanrt.a") + add_library(leancpp_1 STATIC IMPORTED) + set_target_properties(leancpp_1 PROPERTIES + IMPORTED_LOCATION "${CMAKE_BINARY_DIR}/lib/temp/libleancpp_1.a") add_library(leancpp STATIC IMPORTED) set_target_properties(leancpp PROPERTIES IMPORTED_LOCATION "${CMAKE_BINARY_DIR}/lib/lean/libleancpp.a") add_custom_target(copy-leancpp COMMAND cmake -E copy_if_different "${PREV_STAGE}/runtime/libleanrt_initial-exec.a" "${CMAKE_BINARY_DIR}/runtime/libleanrt_initial-exec.a" COMMAND cmake -E copy_if_different "${PREV_STAGE}/lib/lean/libleanrt.a" "${CMAKE_BINARY_DIR}/lib/lean/libleanrt.a" - COMMAND cmake -E copy_if_different "${PREV_STAGE}/lib/lean/libleancpp.a" "${CMAKE_BINARY_DIR}/lib/lean/libleancpp.a") + COMMAND cmake -E copy_if_different "${PREV_STAGE}/lib/lean/libleancpp.a" "${CMAKE_BINARY_DIR}/lib/lean/libleancpp.a" + COMMAND cmake -E copy_if_different "${PREV_STAGE}/lib/temp/libleancpp_1.a" "${CMAKE_BINARY_DIR}/lib/temp/libleancpp_1.a") add_dependencies(leancpp copy-leancpp) if(LLVM) add_custom_target(copy-lean-h-bc @@ -494,10 +523,13 @@ else() set(LEAN_OBJS ${LEAN_OBJS} $) add_subdirectory(library/compiler) set(LEAN_OBJS ${LEAN_OBJS} $) - add_subdirectory(initialize) - set(LEAN_OBJS ${LEAN_OBJS} $) - add_library(leancpp STATIC ${LEAN_OBJS}) + 
# leancpp without `initialize` (see `leaninitialize` above) + add_library(leancpp_1 STATIC ${LEAN_OBJS}) + set_target_properties(leancpp_1 PROPERTIES + ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib/temp + OUTPUT_NAME leancpp_1) + add_library(leancpp STATIC ${LEAN_OBJS} $) set_target_properties(leancpp PROPERTIES OUTPUT_NAME leancpp) endif() @@ -509,25 +541,12 @@ string(REGEX REPLACE "^([a-zA-Z]):" "/\\1" LEAN_BIN "${CMAKE_BINARY_DIR}/bin") # (also looks nicer in the build log) file(RELATIVE_PATH LIB ${LEAN_SOURCE_DIR} ${CMAKE_BINARY_DIR}/lib) -# set up libInit_shared only on Windows; see also stdlib.make.in -if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") - set(INIT_SHARED_LINKER_FLAGS "-Wl,--whole-archive ${CMAKE_BINARY_DIR}/lib/temp/libInit.a.export ${CMAKE_BINARY_DIR}/lib/temp/libStd.a.export ${CMAKE_BINARY_DIR}/runtime/libleanrt_initial-exec.a -Wl,--no-whole-archive -Wl,--out-implib,${CMAKE_BINARY_DIR}/lib/lean/libInit_shared.dll.a") -endif() - -if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin") - set(LEANSHARED_LINKER_FLAGS "-Wl,-force_load,${CMAKE_BINARY_DIR}/lib/lean/libInit.a -Wl,-force_load,${CMAKE_BINARY_DIR}/lib/lean/libStd.a -Wl,-force_load,${CMAKE_BINARY_DIR}/lib/lean/libLean.a -Wl,-force_load,${CMAKE_BINARY_DIR}/lib/lean/libleancpp.a ${CMAKE_BINARY_DIR}/runtime/libleanrt_initial-exec.a ${LEANSHARED_LINKER_FLAGS}") -elseif(${CMAKE_SYSTEM_NAME} MATCHES "Windows") - set(LEANSHARED_LINKER_FLAGS "-Wl,--whole-archive ${CMAKE_BINARY_DIR}/lib/temp/libLean.a.export -lleancpp -Wl,--no-whole-archive -lInit_shared -Wl,--out-implib,${CMAKE_BINARY_DIR}/lib/lean/libleanshared.dll.a") -else() - set(LEANSHARED_LINKER_FLAGS "-Wl,--whole-archive -lInit -lStd -lLean -lleancpp -Wl,--no-whole-archive ${CMAKE_BINARY_DIR}/runtime/libleanrt_initial-exec.a ${LEANSHARED_LINKER_FLAGS}") -endif() - if (${CMAKE_SYSTEM_NAME} MATCHES "Emscripten") # We do not use dynamic linking via leanshared for Emscripten to keep things # simple. (And we are not interested in `Lake` anyway.) To use dynamic # linking, we would probably have to set MAIN_MODULE=2 on `leanshared`, # SIDE_MODULE=2 on `lean`, and set CMAKE_SHARED_LIBRARY_SUFFIX to ".js". 
- string(APPEND LEAN_EXE_LINKER_FLAGS " ${TOOLCHAIN_STATIC_LINKER_FLAGS} ${EMSCRIPTEN_SETTINGS} -lnodefs.js -s EXIT_RUNTIME=1 -s MAIN_MODULE=1 -s LINKABLE=1 -s EXPORT_ALL=1") + string(APPEND LEAN_EXE_LINKER_FLAGS " ${LIB}/temp/libleanshell.a ${TOOLCHAIN_STATIC_LINKER_FLAGS} ${EMSCRIPTEN_SETTINGS} -lnodefs.js -s EXIT_RUNTIME=1 -s MAIN_MODULE=1 -s LINKABLE=1 -s EXPORT_ALL=1") endif() # Build the compiler using the bootstrapped C sources for stage0, and use @@ -572,11 +591,11 @@ else() add_custom_target(leanshared ALL WORKING_DIRECTORY ${LEAN_SOURCE_DIR} - DEPENDS Init_shared leancpp + DEPENDS Init_shared leancpp_1 leancpp leanshell leaninitialize COMMAND $(MAKE) -f ${CMAKE_BINARY_DIR}/stdlib.make leanshared VERBATIM) - string(APPEND CMAKE_EXE_LINKER_FLAGS " -lInit_shared -lleanshared") + string(APPEND CMAKE_EXE_LINKER_FLAGS " -lInit_shared -lleanshared_1 -lleanshared") endif() if(NOT ${CMAKE_SYSTEM_NAME} MATCHES "Emscripten") @@ -613,8 +632,6 @@ file(COPY ${LEAN_SOURCE_DIR}/bin/leanmake DESTINATION ${CMAKE_BINARY_DIR}/bin) install(DIRECTORY "${CMAKE_BINARY_DIR}/bin/" USE_SOURCE_PERMISSIONS DESTINATION bin) -add_subdirectory(shell) - add_custom_target(clean-stdlib COMMAND rm -rf "${CMAKE_BINARY_DIR}/lib" || true) diff --git a/src/Init/Core.lean b/src/Init/Core.lean index f1b7b08fdecf..a3b2fec75ec2 100644 --- a/src/Init/Core.lean +++ b/src/Init/Core.lean @@ -1104,6 +1104,13 @@ inductive Relation.TransGen {α : Sort u} (r : α → α → Prop) : α → α /-- Deprecated synonym for `Relation.TransGen`. -/ @[deprecated Relation.TransGen (since := "2024-07-16")] abbrev TC := @Relation.TransGen +theorem Relation.TransGen.trans {α : Sort u} {r : α → α → Prop} {a b c} : + TransGen r a b → TransGen r b c → TransGen r a c := by + intro hab hbc + induction hbc with + | single h => exact TransGen.tail hab h + | tail _ h ih => exact TransGen.tail ih h + /-! # Subtype -/ namespace Subtype diff --git a/src/Init/Data/Array/Lemmas.lean b/src/Init/Data/Array/Lemmas.lean index 490fce71d658..133f15bce6d6 100644 --- a/src/Init/Data/Array/Lemmas.lean +++ b/src/Init/Data/Array/Lemmas.lean @@ -335,6 +335,13 @@ theorem mem_data {a : α} {l : Array α} : a ∈ l.data ↔ a ∈ l := (mem_def theorem not_mem_nil (a : α) : ¬ a ∈ #[] := nofun +theorem getElem_of_mem {a : α} {as : Array α} : + a ∈ as → (∃ (n : Nat) (h : n < as.size), as[n]'h = a) := by + intro ha + rcases List.getElem_of_mem ha.val with ⟨i, hbound, hi⟩ + exists i + exists hbound + /-- # get lemmas -/ theorem lt_of_getElem {x : α} {a : Array α} {idx : Nat} {hidx : idx < a.size} (_ : a[idx] = x) : @@ -735,6 +742,12 @@ theorem getElem_modify_of_ne {as : Array α} {i : Nat} (hj : j < as.size) (as.modify i f)[j]'(by rwa [size_modify]) = as[j] := by simp [getElem_modify hj, h] +@[deprecated getElem_modify (since := "2024-08-08")] +theorem get_modify {arr : Array α} {x i} (h : i < arr.size) : + (arr.modify x f).get ⟨i, by simp [h]⟩ = + if x = i then f (arr.get ⟨i, h⟩) else arr.get ⟨i, h⟩ := by + simp [getElem_modify h] + /-! 
### filter -/ @[simp] theorem filter_data (p : α → Bool) (l : Array α) : diff --git a/src/Init/Data/BitVec/Lemmas.lean b/src/Init/Data/BitVec/Lemmas.lean index 3d4a7d3445d3..5d06afcbc8e0 100644 --- a/src/Init/Data/BitVec/Lemmas.lean +++ b/src/Init/Data/BitVec/Lemmas.lean @@ -162,6 +162,16 @@ theorem toNat_zero (n : Nat) : (0#n).toNat = 0 := by trivial @[simp] theorem toNat_mod_cancel (x : BitVec n) : x.toNat % (2^n) = x.toNat := Nat.mod_eq_of_lt x.isLt +@[simp] theorem sub_toNat_mod_cancel {x : BitVec w} (h : ¬ x = 0#w) : + (2 ^ w - x.toNat) % 2 ^ w = 2 ^ w - x.toNat := by + simp only [toNat_eq, toNat_ofNat, Nat.zero_mod] at h + rw [Nat.mod_eq_of_lt (by omega)] + +@[simp] theorem sub_sub_toNat_cancel {x : BitVec w} : + 2 ^ w - (2 ^ w - x.toNat) = x.toNat := by + simp [Nat.sub_sub_eq_min, Nat.min_eq_right] + omega + private theorem lt_two_pow_of_le {x m n : Nat} (lt : x < 2 ^ m) (le : m ≤ n) : x < 2 ^ n := Nat.lt_of_lt_of_le lt (Nat.pow_le_pow_of_le_right (by trivial : 0 < 2) le) @@ -300,8 +310,7 @@ theorem truncate_eq_zeroExtend {v : Nat} {x : BitVec w} : @[simp, bv_toNat] theorem toNat_zeroExtend' {m n : Nat} (p : m ≤ n) (x : BitVec m) : (zeroExtend' p x).toNat = x.toNat := by - unfold zeroExtend' - simp [p, x.isLt, Nat.mod_eq_of_lt] + simp [zeroExtend'] @[bv_toNat] theorem toNat_zeroExtend (i : Nat) (x : BitVec n) : BitVec.toNat (zeroExtend i x) = x.toNat % 2^i := by @@ -507,6 +516,13 @@ theorem or_assoc (x y z : BitVec w) : x ||| y ||| z = x ||| (y ||| z) := by ext i simp [Bool.or_assoc] +instance : Std.Associative (α := BitVec n) (· ||| ·) := ⟨BitVec.or_assoc⟩ + +theorem or_comm (x y : BitVec w) : + x ||| y = y ||| x := by + ext i + simp [Bool.or_comm] +instance : Std.Commutative (fun (x y : BitVec w) => x ||| y) := ⟨BitVec.or_comm⟩ /-! ### and -/ @@ -538,11 +554,13 @@ theorem and_assoc (x y z : BitVec w) : x &&& y &&& z = x &&& (y &&& z) := by ext i simp [Bool.and_assoc] +instance : Std.Associative (α := BitVec n) (· &&& ·) := ⟨BitVec.and_assoc⟩ theorem and_comm (x y : BitVec w) : x &&& y = y &&& x := by ext i simp [Bool.and_comm] +instance : Std.Commutative (fun (x y : BitVec w) => x &&& y) := ⟨BitVec.and_comm⟩ /-! ### xor -/ @@ -559,6 +577,15 @@ theorem and_comm (x y : BitVec w) : rw [← testBit_toNat, getLsb, getLsb] simp +@[simp] theorem getMsb_xor {x y : BitVec w} : + (x ^^^ y).getMsb i = (xor (x.getMsb i) (y.getMsb i)) := by + simp only [getMsb] + by_cases h : i < w <;> simp [h] + +@[simp] theorem msb_xor {x y : BitVec w} : + (x ^^^ y).msb = (xor x.msb y.msb) := by + simp [BitVec.msb] + @[simp] theorem truncate_xor {x y : BitVec w} : (x ^^^ y).truncate k = x.truncate k ^^^ y.truncate k := by ext @@ -568,6 +595,13 @@ theorem xor_assoc (x y z : BitVec w) : x ^^^ y ^^^ z = x ^^^ (y ^^^ z) := by ext i simp [Bool.xor_assoc] +instance : Std.Associative (fun (x y : BitVec w) => x ^^^ y) := ⟨BitVec.xor_assoc⟩ + +theorem xor_comm (x y : BitVec w) : + x ^^^ y = y ^^^ x := by + ext i + simp [Bool.xor_comm] +instance : Std.Commutative (fun (x y : BitVec w) => x ^^^ y) := ⟨BitVec.xor_comm⟩ /-! 
### not -/ @@ -651,6 +685,27 @@ theorem zero_shiftLeft (n : Nat) : 0#w <<< n = 0#w := by cases h₁ : decide (i < m) <;> cases h₂ : decide (n ≤ i) <;> cases h₃ : decide (i < n) all_goals { simp_all <;> omega } +theorem shiftLeft_xor_distrib (x y : BitVec w) (n : Nat) : + (x ^^^ y) <<< n = (x <<< n) ^^^ (y <<< n) := by + ext i + simp only [getLsb_shiftLeft, Fin.is_lt, decide_True, Bool.true_and, getLsb_xor] + by_cases h : i < n + <;> simp [h] + +theorem shiftLeft_and_distrib (x y : BitVec w) (n : Nat) : + (x &&& y) <<< n = (x <<< n) &&& (y <<< n) := by + ext i + simp only [getLsb_shiftLeft, Fin.is_lt, decide_True, Bool.true_and, getLsb_and] + by_cases h : i < n + <;> simp [h] + +theorem shiftLeft_or_distrib (x y : BitVec w) (n : Nat) : + (x ||| y) <<< n = (x <<< n) ||| (y <<< n) := by + ext i + simp only [getLsb_shiftLeft, Fin.is_lt, decide_True, Bool.true_and, getLsb_or] + by_cases h : i < n + <;> simp [h] + @[simp] theorem getMsb_shiftLeft (x : BitVec w) (i) : (x <<< i).getMsb k = x.getMsb (k + i) := by simp only [getMsb, getLsb_shiftLeft] @@ -822,6 +877,30 @@ theorem sshiftRight_eq_of_msb_true {x : BitVec w} {s : Nat} (h : x.msb = true) : Nat.not_lt, decide_eq_true_eq] omega +theorem sshiftRight_xor_distrib (x y : BitVec w) (n : Nat) : + (x ^^^ y).sshiftRight n = (x.sshiftRight n) ^^^ (y.sshiftRight n) := by + ext i + simp only [getLsb_sshiftRight, getLsb_xor, msb_xor] + split + <;> by_cases w ≤ i + <;> simp [*] + +theorem sshiftRight_and_distrib (x y : BitVec w) (n : Nat) : + (x &&& y).sshiftRight n = (x.sshiftRight n) &&& (y.sshiftRight n) := by + ext i + simp only [getLsb_sshiftRight, getLsb_and, msb_and] + split + <;> by_cases w ≤ i + <;> simp [*] + +theorem sshiftRight_or_distrib (x y : BitVec w) (n : Nat) : + (x ||| y).sshiftRight n = (x.sshiftRight n) ||| (y.sshiftRight n) := by + ext i + simp only [getLsb_sshiftRight, getLsb_or, msb_or] + split + <;> by_cases w ≤ i + <;> simp [*] + /-- The msb after arithmetic shifting right equals the original msb. -/ theorem sshiftRight_msb_eq_msb {n : Nat} {x : BitVec w} : (x.sshiftRight n).msb = x.msb := by @@ -857,6 +936,31 @@ theorem sshiftRight_add {x : BitVec w} {m n : Nat} : @[simp] theorem sshiftRight_eq' (x : BitVec w) : x.sshiftRight' y = x.sshiftRight y.toNat := rfl +/-! ### udiv -/ + +theorem udiv_eq {x y : BitVec n} : x.udiv y = BitVec.ofNat n (x.toNat / y.toNat) := by + have h : x.toNat / y.toNat < 2 ^ n := Nat.lt_of_le_of_lt (Nat.div_le_self ..) (by omega) + simp [udiv, bv_toNat, h, Nat.mod_eq_of_lt] + +@[simp, bv_toNat] +theorem toNat_udiv {x y : BitVec n} : (x.udiv y).toNat = x.toNat / y.toNat := by + simp only [udiv_eq] + by_cases h : y = 0 + · simp [h] + · rw [toNat_ofNat, Nat.mod_eq_of_lt] + exact Nat.lt_of_le_of_lt (Nat.div_le_self ..) (by omega) + +/-! ### umod -/ + +theorem umod_eq {x y : BitVec n} : + x.umod y = BitVec.ofNat n (x.toNat % y.toNat) := by + have h : x.toNat % y.toNat < 2 ^ n := Nat.lt_of_le_of_lt (Nat.mod_le _ _) x.isLt + simp [umod, bv_toNat, Nat.mod_eq_of_lt h] + +@[simp, bv_toNat] +theorem toNat_umod {x y : BitVec n} : + (x.umod y).toNat = x.toNat % y.toNat := rfl + /-! 
### signExtend -/ /-- Equation theorem for `Int.sub` when both arguments are `Int.ofNat` -/ @@ -1248,6 +1352,19 @@ theorem neg_eq_not_add (x : BitVec w) : -x = ~~~x + 1 := by have hx : x.toNat < 2^w := x.isLt rw [Nat.sub_sub, Nat.add_comm 1 x.toNat, ← Nat.sub_sub, Nat.sub_add_cancel (by omega)] +@[simp] +theorem neg_neg {x : BitVec w} : - - x = x := by + by_cases h : x = 0#w + · simp [h] + · simp [bv_toNat, h] + +theorem neg_ne_iff_ne_neg {x y : BitVec w} : -x ≠ y ↔ x ≠ -y := by + constructor + all_goals + intro h h' + subst h' + simp at h + /-! ### mul -/ theorem mul_def {n} {x y : BitVec n} : x * y = (ofFin <| x.toFin * y.toFin) := by rfl diff --git a/src/Init/Data/List/Find.lean b/src/Init/Data/List/Find.lean index 8c25d028e688..63c8415dcb87 100644 --- a/src/Init/Data/List/Find.lean +++ b/src/Init/Data/List/Find.lean @@ -1,7 +1,8 @@ /- Copyright (c) 2014 Parikshit Khanna. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. -Authors: Parikshit Khanna, Jeremy Avigad, Leonardo de Moura, Floris van Doorn, Mario Carneiro +Authors: Parikshit Khanna, Jeremy Avigad, Leonardo de Moura, Floris van Doorn, Mario Carneiro, + Kim Morrison, Jannis Limperg -/ prelude import Init.Data.List.Lemmas @@ -135,14 +136,23 @@ where cases p head <;> simp only [cond_false, cond_true] exact findIdx_go_succ p tail (n + 1) -theorem findIdx_of_get?_eq_some {xs : List α} (w : xs.get? (xs.findIdx p) = some y) : p y := by +theorem findIdx_of_getElem?_eq_some {xs : List α} (w : xs[xs.findIdx p]? = some y) : p y := by induction xs with | nil => simp_all | cons x xs ih => by_cases h : p x <;> simp_all [findIdx_cons] +theorem findIdx_getElem {xs : List α} {w : xs.findIdx p < xs.length} : + p xs[xs.findIdx p] := + xs.findIdx_of_getElem?_eq_some (getElem?_eq_getElem w) + +@[deprecated findIdx_of_getElem?_eq_some (since := "2024-08-12")] +theorem findIdx_of_get?_eq_some {xs : List α} (w : xs.get? (xs.findIdx p) = some y) : p y := + findIdx_of_getElem?_eq_some (by simpa using w) + +@[deprecated findIdx_getElem (since := "2024-08-12")] theorem findIdx_get {xs : List α} {w : xs.findIdx p < xs.length} : p (xs.get ⟨xs.findIdx p, w⟩) := - xs.findIdx_of_get?_eq_some (get?_eq_get w) + xs.findIdx_of_getElem?_eq_some (getElem?_eq_getElem w) theorem findIdx_lt_length_of_exists {xs : List α} (h : ∃ x ∈ xs, p x) : xs.findIdx p < xs.length := by @@ -158,11 +168,89 @@ theorem findIdx_lt_length_of_exists {xs : List α} (h : ∃ x ∈ xs, p x) : obtain ⟨x', m', h'⟩ := h exact ih x' m' h' +theorem findIdx_getElem?_eq_getElem_of_exists {xs : List α} (h : ∃ x ∈ xs, p x) : + xs[xs.findIdx p]? = some (xs[xs.findIdx p]'(xs.findIdx_lt_length_of_exists h)) := + getElem?_eq_getElem (findIdx_lt_length_of_exists h) + +@[deprecated findIdx_getElem?_eq_getElem_of_exists (since := "2024-08-12")] theorem findIdx_get?_eq_get_of_exists {xs : List α} (h : ∃ x ∈ xs, p x) : xs.get? (xs.findIdx p) = some (xs.get ⟨xs.findIdx p, xs.findIdx_lt_length_of_exists h⟩) := get?_eq_get (findIdx_lt_length_of_exists h) - /-! ### findIdx? 
-/ +@[simp] +theorem findIdx_eq_length {p : α → Bool} {xs : List α} : + xs.findIdx p = xs.length ↔ ∀ x ∈ xs, p x = false := by + induction xs with + | nil => simp_all + | cons x xs ih => + rw [findIdx_cons, length_cons] + simp only [cond_eq_if] + split <;> simp_all [Nat.succ.injEq] + +theorem findIdx_le_length (p : α → Bool) {xs : List α} : xs.findIdx p ≤ xs.length := by + by_cases e : ∃ x ∈ xs, p x + · exact Nat.le_of_lt (findIdx_lt_length_of_exists e) + · simp at e + exact Nat.le_of_eq (findIdx_eq_length.mpr e) + +@[simp] +theorem findIdx_lt_length {p : α → Bool} {xs : List α} : + xs.findIdx p < xs.length ↔ ∃ x ∈ xs, p x := by + rw [← Decidable.not_iff_not, Nat.not_lt] + have := @Nat.le_antisymm_iff (xs.findIdx p) xs.length + simp only [findIdx_le_length, true_and] at this + rw [← this, findIdx_eq_length, not_exists] + simp only [Bool.not_eq_true, not_and] + +/-- `p` does not hold for elements with indices less than `xs.findIdx p`. -/ +theorem not_of_lt_findIdx {p : α → Bool} {xs : List α} {i : Nat} (h : i < xs.findIdx p) : + ¬p (xs[i]'(Nat.le_trans h (findIdx_le_length p))) := by + revert i + induction xs with + | nil => intro i h; rw [findIdx_nil] at h; simp at h + | cons x xs ih => + intro i h + have ho := h + rw [findIdx_cons] at h + have npx : ¬p x := by intro y; rw [y, cond_true] at h; simp at h + simp [npx, cond_false] at h + cases i.eq_zero_or_pos with + | inl e => simpa only [e, Fin.zero_eta, get_cons_zero] + | inr e => + have ipm := Nat.succ_pred_eq_of_pos e + have ilt := Nat.le_trans ho (findIdx_le_length p) + simp (config := { singlePass := true }) only [← ipm, getElem_cons_succ] + rw [← ipm, Nat.succ_lt_succ_iff] at h + simpa using ih h + +/-- If `¬ p xs[j]` for all `j < i`, then `i ≤ xs.findIdx p`. -/ +theorem le_findIdx_of_not {p : α → Bool} {xs : List α} {i : Nat} (h : i < xs.length) + (h2 : ∀ j (hji : j < i), ¬p (xs[j]'(Nat.lt_trans hji h))) : i ≤ xs.findIdx p := by + apply Decidable.byContradiction + intro f + simp only [Nat.not_le] at f + exact absurd (@findIdx_getElem _ p xs (Nat.lt_trans f h)) (h2 (xs.findIdx p) f) + +/-- If `¬ p xs[j]` for all `j ≤ i`, then `i < xs.findIdx p`. -/ +theorem lt_findIdx_of_not {p : α → Bool} {xs : List α} {i : Nat} (h : i < xs.length) + (h2 : ∀ j (hji : j ≤ i), ¬p (xs.get ⟨j, Nat.lt_of_le_of_lt hji h⟩)) : i < xs.findIdx p := by + apply Decidable.byContradiction + intro f + simp only [Nat.not_lt] at f + exact absurd (@findIdx_getElem _ p xs (Nat.lt_of_le_of_lt f h)) (h2 (xs.findIdx p) f) + +/-- `xs.findIdx p = i` iff `p xs[i]` and `¬ p xs [j]` for all `j < i`. -/ +theorem findIdx_eq {p : α → Bool} {xs : List α} {i : Nat} (h : i < xs.length) : + xs.findIdx p = i ↔ p xs[i] ∧ ∀ j (hji : j < i), ¬p (xs[j]'(Nat.lt_trans hji h)) := by + refine ⟨fun f ↦ ⟨f ▸ (@findIdx_getElem _ p xs (f ▸ h)), fun _ hji ↦ not_of_lt_findIdx (f ▸ hji)⟩, + fun ⟨h1, h2⟩ ↦ ?_⟩ + apply Nat.le_antisymm _ (le_findIdx_of_not h h2) + apply Decidable.byContradiction + intro h3 + simp at h3 + exact not_of_lt_findIdx h3 h1 + +/-! ### findIdx? -/ @[simp] theorem findIdx?_nil : ([] : List α).findIdx? 
p i = none := rfl diff --git a/src/Init/Data/UInt/Lemmas.lean b/src/Init/Data/UInt/Lemmas.lean index 2c8b97923c4d..3500dc24cdb8 100644 --- a/src/Init/Data/UInt/Lemmas.lean +++ b/src/Init/Data/UInt/Lemmas.lean @@ -47,11 +47,11 @@ protected theorem ne_of_val_ne {a b : $typeName} (h : a.val ≠ b.val) : a ≠ b open $typeName (ne_of_val_ne) in protected theorem ne_of_lt {a b : $typeName} (h : a < b) : a ≠ b := ne_of_val_ne (Fin.ne_of_lt h) -@[simp] protected theorem zero_toNat : (0 : $typeName).toNat = 0 := Nat.zero_mod _ -@[simp] protected theorem mod_toNat (a b : $typeName) : (a % b).toNat = a.toNat % b.toNat := Fin.mod_val .. -@[simp] protected theorem div_toNat (a b : $typeName) : (a / b).toNat = a.toNat / b.toNat := Fin.div_val .. -@[simp] protected theorem sub_toNat_of_le (a b : $typeName) : b ≤ a → (a - b).toNat = a.toNat - b.toNat := Fin.sub_val_of_le -@[simp] protected theorem modn_toNat (a : $typeName) (b : Nat) : (a.modn b).toNat = a.toNat % b := Fin.modn_val .. +@[simp] protected theorem toNat_zero : (0 : $typeName).toNat = 0 := Nat.zero_mod _ +@[simp] protected theorem toNat_mod (a b : $typeName) : (a % b).toNat = a.toNat % b.toNat := Fin.mod_val .. +@[simp] protected theorem toNat_div (a b : $typeName) : (a / b).toNat = a.toNat / b.toNat := Fin.div_val .. +@[simp] protected theorem toNat_sub_of_le (a b : $typeName) : b ≤ a → (a - b).toNat = a.toNat - b.toNat := Fin.sub_val_of_le +@[simp] protected theorem toNat_modn (a : $typeName) (b : Nat) : (a.modn b).toNat = a.toNat % b := Fin.modn_val .. protected theorem modn_lt {m : Nat} : ∀ (u : $typeName), m > 0 → toNat (u % m) < m | ⟨u⟩, h => Fin.modn_lt u h open $typeName (modn_lt) in @@ -69,3 +69,28 @@ declare_uint_theorems UInt16 declare_uint_theorems UInt32 declare_uint_theorems UInt64 declare_uint_theorems USize + +@[deprecated (since := "2024-06-23")] protected abbrev UInt8.zero_toNat := @UInt8.toNat_zero +@[deprecated (since := "2024-06-23")] protected abbrev UInt8.div_toNat := @UInt8.toNat_div +@[deprecated (since := "2024-06-23")] protected abbrev UInt8.mod_toNat := @UInt8.toNat_mod +@[deprecated (since := "2024-06-23")] protected abbrev UInt8.modn_toNat := @UInt8.toNat_modn + +@[deprecated (since := "2024-06-23")] protected abbrev UInt16.zero_toNat := @UInt16.toNat_zero +@[deprecated (since := "2024-06-23")] protected abbrev UInt16.div_toNat := @UInt16.toNat_div +@[deprecated (since := "2024-06-23")] protected abbrev UInt16.mod_toNat := @UInt16.toNat_mod +@[deprecated (since := "2024-06-23")] protected abbrev UInt16.modn_toNat := @UInt16.toNat_modn + +@[deprecated (since := "2024-06-23")] protected abbrev UInt32.zero_toNat := @UInt32.toNat_zero +@[deprecated (since := "2024-06-23")] protected abbrev UInt32.div_toNat := @UInt32.toNat_div +@[deprecated (since := "2024-06-23")] protected abbrev UInt32.mod_toNat := @UInt32.toNat_mod +@[deprecated (since := "2024-06-23")] protected abbrev UInt32.modn_toNat := @UInt32.toNat_modn + +@[deprecated (since := "2024-06-23")] protected abbrev UInt64.zero_toNat := @UInt64.toNat_zero +@[deprecated (since := "2024-06-23")] protected abbrev UInt64.div_toNat := @UInt64.toNat_div +@[deprecated (since := "2024-06-23")] protected abbrev UInt64.mod_toNat := @UInt64.toNat_mod +@[deprecated (since := "2024-06-23")] protected abbrev UInt64.modn_toNat := @UInt64.toNat_modn + +@[deprecated (since := "2024-06-23")] protected abbrev USize.zero_toNat := @USize.toNat_zero +@[deprecated (since := "2024-06-23")] protected abbrev USize.div_toNat := @USize.toNat_div +@[deprecated (since := "2024-06-23")] protected 
abbrev USize.mod_toNat := @USize.toNat_mod +@[deprecated (since := "2024-06-23")] protected abbrev USize.modn_toNat := @USize.toNat_modn diff --git a/src/Init/Omega/Int.lean b/src/Init/Omega/Int.lean index f36e15f876df..c21a65e27331 100644 --- a/src/Init/Omega/Int.lean +++ b/src/Init/Omega/Int.lean @@ -83,6 +83,10 @@ theorem neg_congr {a b : Int} (h₁ : a = b) : -a = -b := by theorem lt_of_gt {x y : Int} (h : x > y) : y < x := gt_iff_lt.mp h theorem le_of_ge {x y : Int} (h : x ≥ y) : y ≤ x := ge_iff_le.mp h +theorem ofNat_mul_nonneg {a b : Nat} : 0 ≤ (a : Int) * b := by + rw [← Int.ofNat_mul] + exact Int.ofNat_zero_le (a * b) + theorem ofNat_sub_eq_zero {b a : Nat} (h : ¬ b ≤ a) : ((a - b : Nat) : Int) = 0 := Int.ofNat_eq_zero.mpr (Nat.sub_eq_zero_of_le (Nat.le_of_lt (Nat.not_le.mp h))) diff --git a/src/Init/System/IO.lean b/src/Init/System/IO.lean index 662f5ca98b9f..ba4ac8e298df 100644 --- a/src/Init/System/IO.lean +++ b/src/Init/System/IO.lean @@ -496,7 +496,7 @@ partial def lines (fname : FilePath) : IO (Array String) := do pure lines else if line.back == '\n' then let line := line.dropRight 1 - let line := if System.Platform.isWindows && line.back == '\x0d' then line.dropRight 1 else line + let line := if line.back == '\r' then line.dropRight 1 else line read <| lines.push line else pure <| lines.push line diff --git a/src/Init/WF.lean b/src/Init/WF.lean index d73bdcff50ff..4ad3a6f1707a 100644 --- a/src/Init/WF.lean +++ b/src/Init/WF.lean @@ -173,7 +173,7 @@ namespace Nat -- less-than is well-founded def lt_wfRel : WellFoundedRelation Nat where - rel := Nat.lt + rel := (· < ·) wf := by apply WellFounded.intro intro n diff --git a/src/Lean/Compiler/IR/EmitC.lean b/src/Lean/Compiler/IR/EmitC.lean index 729ebb8112d4..0300971705a9 100644 --- a/src/Lean/Compiler/IR/EmitC.lean +++ b/src/Lean/Compiler/IR/EmitC.lean @@ -91,19 +91,6 @@ def toCInitName (n : Name) : M String := do def emitCInitName (n : Name) : M Unit := toCInitName n >>= emit -def shouldExport (n : Name) : Bool := - -- HACK: exclude symbols very unlikely to be used by the interpreter or other consumers of - -- libleanshared to avoid Windows symbol limit - !(`Lean.Compiler.LCNF).isPrefixOf n && - !(`Lean.IR).isPrefixOf n && - -- Lean.Server.findModuleRefs is used in SubVerso, and the contents of RequestM are used by the - -- full Verso as well as anything else that extends the LSP server. 
- (!(`Lean.Server.Watchdog).isPrefixOf n) && - (!(`Lean.Server.ImportCompletion).isPrefixOf n) && - (!(`Lean.Server.Completion).isPrefixOf n) - - - def emitFnDeclAux (decl : Decl) (cppBaseName : String) (isExternal : Bool) : M Unit := do let ps := decl.params let env ← getEnv @@ -112,7 +99,7 @@ def emitFnDeclAux (decl : Decl) (cppBaseName : String) (isExternal : Bool) : M U else if isExternal then emit "extern " else emit "LEAN_EXPORT " else - if !isExternal && shouldExport decl.name then emit "LEAN_EXPORT " + if !isExternal then emit "LEAN_EXPORT " emit (toCType decl.resultType ++ " " ++ cppBaseName) unless ps.isEmpty do emit "(" @@ -658,7 +645,7 @@ def emitDeclAux (d : Decl) : M Unit := do let baseName ← toCName f; if xs.size == 0 then emit "static " - else if shouldExport f then + else emit "LEAN_EXPORT " -- make symbol visible to the interpreter emit (toCType t); emit " "; if xs.size > 0 then diff --git a/src/Lean/Elab/Command.lean b/src/Lean/Elab/Command.lean index f756f98a3c37..d47a12a4aa5b 100644 --- a/src/Lean/Elab/Command.lean +++ b/src/Lean/Elab/Command.lean @@ -312,7 +312,11 @@ def runLinters (stx : Syntax) : CommandElabM Unit := do try linter.run stx catch ex => - logException ex + match ex with + | Exception.error ref msg => + logException (.error ref m!"linter {linter.name} failed: {msg}") + | Exception.internal _ _ => + logException ex finally modify fun s => { savedState with messages := s.messages } diff --git a/src/Lean/Elab/MutualDef.lean b/src/Lean/Elab/MutualDef.lean index fb6f78d41f8d..e957ad2a6756 100644 --- a/src/Lean/Elab/MutualDef.lean +++ b/src/Lean/Elab/MutualDef.lean @@ -226,10 +226,16 @@ private def elabHeaders (views : Array DefView) headers := headers.push newHeader return headers where - getBodyTerm? (stx : Syntax) : Option Syntax := + getBodyTerm? (stx : Syntax) : Option Syntax := do -- TODO: does not work with partial syntax - --| `(Parser.Command.declVal| := $body $_suffix:suffix $[$_where]?) => body - guard (stx.isOfKind ``Parser.Command.declValSimple) *> some stx[1] + --| `(Parser.Command.declVal| := $body $_suffix:suffix) => body + guard (stx.isOfKind ``Parser.Command.declValSimple) + let body := stx[1] + let whereDeclsOpt := stx[3] + -- We currently disable incrementality in presence of `where` as we would have to handle the + -- generated leading `let rec` specially + guard whereDeclsOpt.isNone + return body /-- Creates snapshot task with appropriate range from body syntax and promise. -/ mkBodyTask (body : Syntax) (new : IO.Promise (Option BodyProcessedSnapshot)) : diff --git a/src/Lean/Elab/Print.lean b/src/Lean/Elab/Print.lean index 4a827b57310f..2fb169eab189 100644 --- a/src/Lean/Elab/Print.lean +++ b/src/Lean/Elab/Print.lean @@ -78,19 +78,19 @@ private def printStructure (id : Name) (levelParams : List Name) (numParams : Na logInfo m where doFields := liftTermElabM do - forallTelescope (← getConstInfo id).type fun params type => - withLocalDeclD `self type fun self => do + forallTelescope (← getConstInfo id).type fun params _ => + withLocalDeclD `self (mkAppN (Expr.const id (levelParams.map .param)) params) fun self => do let params := params.push self - let mut m : Format := "" + let mut m : MessageData := "" for field in fields do match getProjFnForField? 
(← getEnv) id field with | some proj => let field : Format := if isPrivateName proj then "private " ++ toString field else toString field let cinfo ← getConstInfo proj let ftype ← instantiateForall cinfo.type params - m := m ++ Format.line ++ field ++ " : " ++ (← ppExpr ftype) -- Why ppExpr here? + m := m ++ Format.line ++ field ++ " : " ++ ftype | none => panic! "missing structure field info" - return m + addMessageContext m private def printIdCore (id : Name) : CommandElabM Unit := do let env ← getEnv diff --git a/src/Lean/Elab/Tactic/Ext.lean b/src/Lean/Elab/Tactic/Ext.lean index 6e8dab1237fc..eba9fb37839f 100644 --- a/src/Lean/Elab/Tactic/Ext.lean +++ b/src/Lean/Elab/Tactic/Ext.lean @@ -340,7 +340,7 @@ Runs continuation `k` on each subgoal. -/ def withExtN [Monad m] [MonadLiftT TermElabM m] [MonadExcept Exception m] (g : MVarId) (pats : List (TSyntax `rcasesPat)) (k : MVarId → List (TSyntax `rcasesPat) → m Nat) - (depth := 1000000) (failIfUnchanged := true) : m Nat := + (depth := 100) (failIfUnchanged := true) : m Nat := match depth with | 0 => k g pats | depth+1 => do @@ -358,7 +358,7 @@ This is built on top of `withExtN`, running in `TermElabM` to build the list of (And, for each goal, the patterns consumed.) -/ def extCore (g : MVarId) (pats : List (TSyntax `rcasesPat)) - (depth := 1000000) (failIfUnchanged := true) : + (depth := 100) (failIfUnchanged := true) : TermElabM (Nat × Array (MVarId × List (TSyntax `rcasesPat))) := do StateT.run (m := TermElabM) (s := #[]) (withExtN g pats (fun g qs => modify (·.push (g, qs)) *> pure 0) depth failIfUnchanged) @@ -367,7 +367,7 @@ def extCore (g : MVarId) (pats : List (TSyntax `rcasesPat)) match stx with | `(tactic| ext $pats* $[: $n]?) => do let pats := RCases.expandRIntroPats pats - let depth := n.map (·.getNat) |>.getD 1000000 + let depth := n.map (·.getNat) |>.getD 100 let (used, gs) ← extCore (← getMainGoal) pats.toList depth if RCases.linter.unusedRCasesPattern.get (← getOptions) then if used < pats.size then diff --git a/src/Lean/Elab/Tactic/Omega/Frontend.lean b/src/Lean/Elab/Tactic/Omega/Frontend.lean index 4f39b30f3e97..9c20975a69b1 100644 --- a/src/Lean/Elab/Tactic/Omega/Frontend.lean +++ b/src/Lean/Elab/Tactic/Omega/Frontend.lean @@ -101,9 +101,9 @@ partial def asLinearCombo (e : Expr) : OmegaM (LinearCombo × OmegaM Expr × Std trace[omega] "Found in cache: {e}" return (lc, prf, ∅) | none => - let r ← asLinearComboImpl e - modifyThe Cache fun cache => (cache.insert e (r.1, r.2.1.run' cache)) - pure r + let (lc, proof, r) ← asLinearComboImpl e + modifyThe Cache fun cache => (cache.insert e (lc, proof.run' cache)) + pure (lc, proof, r) /-- Translates an expression into a `LinearCombo`. @@ -255,16 +255,9 @@ where | (``Nat.succ, #[n]) => rewrite e (.app (.const ``Int.ofNat_succ []) n) | (``HAdd.hAdd, #[_, _, _, _, a, b]) => rewrite e (mkApp2 (.const ``Int.ofNat_add []) a b) | (``HMul.hMul, #[_, _, _, _, a, b]) => - -- Don't push the cast into a multiplication unless it produces a non-trivial linear combination. - let r? ← commitWhen do - let (lc, prf, r) ← rewrite e (mkApp2 (.const ``Int.ofNat_mul []) a b) - if lc.isAtom then - pure (none, false) - else - pure (some (lc, prf, r), true) - match r? with - | some r => pure r - | none => mkAtomLinearCombo e + let (lc, prf, r) ← rewrite e (mkApp2 (.const ``Int.ofNat_mul []) a b) + -- Add the fact that the multiplication is non-negative. 
+ pure (lc, prf, r.insert (mkApp2 (.const ``Int.ofNat_mul_nonneg []) a b)) | (``HDiv.hDiv, #[_, _, _, _, a, b]) => rewrite e (mkApp2 (.const ``Int.ofNat_ediv []) a b) | (``OfNat.ofNat, #[_, n, _]) => rewrite e (.app (.const ``Int.natCast_ofNat []) n) | (``HMod.hMod, #[_, _, _, _, a, b]) => rewrite e (mkApp2 (.const ``Int.ofNat_emod []) a b) diff --git a/src/Lean/Language/Lean.lean b/src/Lean/Language/Lean.lean index d0a464ace5ea..de6e63f6890b 100644 --- a/src/Lean/Language/Lean.lean +++ b/src/Lean/Language/Lean.lean @@ -541,30 +541,25 @@ where stx := .missing parserState := {} elabSnap := { range? := stx.getRange?, task := elabPromise.result } - finishedSnap := { range? := none, task := finishedPromise.result.map fun finishedSnap => { - diagnostics := finishedSnap.diagnostics - infoTree? := none - cmdState := { - env := initEnv - maxRecDepth := 0 - } - }} + finishedSnap tacticCache } else { diagnostics, stx, parserState, tacticCache elabSnap := { range? := stx.getRange?, task := elabPromise.result } finishedSnap } - prom.resolve <| .mk (nextCmdSnap? := next?.map ({ range? := some ⟨parserState.pos, ctx.input.endPos⟩, task := ·.result })) data - doElab stx cmdState beginPos + prom.resolve <| .mk (nextCmdSnap? := next?.map + ({ range? := some ⟨parserState.pos, ctx.input.endPos⟩, task := ·.result })) data + let cmdState ← doElab stx cmdState beginPos { old? := old?.map fun old => ⟨old.data.stx, old.data.elabSnap⟩, new := elabPromise } - finishedPromise tacticCache ctx + finishedPromise tacticCache initEnv ctx if let some next := next? then - parseCmd none parserState finishedSnap.get.cmdState initEnv next ctx + parseCmd none parserState cmdState initEnv next ctx doElab (stx : Syntax) (cmdState : Command.State) (beginPos : String.Pos) (snap : SnapshotBundle DynamicSnapshot) (finishedPromise : IO.Promise CommandFinishedSnapshot) - (tacticCache : IO.Ref Tactic.Cache) : LeanProcessingM Unit := do + (tacticCache : IO.Ref Tactic.Cache) (initEnv : Environment) : + LeanProcessingM Command.State := do let ctx ← read let scope := cmdState.scopes.head! let cmdStateRef ← IO.mkRef { cmdState with messages := .empty } @@ -601,11 +596,18 @@ where let cmdState := { cmdState with messages } -- definitely resolve eventually snap.new.resolve <| .ofTyped { diagnostics := .empty : SnapshotLeaf } + let minimal := internal.minimalSnapshots.get scope.opts && !Parser.isTerminalCommand stx finishedPromise.resolve { diagnostics := (← Snapshot.Diagnostics.ofMessageLog cmdState.messages) - infoTree? := some cmdState.infoState.trees[0]! - cmdState + infoTree? := guard (!minimal) *> cmdState.infoState.trees[0]! + cmdState := if minimal then { + env := initEnv + maxRecDepth := 0 + } else cmdState } + -- The reported `cmdState` in the snapshot may be minimized as seen above, so we return the full + -- state here for further processing on the same thread + return cmdState /-- Convenience function for tool uses of the language processor that skips header handling. diff --git a/src/Lean/Meta/Tactic/Simp/BuiltinSimprocs/Nat.lean b/src/Lean/Meta/Tactic/Simp/BuiltinSimprocs/Nat.lean index f71e70228345..fe9bd495aadc 100644 --- a/src/Lean/Meta/Tactic/Simp/BuiltinSimprocs/Nat.lean +++ b/src/Lean/Meta/Tactic/Simp/BuiltinSimprocs/Nat.lean @@ -55,8 +55,9 @@ builtin_dsimproc [simp, seval] reduceDiv ((_ / _ : Nat)) := reduceBin ``HDiv.hDi builtin_dsimproc [simp, seval] reduceMod ((_ % _ : Nat)) := reduceBin ``HMod.hMod 6 (· % ·) builtin_dsimproc [simp, seval] reducePow ((_ ^ _ : Nat)) := fun e => do - let some n ← fromExpr? 
e.appFn!.appArg! | return .continue - let some m ← fromExpr? e.appArg! | return .continue + let_expr HPow.hPow _ _ _ _ n m := e | return .continue + let some n ← fromExpr? n | return .continue + let some m ← fromExpr? m | return .continue unless (← checkExponent m) do return .continue return .done <| toExpr (n ^ m) diff --git a/src/Lean/PrettyPrinter/Delaborator/Basic.lean b/src/Lean/PrettyPrinter/Delaborator/Basic.lean index 3c5ca19fc23c..bf0de8759ebd 100644 --- a/src/Lean/PrettyPrinter/Delaborator/Basic.lean +++ b/src/Lean/PrettyPrinter/Delaborator/Basic.lean @@ -123,6 +123,12 @@ unsafe def mkDelabAttribute : IO (KeyedDeclsAttribute Delab) := } `Lean.PrettyPrinter.Delaborator.delabAttribute @[builtin_init mkDelabAttribute] opaque delabAttribute : KeyedDeclsAttribute Delab +macro "app_delab" id:ident : attr => do + match ← Macro.resolveGlobalName id.getId with + | [] => Macro.throwErrorAt id s!"unknown declaration '{id.getId}'" + | [(c, [])] => `(attr| delab $(mkIdentFrom (canonical := true) id (`app ++ c))) + | _ => Macro.throwErrorAt id s!"ambiguous declaration '{id.getId}'" + def getExprKind : DelabM Name := do let e ← getExpr pure $ match e with diff --git a/src/Lean/PrettyPrinter/Delaborator/FieldNotation.lean b/src/Lean/PrettyPrinter/Delaborator/FieldNotation.lean index be5e298db0aa..84aefe4b1916 100644 --- a/src/Lean/PrettyPrinter/Delaborator/FieldNotation.lean +++ b/src/Lean/PrettyPrinter/Delaborator/FieldNotation.lean @@ -89,6 +89,8 @@ def fieldNotationCandidate? (f : Expr) (args : Array Expr) (useGeneralizedFieldN -- Handle structure projections try let (field, numParams, _) ← projInfo c + unless numParams + 1 ≤ args.size do return none + unless (← whnf <| ← inferType args[numParams]!).isAppOf c.getPrefix do return none return (field, numParams) catch _ => pure () -- Handle generalized field notation diff --git a/src/Lean/Runtime.lean b/src/Lean/Runtime.lean index 4cac6f13794b..f789697858a3 100644 --- a/src/Lean/Runtime.lean +++ b/src/Lean/Runtime.lean @@ -14,10 +14,16 @@ opaque closureMaxArgsFn : Unit → Nat @[extern "lean_max_small_nat"] opaque maxSmallNatFn : Unit → Nat +@[extern "lean_libuv_version"] +opaque libUVVersionFn : Unit → Nat + def closureMaxArgs : Nat := closureMaxArgsFn () def maxSmallNat : Nat := maxSmallNatFn () +def libUVVersion : Nat := + libUVVersionFn () + end Lean diff --git a/src/Std/Data/DHashMap/Internal/Index.lean b/src/Std/Data/DHashMap/Internal/Index.lean index e2b25b858b69..a6afa47b5697 100644 --- a/src/Std/Data/DHashMap/Internal/Index.lean +++ b/src/Std/Data/DHashMap/Internal/Index.lean @@ -48,7 +48,7 @@ cf. https://github.com/leanprover/lean4/issues/4157 ⟨(scrambleHash hash).toUSize &&& (sz.toUSize - 1), by -- Making this proof significantly less painful will be a good test for our USize API by_cases h' : sz < USize.size - · rw [USize.and_toNat, ← USize.ofNat_one, USize.sub_toNat_of_le, USize.toNat_ofNat_of_lt] + · rw [USize.and_toNat, ← USize.ofNat_one, USize.toNat_sub_of_le, USize.toNat_ofNat_of_lt] · refine Nat.lt_of_le_of_lt Nat.and_le_right (Nat.sub_lt h ?_) rw [USize.toNat_ofNat_of_lt] · exact Nat.one_pos diff --git a/src/Std/Sat.lean b/src/Std/Sat.lean index 0009cdfd426e..e7119caa1281 100644 --- a/src/Std/Sat.lean +++ b/src/Std/Sat.lean @@ -4,4 +4,5 @@ Released under Apache 2.0 license as described in the file LICENSE. 
Authors: Henrik Böving -/ prelude +import Std.Sat.AIG import Std.Sat.CNF diff --git a/src/Std/Sat/AIG.lean b/src/Std/Sat/AIG.lean new file mode 100644 index 000000000000..f835d56631c1 --- /dev/null +++ b/src/Std/Sat/AIG.lean @@ -0,0 +1,19 @@ +/- +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Henrik Böving +-/ +import Std.Sat.AIG.Basic +import Std.Sat.AIG.LawfulOperator +import Std.Sat.AIG.Lemmas +import Std.Sat.AIG.Cached +import Std.Sat.AIG.CachedLemmas +import Std.Sat.AIG.CachedGates +import Std.Sat.AIG.CachedGatesLemmas +import Std.Sat.AIG.CNF +import Std.Sat.AIG.Relabel +import Std.Sat.AIG.RelabelNat +import Std.Sat.AIG.RefVec +import Std.Sat.AIG.RefVecOperator +import Std.Sat.AIG.LawfulVecOperator +import Std.Sat.AIG.If diff --git a/src/Std/Sat/AIG/Basic.lean b/src/Std/Sat/AIG/Basic.lean new file mode 100644 index 000000000000..2a94ee87e487 --- /dev/null +++ b/src/Std/Sat/AIG/Basic.lean @@ -0,0 +1,510 @@ +/- +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Henrik Böving +-/ +import Std.Data.HashMap +import Std.Data.HashSet + +namespace Std +namespace Sat + +/-! +This module contains the basic definitions for an AIG (And Inverter Graph) in the style of AIGNET, +as described in https://arxiv.org/pdf/1304.7861.pdf section 3. It consists of an AIG definition, +a description of its semantics and basic operations to construct nodes in the AIG. +-/ + +variable {α : Type} [Hashable α] [DecidableEq α] + +namespace AIG + +/-- +A circuit node. These are not recursive but instead contain indices into an `AIG`, with inputs indexed by `α`. +-/ +inductive Decl (α : Type) where + /-- + A node with a constant output value. + -/ + | const (b : Bool) + /-- + An input node to the circuit. + -/ + | atom (idx : α) + /-- + An AIG gate with configurable input nodes and polarity. `l` and `r` are the + input node indices while `linv` and `rinv` say whether there is an inverter on + the left and right inputs, respectively. + -/ + | gate (l r : Nat) (linv rinv : Bool) + deriving Hashable, Repr, DecidableEq, Inhabited + + +/-- +`Cache.WF xs` is a predicate asserting that a `cache : HashMap (Decl α) Nat` is a valid lookup +cache for `xs : Array (Decl α)`, that is, whenever `cache.find? decl` returns an index into +`xs : Array Decl`, `xs[index] = decl`. Note that this predicate does not force the cache to be +complete, if there is no entry in the cache for some node, it can still exist in the AIG. +-/ +inductive Cache.WF : Array (Decl α) → HashMap (Decl α) Nat → Prop where + /-- + An empty `Cache` is valid for any `Array Decl` as it never has a hit. + -/ + | empty : WF decls {} + /-- + Given a `cache`, valid with respect to some `decls`, we can extend the `decls` without + extending the cache and remain valid. + -/ + | push_id (h : WF decls cache) : WF (decls.push decl) cache + /-- + Given a `cache`, valid with respect to some `decls`, we can extend the `decls` + and the `cache` at the same time with the same values and remain valid. + -/ + | push_cache (h : WF decls cache) : WF (decls.push decl) (cache.insert decl decls.size) + +/-- +A cache for reusing elements from `decls` if they are available. +-/ +def Cache (α : Type) [DecidableEq α] [Hashable α] (decls : Array (Decl α)) := + { map : HashMap (Decl α) Nat // Cache.WF decls map } + +/-- +Create an empty `Cache`, valid with respect to any `Array Decl`. 
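+
+As an illustrative sketch: with `α := Nat` and `decls := #[.atom 0, .atom 1]`, the empty cache is
+trivially valid because it never reports a hit; extending it with
+`Cache.insert decls c (.gate 0 1 false false)` yields a cache valid for
+`decls.push (.gate 0 1 false false)` that records the new gate node at index `decls.size`, i.e. 2.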
+-/ +@[irreducible] +def Cache.empty (decls : Array (Decl α)) : Cache α decls := ⟨{}, WF.empty⟩ + +@[inherit_doc Cache.WF.push_id, irreducible] +def Cache.noUpdate (cache : Cache α decls) : Cache α (decls.push decl) := + ⟨cache.val, Cache.WF.push_id cache.property⟩ + +/- +We require the `decls` as an explicit argument because we use `decls.size` so accidentally mutating +`decls` before calling `Cache.insert` will destroy `decl` linearity. +-/ +@[inherit_doc Cache.WF.push_cache, irreducible] +def Cache.insert (decls : Array (Decl α)) (cache : Cache α decls) (decl : Decl α) : + Cache α (decls.push decl) := + ⟨cache.val.insert decl decls.size, Cache.WF.push_cache cache.property⟩ + +/-- +Contains the index of `decl` in `decls` along with a proof that the index is indeed correct. +-/ +structure CacheHit (decls : Array (Decl α)) (decl : Decl α) where + idx : Nat + hbound : idx < decls.size + hvalid : decls[idx]'hbound = decl + +/-- +For a `c : Cache α decls`, any index `idx` that is a cache hit for some `decl` is within bounds of `decls` (i.e. `idx < decls.size`). +-/ +theorem Cache.get?_bounds {decls : Array (Decl α)} {idx : Nat} (c : Cache α decls) (decl : Decl α) + (hfound : c.val[decl]? = some idx) : + idx < decls.size := by + rcases c with ⟨cache, hcache⟩ + induction hcache with + | empty => simp at hfound + | push_id wf ih => + specialize ih hfound + simp + omega + | @push_cache _ _ decl' wf ih => + simp only [HashMap.getElem?_insert, beq_iff_eq] at hfound + split at hfound <;> rename_i h + · subst h + simp_all + · specialize ih hfound + simp + omega + +/-- +If `Cache.get? decl` returns `some i` then `decls[i] = decl` holds. +-/ +theorem Cache.get?_property {decls : Array (Decl α)} {idx : Nat} (c : Cache α decls) (decl : Decl α) + (hfound : c.val[decl]? = some idx) : + decls[idx]'(Cache.get?_bounds c decl hfound) = decl := by + rcases c with ⟨cache, hcache⟩ + induction hcache generalizing decl with + | empty => simp at hfound + | push_id wf ih => + rw [Array.get_push] + split + . apply ih + simp [hfound] + . next hbounds => + exfalso + apply hbounds + specialize ih _ hfound + apply Array.lt_of_getElem + assumption + | push_cache wf ih => + rename_i decl' + rw [Array.get_push] + split + . simp only [HashMap.getElem?_insert] at hfound + match heq : decl == decl' with + | true => + simp only [beq_iff_eq] at heq + simp [heq] at hfound + omega + | false => + apply ih + simpa [BEq.symm_false heq] using hfound + . next hbounds => + simp only [HashMap.getElem?_insert] at hfound + match heq : decl == decl' with + | true => + apply Eq.symm + simpa using heq + | false => + exfalso + apply hbounds + simp only [BEq.symm_false heq, cond_false] at hfound + specialize ih _ hfound + apply Array.lt_of_getElem + assumption + +/-- +Lookup a `Decl` in a `Cache`. +-/ +opaque Cache.get? (cache : Cache α decls) (decl : Decl α) : Option (CacheHit decls decl) := + /- + This function is marked as `opaque` to make sure it never, ever gets unfolded anywhere. + Unfolding it will often cause `HashMap.find?` to be symbolically evaluated by reducing + it either in `whnf` or in the kernel. This causes *huge* performance issues in practice. + The function can still be fully verified as all the proofs we need are in `CacheHit`. + -/ + match hfound : cache.val[decl]? with + | some hit => + some ⟨hit, Cache.get?_bounds _ _ hfound, Cache.get?_property _ _ hfound⟩ + | none => none + +/-- +An `Array Decl` is a Direct Acyclic Graph (DAG) if a gate at index `i` only points to nodes with index lower than `i`. 
+-/ +def IsDAG (α : Type) (decls : Array (Decl α)) : Prop := + ∀ {i lhs rhs linv rinv} (h : i < decls.size), + decls[i] = .gate lhs rhs linv rinv → lhs < i ∧ rhs < i + +/-- +The empty array is a DAG. +-/ +theorem IsDAG.empty {α : Type} : IsDAG α #[] := by + intro i lhs rhs linv rinv h + simp only [Array.size_toArray, List.length_nil] at h + omega + +end AIG + +/-- +An And Inverter Graph together with a cache for subterm sharing. +-/ +structure AIG (α : Type) [DecidableEq α] [Hashable α] where + /-- + The circuit itself as an `Array Decl` whose members have indices into said array. + -/ + decls : Array (AIG.Decl α) + /-- + The `Decl` cache, valid with respect to `decls`. + -/ + cache : AIG.Cache α decls + /-- + In order to be a valid AIG, `decls` must form a DAG. + -/ + invariant : AIG.IsDAG α decls + +namespace AIG + +/-- +An `AIG` with an empty AIG and cache. +-/ +def empty : AIG α := { decls := #[], cache := Cache.empty #[], invariant := IsDAG.empty } + +/-- +The atom `a` occurs in `aig`. +-/ +def Mem (a : α) (aig : AIG α) : Prop := (.atom a) ∈ aig.decls + +instance : Membership α (AIG α) where + mem := Mem + +/-- +A reference to a node within an AIG. This is the `AIG` analog of `Bool`. +-/ +structure Ref (aig : AIG α) where + gate : Nat + hgate : gate < aig.decls.size + +/-- +A `Ref` into `aig1` is also valid for `aig2` if `aig1` is smaller than `aig2`. +-/ +@[inline] +def Ref.cast {aig1 aig2 : AIG α} (ref : Ref aig1) + (h : aig1.decls.size ≤ aig2.decls.size) : + Ref aig2 := + { ref with hgate := by have := ref.hgate; omega } + +/-- +A pair of `Ref`s, useful for `LawfulOperator`s that act on two `Ref`s at a time. +-/ +structure BinaryInput (aig : AIG α) where + lhs : Ref aig + rhs : Ref aig + +/-- +The `Ref.cast` equivalent for `BinaryInput`. +-/ +@[inline] +def BinaryInput.cast {aig1 aig2 : AIG α} (input : BinaryInput aig1) + (h : aig1.decls.size ≤ aig2.decls.size) : + BinaryInput aig2 := + { input with lhs := input.lhs.cast h, rhs := input.rhs.cast h } + +/-- +A collection of 3 of `Ref`s, useful for `LawfulOperator`s that act on three `Ref`s at a time, +in particular multiplexer style functions. +-/ +structure TernaryInput (aig : AIG α) where + discr : Ref aig + lhs : Ref aig + rhs : Ref aig + +/-- +The `Ref.cast` equivalent for `TernaryInput`. +-/ +@[inline] +def TernaryInput.cast {aig1 aig2 : AIG α} (input : TernaryInput aig1) + (h : aig1.decls.size ≤ aig2.decls.size) : + TernaryInput aig2 := + { input with discr := input.discr.cast h, lhs := input.lhs.cast h, rhs := input.rhs.cast h } + +/-- +An entrypoint into an `AIG`. This can be used to evaluate a circuit, starting at a certain node, +with `AIG.denote` or to construct bigger circuits on top of this specific node. +-/ +structure Entrypoint (α : Type) [DecidableEq α] [Hashable α] where + /-- + The AIG that we are in. + -/ + aig : AIG α + /-- + The reference to the node in `aig` that this `Entrypoint` targets. + -/ + ref : Ref aig + +/-- +Transform an `Entrypoint` into a graphviz string. Useful for debugging purposes. 
+-/ +def toGraphviz {α : Type} [DecidableEq α] [ToString α] [Hashable α] (entry : Entrypoint α) : + String := + let ⟨⟨decls, _, hinv⟩, ⟨idx, h⟩⟩ := entry + let (dag, s) := go "" decls hinv idx h |>.run .empty + let nodes := s.fold (fun x y ↦ x ++ toGraphvizString decls y) "" + "Digraph AIG {" ++ nodes ++ dag ++ "}" +where + go {α : Type} [DecidableEq α] [ToString α] [Hashable α] (acc : String) (decls : Array (Decl α)) + (hinv : IsDAG α decls) (idx : Nat) (hidx : idx < decls.size) + : StateM (HashSet (Fin decls.size)) String := do + let fidx : Fin decls.size := Fin.mk idx hidx + if (← get).contains fidx then + return acc + modify (fun s ↦ s.insert fidx) + match elem : decls[idx] with + | Decl.const _ => return acc + | Decl.atom _ => return acc + | Decl.gate lidx ridx linv rinv => + let curr := s!"{idx} -> {lidx}{invEdgeStyle linv}; {idx} -> {ridx}{invEdgeStyle rinv};" + let hlr := hinv hidx elem + let laig ← go (acc ++ curr) decls hinv lidx (by omega) + go laig decls hinv ridx (by omega) + invEdgeStyle (isInv : Bool) : String := + if isInv then " [color=red]" else " [color=blue]" + toGraphvizString {α : Type} [DecidableEq α] [ToString α] [Hashable α] (decls : Array (Decl α)) + (idx : Fin decls.size) : String := + match decls[idx] with + | Decl.const b => s!"{idx} [label=\"{b}\", shape=box];" + | Decl.atom i => s!"{idx} [label=\"{i}\", shape=doublecircle];" + | Decl.gate _ _ _ _ => s!"{idx} [label=\"{idx} ∧\",shape=trapezium];" + +/-- +A vector of references into `aig`. This is the `AIG` analog of `BitVec`. +-/ +structure RefVec (aig : AIG α) (w : Nat) where + refs : Array Nat + hlen : refs.size = w + hrefs : ∀ (h : i < w), refs[i] < aig.decls.size + +/-- +A sequence of references bundled with their AIG. +-/ +structure RefVecEntry (α : Type) [DecidableEq α] [Hashable α] [DecidableEq α] (w : Nat) where + aig : AIG α + vec : RefVec aig w + +/-- +A `RefVec` bundled with constant distance to be shifted by. +-/ +structure ShiftTarget (aig : AIG α) (w : Nat) where + vec : AIG.RefVec aig w + distance : Nat + +/-- +A `RefVec` bundled with a `RefVec` as distance to be shifted by. +-/ +structure ArbitraryShiftTarget (aig : AIG α) (m : Nat) where + n : Nat + target : AIG.RefVec aig m + distance : AIG.RefVec aig n + +/-- +A `RefVec` to be extended to `newWidth`. +-/ +structure ExtendTarget (aig : AIG α) (newWidth : Nat) where + w : Nat + vec : AIG.RefVec aig w + +/-- +Evaluate an `AIG.Entrypoint` using some assignment for atoms. +-/ +def denote (assign : α → Bool) (entry : Entrypoint α) : Bool := + go entry.ref.gate entry.aig.decls assign entry.ref.hgate entry.aig.invariant +where + go (x : Nat) (decls : Array (Decl α)) (assign : α → Bool) (h1 : x < decls.size) + (h2 : IsDAG α decls) : + Bool := + match h3 : decls[x] with + | .const b => b + | .atom v => assign v + | .gate lhs rhs linv rinv => + have := h2 h1 h3 + let lval := go lhs decls assign (by omega) h2 + let rval := go rhs decls assign (by omega) h2 + xor lval linv && xor rval rinv + +/-- +Denotation of an `AIG` at a specific `Entrypoint`. +-/ +scoped syntax "⟦" term ", " term "⟧" : term + +/-- +Denotation of an `AIG` at a specific `Entrypoint` with the `Entrypoint` being constructed on the fly. 
+-/ +scoped syntax "⟦" term ", " term ", " term "⟧" : term + +macro_rules +| `(⟦$entry, $assign⟧) => `(denote $assign $entry) +| `(⟦$aig, $ref, $assign⟧) => `(denote $assign (Entrypoint.mk $aig $ref)) + +@[app_unexpander AIG.denote] +def unexpandDenote : Lean.PrettyPrinter.Unexpander + | `($(_) {aig := $aig, start := $start, inv := $hbound} $assign) => + `(⟦$aig, ⟨$start, $hbound⟩, $assign⟧) + | `($(_) $entry $assign) => `(⟦$entry, $assign⟧) + | _ => throw () + +/-- +The denotation of the sub-DAG in the `aig` at node `start` is false for all assignments. +-/ +def UnsatAt (aig : AIG α) (start : Nat) (h : start < aig.decls.size) : Prop := + ∀ assign, ⟦aig, ⟨start, h⟩, assign⟧ = false + +/-- +The denotation of the `Entrypoint` is false for all assignments. +-/ +def Entrypoint.Unsat (entry : Entrypoint α) : Prop := + entry.aig.UnsatAt entry.ref.gate entry.ref.hgate + +/-- +An input to an AIG gate. +-/ +structure Fanin (aig : AIG α) where + /-- + The node we are referring to. + -/ + ref : Ref aig + /-- + Whether the node is inverted + -/ + inv : Bool + +/-- +The `Ref.cast` equivalent for `Fanin`. +-/ +@[inline] +def Fanin.cast {aig1 aig2 : AIG α} (fanin : Fanin aig1) + (h : aig1.decls.size ≤ aig2.decls.size) : + Fanin aig2 := + { fanin with ref := fanin.ref.cast h } + +/-- +The input type for creating AIG and gates. +-/ +structure GateInput (aig : AIG α) where + lhs : Fanin aig + rhs : Fanin aig + +/-- +The `Ref.cast` equivalent for `GateInput`. +-/ +@[inline] +def GateInput.cast {aig1 aig2 : AIG α} (input : GateInput aig1) + (h : aig1.decls.size ≤ aig2.decls.size) : + GateInput aig2 := + { input with lhs := input.lhs.cast h, rhs := input.rhs.cast h } + +/-- +Add a new and inverter gate to the AIG in `aig`. Note that this version is only meant for proving, +for production purposes use `AIG.mkGateCached` and equality theorems to this one. +-/ +def mkGate (aig : AIG α) (input : GateInput aig) : Entrypoint α := + let g := aig.decls.size + let decls := + aig.decls.push <| .gate input.lhs.ref.gate input.rhs.ref.gate input.lhs.inv input.rhs.inv + let cache := aig.cache.noUpdate + have invariant := by + intro i lhs' rhs' linv' rinv' h1 h2 + simp only [Array.get_push] at h2 + split at h2 + . apply aig.invariant <;> assumption + . injections + have := input.lhs.ref.hgate + have := input.rhs.ref.hgate + omega + ⟨{ aig with decls, invariant, cache }, ⟨g, by simp [decls]⟩⟩ + +/-- +Add a new input node to the AIG in `aig`. Note that this version is only meant for proving, +for production purposes use `AIG.mkAtomCached` and equality theorems to this one. +-/ +def mkAtom (aig : AIG α) (n : α) : Entrypoint α := + let g := aig.decls.size + let decls := aig.decls.push (.atom n) + let cache := aig.cache.noUpdate + have invariant := by + intro i lhs rhs linv rinv h1 h2 + simp only [Array.get_push] at h2 + split at h2 + . apply aig.invariant <;> assumption + . contradiction + ⟨{ decls, invariant, cache }, ⟨g, by simp [decls]⟩⟩ + +/-- +Add a new constant node to `aig`. Note that this version is only meant for proving, +for production purposes use `AIG.mkConstCached` and equality theorems to this one. +-/ +def mkConst (aig : AIG α) (val : Bool) : Entrypoint α := + let g := aig.decls.size + let decls := aig.decls.push (.const val) + let cache := aig.cache.noUpdate + have invariant := by + intro i lhs rhs linv rinv h1 h2 + simp only [Array.get_push] at h2 + split at h2 + . apply aig.invariant <;> assumption + . 
contradiction + ⟨{ decls, invariant, cache }, ⟨g, by simp [decls]⟩⟩ + +end AIG + +end Sat +end Std diff --git a/src/Std/Sat/AIG/CNF.lean b/src/Std/Sat/AIG/CNF.lean new file mode 100644 index 000000000000..42986c17ded0 --- /dev/null +++ b/src/Std/Sat/AIG/CNF.lean @@ -0,0 +1,725 @@ +/- +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Henrik Böving +-/ +import Std.Sat.CNF +import Std.Sat.AIG.Basic +import Std.Sat.AIG.Lemmas + + +/-! +This module contains an implementation of a verified Tseitin transformation on AIGs. The key results +are the `toCNF` function and the `toCNF_equisat` correctness statement. The implementation is +done in the style of section 3.4 of the AIGNET paper. +-/ + +namespace Std +namespace Sat + +namespace AIG + +namespace Decl + +/-- +Produce a Tseitin style CNF for a `Decl.const`, using `output` as the tree node variable. +-/ +def constToCNF (output : α) (const : Bool) : CNF α := + [[(output, const)]] + +/-- +Produce a Tseitin style CNF for a `Decl.atom`, using `output` as the tree node variable. +-/ +def atomToCNF (output : α) (atom : α) : CNF α := + [[(output, true), (atom, false)], [(output, false), (atom, true)]] + +/-- +Produce a Tseitin style CNF for a `Decl.gate`, using `output` as the tree node variable. +-/ +def gateToCNF (output : α) (lhs rhs : α) (linv rinv : Bool) : CNF α := + -- a ↔ (b and c) as CNF: (¬a ∨ b) ∧ (¬a ∨ c) ∧ (a ∨ ¬b ∨ ¬c) + -- a ↔ (b and ¬c) as CNF: (¬a ∨ b) ∧ (¬a ∨ ¬c) ∧ (a ∨ ¬b ∨ c) + -- a ↔ (¬b and c) as CNF: (¬a ∨ ¬b) ∧ (¬a ∨ c) ∧ (a ∨ b ∨ ¬c) + -- a ↔ (¬b and ¬c) as CNF: (¬a ∨ ¬b) ∧ (¬a ∨ ¬c) ∧ (a ∨ b ∨ c) + [ + [(output, false), (lhs, !linv)], + [(output, false), (rhs, !rinv)], + [(output, true), (lhs, linv), (rhs, rinv)] + ] + +@[simp] +theorem constToCNF_eval : + (constToCNF output b).eval assign + = + (assign output == b) := by + simp [constToCNF, CNF.eval, CNF.Clause.eval] + +@[simp] +theorem atomToCNF_eval : + (atomToCNF output a).eval assign + = + (assign output == assign a) := by + simp only [atomToCNF, CNF.eval_cons, CNF.Clause.eval_cons, beq_true, beq_false, + CNF.Clause.eval_nil, Bool.or_false, CNF.eval_nil, Bool.and_true] + cases assign output <;> cases assign a <;> decide + +@[simp] +theorem gateToCNF_eval : + (gateToCNF output lhs rhs linv rinv).eval assign + = + (assign output == ((xor (assign lhs) linv) && (xor (assign rhs) rinv))) := by + simp only [CNF.eval, gateToCNF, CNF.Clause.eval, List.all_cons, List.any_cons, beq_false, + List.any_nil, Bool.or_false, beq_true, List.all_nil, Bool.and_true] + cases assign output + <;> cases assign lhs + <;> cases assign rhs + <;> cases linv + <;> cases rinv + <;> decide + +end Decl + +abbrev CNFVar (aig : AIG Nat) := Nat ⊕ (Fin aig.decls.size) + +namespace toCNF + +/-- +Mix: +1. An assignment for AIG atoms +2. An assignment for auxiliary Tseitin variables +into an assignment that can be used by a CNF produced by our Tseitin transformation. 
+-/ +def mixAssigns {aig : AIG Nat} (assign1 : Nat → Bool) (assign2 : Fin aig.decls.size → Bool) : + CNFVar aig → Bool + | .inl var => assign1 var + | .inr var => assign2 var + +/-- +Project the atom assignment out of a CNF assignment +-/ +def projectLeftAssign (assign : CNFVar aig → Bool) : Nat → Bool := (assign <| .inl ·) + +/-- +Project the auxiliary variable assignment out of a CNF assignment +-/ +def projectRightAssign (assign : CNFVar aig → Bool) : (idx : Nat) → (idx < aig.decls.size) → Bool := + fun idx h => assign (.inr ⟨idx, h⟩) + +@[simp] +theorem projectLeftAssign_property : (projectLeftAssign assign) x = (assign <| .inl x) := by + simp [projectLeftAssign] + +@[simp] +theorem projectRightAssign_property : + (projectRightAssign assign) x hx = (assign <| .inr ⟨x, hx⟩) := by + simp [projectRightAssign] + +/-- +Given an atom assignment, produce an assignment that will always satisfy the CNF generated by our +Tseitin transformation. This is done by combining the atom assignment with an assignment for the +auxiliary variables, that just evaluates the AIG at the corresponding node. +-/ +def cnfSatAssignment (aig : AIG Nat) (assign1 : Nat → Bool) : CNFVar aig → Bool := + mixAssigns assign1 (fun idx => ⟦aig, ⟨idx.val, idx.isLt⟩, assign1⟧) + +@[simp] +theorem satAssignment_inl : (cnfSatAssignment aig assign1) (.inl x) = assign1 x := by + simp [cnfSatAssignment, mixAssigns] + +@[simp] +theorem satAssignment_inr : + (cnfSatAssignment aig assign1) (.inr x) = ⟦aig, ⟨x.val, x.isLt⟩, assign1⟧ := by + simp [cnfSatAssignment, mixAssigns] + +/-- +The central invariants for the `Cache`. +-/ +structure Cache.Inv (cnf : CNF (CNFVar aig)) (marks : Array Bool) (hmarks : marks.size = aig.decls.size) : Prop where + /-- + If there exists an AIG node that is marked, its children are also guaranteed to be marked already. + This allows us to conclude that if a gate node is marked, all of its (transitive) children are + also marked. + -/ + hmark : ∀ (lhs rhs : Nat) (linv rinv : Bool) (idx : Nat) (hbound : idx < aig.decls.size) + (_hmarked : marks[idx] = true) (heq : aig.decls[idx] = .gate lhs rhs linv rinv), + marks[lhs]'(by have := aig.invariant hbound heq; omega) = true + ∧ + marks[rhs]'(by have := aig.invariant hbound heq; omega) = true + /-- + Relate satisfiability results about our produced CNF to satisfiability results about the AIG that + we are processing. The intuition for this is: if a node is marked, its CNF (and all required + children CNFs according to `hmark`) are already part of the current CNF. Thus the current CNF is + already mirroring the semantics of the marked node. This means that if the CNF is satisfiable at + some assignment, we can evaluate the marked node under the atom part of that assignment and will + get the value that was assigned to the corresponding auxiliary variable as a result. + -/ + heval : ∀ (assign : CNFVar aig → Bool) (_heval : cnf.eval assign = true) (idx : Nat) + (hbound : idx < aig.decls.size) (_hmark : marks[idx]'(by omega) = true), + ⟦aig, ⟨idx, hbound⟩, projectLeftAssign assign⟧ = (projectRightAssign assign) idx hbound + + +/-- +The `Cache` invariant always holds for an empty CNF when all nodes are unmarked. +-/ +theorem Cache.Inv_init : Inv ([] : CNF (CNFVar aig)) (mkArray aig.decls.size false) (by simp) where + hmark := by + intro lhs rhs linv rinv idx hbound hmarked heq + simp at hmarked + heval := by + intro assign _ idx hbound hmark + simp at hmark + +/-- +The CNF cache. 
It keeps track of AIG nodes that we already turned into CNF to avoid adding the same +CNF twice. +-/ +structure Cache (aig : AIG Nat) (cnf : CNF (CNFVar aig)) where + /-- + Keeps track of AIG nodes that we already turned into CNF. + -/ + marks : Array Bool + /-- + There are always as many marks as AIG nodes. + -/ + hmarks : marks.size = aig.decls.size + /-- + The invariant to make sure that `marks` is well formed with respect to the `cnf` + -/ + inv : Cache.Inv cnf marks hmarks + +/-- +We say that a cache extends another by an index when it doesn't invalidate any entry and has an +entry for that index. +-/ +structure Cache.IsExtensionBy (cache1 : Cache aig cnf1) (cache2 : Cache aig cnf2) (new : Nat) + (hnew : new < aig.decls.size) : Prop where + /-- + No entry is invalidated. + -/ + extension : ∀ (idx : Nat) (hidx : idx < aig.decls.size), + cache1.marks[idx]'(by have := cache1.hmarks; omega) = true + → + cache2.marks[idx]'(by have := cache2.hmarks; omega) = true + /-- + The second cache is true at the new index. + -/ + trueAt : cache2.marks[new]'(by have := cache2.hmarks; omega) = true + +theorem Cache.IsExtensionBy_trans_left (cache1 : Cache aig cnf1) (cache2 : Cache aig cnf2) + (cache3 : Cache aig cnf3) (h12 : IsExtensionBy cache1 cache2 new1 hnew1) + (h23 : IsExtensionBy cache2 cache3 new2 hnew2) : IsExtensionBy cache1 cache3 new1 hnew1 := by + apply IsExtensionBy.mk + . intro idx hidx hmarked + apply h23.extension + . apply h12.extension + . exact hmarked + . omega + . omega + . apply h23.extension + . exact h12.trueAt + . omega + +theorem Cache.IsExtensionBy_trans_right (cache1 : Cache aig cnf1) (cache2 : Cache aig cnf2) + (cache3 : Cache aig cnf3) (h12 : IsExtensionBy cache1 cache2 new1 hnew1) + (h23 : IsExtensionBy cache2 cache3 new2 hnew2) : IsExtensionBy cache1 cache3 new2 hnew2 := by + apply IsExtensionBy.mk + . intro idx hidx hmarked + apply h23.extension + . apply h12.extension + . exact hmarked + . omega + . omega + . exact h23.trueAt + +/-- +Cache extension is a reflexive relation. +-/ +theorem Cache.IsExtensionBy_rfl (cache : Cache aig cnf) {h} (hmarked : cache.marks[idx]'h = true) : + Cache.IsExtensionBy cache cache idx (have := cache.hmarks; omega) := by + apply IsExtensionBy.mk + . intros + assumption + . exact hmarked + +theorem Cache.IsExtensionBy_set (cache1 : Cache aig cnf1) (cache2 : Cache aig cnf2) (idx : Nat) + (hbound : idx < cache1.marks.size) (h : cache2.marks = cache1.marks.set ⟨idx, hbound⟩ true) : + IsExtensionBy cache1 cache2 idx (by have := cache1.hmarks; omega) := by + apply IsExtensionBy.mk + . intro idx hidx hmark + simp [Array.getElem_set, hmark, h] + . simp [h] + +/-- +A cache with no entries is valid for an empty CNF. +-/ +def Cache.init (aig : AIG Nat) : Cache aig [] where + marks := mkArray aig.decls.size false + hmarks := by simp + inv := Inv_init + +/-- +Add a `Decl.const` to a `Cache`. +-/ +def Cache.addConst (cache : Cache aig cnf) (idx : Nat) (h : idx < aig.decls.size) + (htip : aig.decls[idx]'h = .const b) : + { + out : Cache aig (Decl.constToCNF (.inr ⟨idx, h⟩) b ++ cnf) + // + Cache.IsExtensionBy cache out idx h + } := + have hmarkbound : idx < cache.marks.size := by have := cache.hmarks; omega + let out := + { cache with + marks := cache.marks.set ⟨idx, hmarkbound⟩ true + hmarks := by simp [cache.hmarks] + inv := by + constructor + . intro lhs rhs linv rinv idx hbound hmarked heq + rw [Array.getElem_set] at hmarked + split at hmarked + . simp_all + . 
have := cache.inv.hmark lhs rhs linv rinv idx hbound hmarked heq + simp [Array.getElem_set, this] + . intro assign heval idx hbound hmarked + rw [Array.getElem_set] at hmarked + split at hmarked + . next heq => + dsimp only at heq + simp only [heq, CNF.eval_append, Decl.constToCNF_eval, Bool.and_eq_true, beq_iff_eq] + at htip heval + simp only [denote_idx_const htip, projectRightAssign_property, heval] + . next heq => + simp only [CNF.eval_append, Decl.constToCNF_eval, Bool.and_eq_true, beq_iff_eq] at heval + have := cache.inv.heval assign heval.right idx hbound hmarked + rw [this] + } + ⟨out, IsExtensionBy_set cache out idx hmarkbound (by simp [out])⟩ + +/-- +Add a `Decl.atom` to a cache. +-/ +def Cache.addAtom (cache : Cache aig cnf) (idx : Nat) (h : idx < aig.decls.size) + (htip : aig.decls[idx]'h = .atom a) : + { + out : Cache aig ((Decl.atomToCNF (.inr ⟨idx, h⟩) (.inl a)) ++ cnf) + // + Cache.IsExtensionBy cache out idx h + } := + have hmarkbound : idx < cache.marks.size := by have := cache.hmarks; omega + let out := + { cache with + marks := cache.marks.set ⟨idx, hmarkbound⟩ true + hmarks := by simp [cache.hmarks] + inv := by + constructor + . intro lhs rhs linv rinv idx hbound hmarked heq + rw [Array.getElem_set] at hmarked + split at hmarked + . simp_all + . have := cache.inv.hmark lhs rhs linv rinv idx hbound hmarked heq + simp [Array.getElem_set, this] + . intro assign heval idx hbound hmarked + rw [Array.getElem_set] at hmarked + split at hmarked + . next heq => + dsimp only at heq + simp only [heq, CNF.eval_append, Decl.atomToCNF_eval, Bool.and_eq_true, beq_iff_eq] at htip heval + simp [heval, denote_idx_atom htip] + . next heq => + simp only [CNF.eval_append, Decl.atomToCNF_eval, Bool.and_eq_true, beq_iff_eq] at heval + have := cache.inv.heval assign heval.right idx hbound hmarked + rw [this] + } + ⟨out, IsExtensionBy_set cache out idx hmarkbound (by simp [out])⟩ + +/-- +Add a `Decl.gate` to a cache. +-/ +def Cache.addGate (cache : Cache aig cnf) {hlb} {hrb} (idx : Nat) (h : idx < aig.decls.size) + (htip : aig.decls[idx]'h = .gate lhs rhs linv rinv) (hl : cache.marks[lhs]'hlb = true) + (hr : cache.marks[rhs]'hrb = true) : + { + out : Cache + aig + (Decl.gateToCNF + (.inr ⟨idx, h⟩) + (.inr ⟨lhs, by have := aig.invariant h htip; omega⟩) + (.inr ⟨rhs, by have := aig.invariant h htip; omega⟩) + linv + rinv + ++ cnf) + // + Cache.IsExtensionBy cache out idx h + } := + have := aig.invariant h htip + have hmarkbound : idx < cache.marks.size := by have := cache.hmarks; omega + let out := + { cache with + marks := cache.marks.set ⟨idx, hmarkbound⟩ true + hmarks := by simp [cache.hmarks] + inv := by + constructor + . intro lhs rhs linv rinv idx hbound hmarked heq + rw [Array.getElem_set] at hmarked + split at hmarked + . next heq2 => + simp only at heq2 + simp only [heq2] at htip + rw [htip] at heq + cases heq + simp [Array.getElem_set, hl, hr] + . have := cache.inv.hmark lhs rhs linv rinv idx hbound hmarked heq + simp [Array.getElem_set, this] + . intro assign heval idx hbound hmarked + rw [Array.getElem_set] at hmarked + split at hmarked + . next heq => + dsimp only at heq + simp only [heq, CNF.eval_append, Decl.gateToCNF_eval, Bool.and_eq_true, beq_iff_eq] + at htip heval + have hleval := cache.inv.heval assign heval.right lhs (by omega) hl + have hreval := cache.inv.heval assign heval.right rhs (by omega) hr + simp only [denote_idx_gate htip, hleval, projectRightAssign_property, hreval, heval] + . 
next heq => + simp only [CNF.eval_append, Decl.gateToCNF_eval, Bool.and_eq_true, beq_iff_eq] at heval + have := cache.inv.heval assign heval.right idx hbound hmarked + rw [this] + } + ⟨out, IsExtensionBy_set cache out idx hmarkbound (by simp [out])⟩ + +/-- +The key invariant about the `State` itself (without cache): The CNF we produce is always satisfiable +at `cnfSatAssignment`. +-/ +def State.Inv (cnf : CNF (CNFVar aig)) : Prop := + ∀ (assign1 : Nat → Bool), cnf.Sat (cnfSatAssignment aig assign1) + +/-- +The `State` invariant always holds when we have an empty CNF. +-/ +theorem State.Inv_nil : State.Inv ([] : CNF (CNFVar aig)) := by + simp [State.Inv] + +/-- +Combining two CNFs for which `State.Inv` holds preserves `State.Inv`. +-/ +theorem State.Inv_append (h1 : State.Inv cnf1) (h2 : State.Inv cnf2) : + State.Inv (cnf1 ++ cnf2) := by + intro assign1 + specialize h1 assign1 + specialize h2 assign1 + simp [CNF.sat_def] at h1 h2 ⊢ + constructor <;> assumption + +/-- +`State.Inv` holds for the CNF that we produce for a `Decl.const`. +-/ +theorem State.Inv_constToCNF (heq : aig.decls[upper] = .const b) : + State.Inv (aig := aig) (Decl.constToCNF (.inr ⟨upper, h⟩) b) := by + intro assign1 + simp [CNF.sat_def, denote_idx_const heq] + +/-- +`State.Inv` holds for the CNF that we produce for a `Decl.atom` +-/ +theorem State.Inv_atomToCNF (heq : aig.decls[upper] = .atom a) : + State.Inv (aig := aig) (Decl.atomToCNF (.inr ⟨upper, h⟩) (.inl a)) := by + intro assign1 + simp [CNF.sat_def, denote_idx_atom heq] + +/-- +`State.Inv` holds for the CNF that we produce for a `Decl.gate` +-/ +theorem State.Inv_gateToCNF {aig : AIG Nat} {h} + (heq : aig.decls[upper]'h = .gate lhs rhs linv rinv) : + State.Inv + (aig := aig) + (Decl.gateToCNF + (.inr ⟨upper, h⟩) + (.inr ⟨lhs, by have := aig.invariant h heq; omega⟩) + (.inr ⟨rhs, by have := aig.invariant h heq; omega⟩) + linv + rinv) + := by + intro assign1 + simp [CNF.sat_def, denote_idx_gate heq] + +/-- +The state to accumulate CNF clauses as we run our Tseitin transformation on the AIG. +-/ +structure State (aig : AIG Nat) where + /-- + The CNF clauses so far. + -/ + cnf : CNF (CNFVar aig) + /-- + A cache so that we don't generate CNF for an AIG node more than once. + -/ + cache : Cache aig cnf + /-- + The invariant that `cnf` has to maintain as we build it up. + -/ + inv : State.Inv cnf + +/-- +An initial state with no CNF clauses and an empty cache. +-/ +def State.empty (aig : AIG Nat) : State aig where + cnf := [] + cache := Cache.init aig + inv := State.Inv_nil + +/-- +State extension are `Cache.IsExtensionBy` for now. +-/ +abbrev State.IsExtensionBy (state1 : State aig) (state2 : State aig) (new : Nat) + (hnew : new < aig.decls.size) : Prop := + Cache.IsExtensionBy state1.cache state2.cache new hnew + +theorem State.IsExtensionBy_trans_left (state1 : State aig) (state2 : State aig) + (state3 : State aig) (h12 : IsExtensionBy state1 state2 new1 hnew1) + (h23 : IsExtensionBy state2 state3 new2 hnew2) : IsExtensionBy state1 state3 new1 hnew1 := by + apply Cache.IsExtensionBy_trans_left + . exact h12 + . exact h23 + +theorem State.IsExtensionBy_trans_right (state1 : State aig) (state2 : State aig) + (state3 : State aig) (h12 : IsExtensionBy state1 state2 new1 hnew1) + (h23 : IsExtensionBy state2 state3 new2 hnew2) : IsExtensionBy state1 state3 new2 hnew2 := by + apply Cache.IsExtensionBy_trans_right + . exact h12 + . exact h23 + +/-- +State extension is a reflexive relation. 
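+
+More precisely, as the hypothesis `hmarked` below makes explicit, a state extends itself by any index
+that is already marked in its cache; since `IsExtensionBy` requires the witnessing index to be marked,
+reflexivity is only available at marked indices.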
+-/ +theorem State.IsExtensionBy_rfl (state : State aig) {h} + (hmarked : state.cache.marks[idx]'h = true) : + State.IsExtensionBy state state idx (have := state.cache.hmarks; omega) := by + apply Cache.IsExtensionBy_rfl <;> assumption + +/-- +Add the CNF for a `Decl.const` to the state. +-/ +def State.addConst (state : State aig) (idx : Nat) (h : idx < aig.decls.size) + (htip : aig.decls[idx]'h = .const b) : + { out : State aig // State.IsExtensionBy state out idx h } := + let ⟨cnf, cache, inv⟩ := state + let newCnf := Decl.constToCNF (.inr ⟨idx, h⟩) b + have hinv := toCNF.State.Inv_constToCNF htip + let ⟨cache, hcache⟩ := cache.addConst idx h htip + ⟨⟨newCnf ++ cnf, cache, State.Inv_append hinv inv⟩, by simp [hcache]⟩ + +/-- +Add the CNF for a `Decl.atom` to the state. +-/ +def State.addAtom (state : State aig) (idx : Nat) (h : idx < aig.decls.size) + (htip : aig.decls[idx]'h = .atom a) : + { out : State aig // State.IsExtensionBy state out idx h } := + let ⟨cnf, cache, inv⟩ := state + let newCnf := Decl.atomToCNF (.inr ⟨idx, h⟩) (.inl a) + have hinv := toCNF.State.Inv_atomToCNF htip + let ⟨cache, hcache⟩ := cache.addAtom idx h htip + ⟨⟨newCnf ++ cnf, cache, State.Inv_append hinv inv⟩, by simp [hcache]⟩ + +/-- +Add the CNF for a `Decl.gate` to the state. +-/ +def State.addGate (state : State aig) {hlb} {hrb} (idx : Nat) (h : idx < aig.decls.size) + (htip : aig.decls[idx]'h = .gate lhs rhs linv rinv) (hl : state.cache.marks[lhs]'hlb = true) + (hr : state.cache.marks[rhs]'hrb = true) : + { out : State aig // State.IsExtensionBy state out idx h } := + have := aig.invariant h htip + let ⟨cnf, cache, inv⟩ := state + let newCnf := + Decl.gateToCNF + (.inr ⟨idx, h⟩) + (.inr ⟨lhs, by omega⟩) + (.inr ⟨rhs, by omega⟩) + linv + rinv + have hinv := toCNF.State.Inv_gateToCNF htip + let ⟨cache, hcache⟩ := cache.addGate idx h htip hl hr + ⟨⟨newCnf ++ cnf, cache, State.Inv_append hinv inv⟩, by simp [hcache]⟩ + +/-- +Evaluate the CNF contained within the state. +-/ +def State.eval (assign : CNFVar aig → Bool) (state : State aig) : Bool := + state.cnf.eval assign + +/-- +The CNF within the state is sat. +-/ +def State.Sat (assign : CNFVar aig → Bool) (state : State aig) : Prop := + state.cnf.Sat assign + +/-- +The CNF within the state is unsat. +-/ +def State.Unsat (state : State aig) : Prop := + state.cnf.Unsat + +theorem State.sat_def (assign : CNFVar aig → Bool) (state : State aig) : + state.Sat assign ↔ state.cnf.Sat assign := by + rfl + +theorem State.unsat_def (state : State aig) : + state.Unsat ↔ state.cnf.Unsat := by + rfl + +@[simp] +theorem State.eval_eq : State.eval assign state = state.cnf.eval assign := by simp [State.eval] + +@[simp] +theorem State.sat_iff : State.Sat assign state ↔ state.cnf.Sat assign := by simp [State.sat_def] + +@[simp] +theorem State.unsat_iff : State.Unsat state ↔ state.cnf.Unsat := by simp [State.unsat_def] + +end toCNF + +/-- +Convert an AIG into CNF, starting at some entry node. 
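+
+In outline: `go` walks the DAG from the entry node and emits Tseitin clauses for every node not yet
+marked in the cache; a unit clause then forces the auxiliary variable of the entry node to `true`, and
+`relabel` with `inj` maps the auxiliary variable of node `i` to `i` and the original atom `v` to
+`aig.decls.size + v`, so the result is a plain `CNF Nat`.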
+-/ +def toCNF (entry : Entrypoint Nat) : CNF Nat := + let ⟨state, _⟩ := go entry.aig entry.ref.gate entry.ref.hgate (toCNF.State.empty entry.aig) + let cnf : CNF (CNFVar entry.aig) := [(.inr ⟨entry.ref.gate, entry.ref.hgate⟩, true)] :: state.cnf + cnf.relabel inj +where + inj {aig : AIG Nat} (var : CNFVar aig) : Nat := + match var with + | .inl var => aig.decls.size + var + | .inr var => var.val + go (aig : AIG Nat) (upper : Nat) (h : upper < aig.decls.size) (state : toCNF.State aig) : + { out : toCNF.State aig // toCNF.State.IsExtensionBy state out upper h } := + if hmarked : state.cache.marks[upper]'(by have := state.cache.hmarks; omega) then + ⟨state, by apply toCNF.State.IsExtensionBy_rfl <;> assumption⟩ + else + let decl := aig.decls[upper] + match heq : decl with + | .const b => state.addConst upper h heq + | .atom a => state.addAtom upper h heq + | .gate lhs rhs linv rinv => + have := aig.invariant h heq + let ⟨lstate, hlstate⟩ := go aig lhs (by omega) state + let ⟨rstate, hrstate⟩ := go aig rhs (by omega) lstate + + have : toCNF.State.IsExtensionBy state rstate lhs (by omega) := by + apply toCNF.State.IsExtensionBy_trans_left + . exact hlstate + . exact hrstate + + let ⟨ret, hretstate⟩ := rstate.addGate upper h heq this.trueAt hrstate.trueAt + ⟨ + ret, + by + apply toCNF.State.IsExtensionBy_trans_right + . exact hlstate + . apply toCNF.State.IsExtensionBy_trans_right + . exact hrstate + . exact hretstate + ⟩ + +/-- +The function we use to convert from CNF with explicit auxiliary variables to just `Nat` variables +in `toCNF` is an injection. +-/ +theorem toCNF.inj_is_injection {aig : AIG Nat} (a b : CNFVar aig) : + toCNF.inj a = toCNF.inj b → a = b := by + intro h + cases a with + | inl => + cases b with + | inl => + dsimp only [inj] at h + congr + omega + | inr rhs => + exfalso + dsimp only [inj] at h + have := rhs.isLt + omega + | inr lhs => + cases b with + | inl => + dsimp only [inj] at h + omega + | inr => + dsimp only [inj] at h + congr + omega + +/-- +The node that we started CNF conversion at will always be marked as visited in the CNF cache. +-/ +theorem toCNF.go_marks : + (go aig start h state).val.cache.marks[start]'(by have := (go aig start h state).val.cache.hmarks; omega) = true := + (go aig start h state).property.trueAt + +/-- +The CNF returned by `go` will always be SAT at `cnfSatAssignment`. +-/ +theorem toCNF.go_sat (aig : AIG Nat) (start : Nat) (h1 : start < aig.decls.size) (assign1 : Nat → Bool) + (state : toCNF.State aig) : + (go aig start h1 state).val.Sat (cnfSatAssignment aig assign1) := by + have := (go aig start h1 state).val.inv assign1 + rw [State.sat_iff] + simp [this] + +theorem toCNF.go_as_denote' (aig : AIG Nat) (start) (h1) (assign1) : + ⟦aig, ⟨start, h1⟩, assign1⟧ → (go aig start h1 (.empty aig)).val.eval (cnfSatAssignment aig assign1) := by + have := go_sat aig start h1 assign1 (.empty aig) + simp only [State.Sat, CNF.sat_def] at this + simp [this] + +/-- +Connect SAT results about the CNF to SAT results about the AIG. +-/ +theorem toCNF.go_as_denote (aig : AIG Nat) (start) (h1) (assign1) : + ((⟦aig, ⟨start, h1⟩, assign1⟧ && (go aig start h1 (.empty aig)).val.eval (cnfSatAssignment aig assign1)) = sat?) + → + (⟦aig, ⟨start, h1⟩, assign1⟧ = sat?) := by + have := go_as_denote' aig start h1 assign1 + by_cases CNF.eval (cnfSatAssignment aig assign1) (go aig start h1 (State.empty aig)).val.cnf <;> simp_all + +/-- +Connect SAT results about the AIG to SAT results about the CNF. 
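+
+Concretely, this is the statement below: if the AIG denotes `false` at the entry node under the atom
+part of a CNF assignment, then the unit clause for the entry node together with the clauses produced
+by `go` also evaluates to `false` under that assignment.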
+-/ +theorem toCNF.denote_as_go {assign : AIG.CNFVar aig → Bool}: + (⟦aig, ⟨start, h1⟩, projectLeftAssign assign⟧ = false) + → + CNF.eval assign (([(.inr ⟨start, h1⟩, true)] :: (go aig start h1 (.empty aig)).val.cnf)) = false := by + intro h + match heval1:(go aig start h1 (State.empty aig)).val.cnf.eval assign with + | true => + have heval2 := (go aig start h1 (.empty aig)).val.cache.inv.heval + specialize heval2 assign heval1 start h1 go_marks + simp only [h, projectRightAssign_property, Bool.false_eq] at heval2 + simp [heval2] + | false => + simp [heval1] + +/-- +An AIG is unsat iff its CNF is unsat. +-/ +theorem toCNF_equisat (entry : Entrypoint Nat) : (toCNF entry).Unsat ↔ entry.Unsat := by + dsimp only [toCNF] + rw [CNF.unsat_relabel_iff] + . constructor + . intro h assign1 + apply toCNF.go_as_denote + specialize h (toCNF.cnfSatAssignment entry.aig assign1) + simpa using h + . intro h assign + apply toCNF.denote_as_go + specialize h (toCNF.projectLeftAssign assign) + assumption + . intro a b _ _ hinj + apply toCNF.inj_is_injection + assumption + +end AIG + +end Sat +end Std diff --git a/src/Std/Sat/AIG/Cached.lean b/src/Std/Sat/AIG/Cached.lean new file mode 100644 index 000000000000..ef9f778efa86 --- /dev/null +++ b/src/Std/Sat/AIG/Cached.lean @@ -0,0 +1,132 @@ +/- +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Henrik Böving +-/ +import Std.Sat.AIG.Basic +import Std.Sat.AIG.Lemmas + +/-! +This module contains functions to construct AIG nodes while making use of the sub-circuit cache +if possible. For performance reasons these functions should usually be preferred over the naive +AIG node creation ones. +-/ + +namespace Std +namespace Sat + +namespace AIG + +variable {α : Type} [Hashable α] [DecidableEq α] + +/-- +A version of `AIG.mkAtom` that uses the subterm cache in `AIG`. This version is meant for +programmming, for proving purposes use `AIG.mkAtom` and equality theorems to this one. +-/ +def mkAtomCached (aig : AIG α) (n : α) : Entrypoint α := + let ⟨decls, cache, inv⟩ := aig + let decl := .atom n + match cache.get? decl with + | some hit => + ⟨⟨decls, cache, inv⟩ , hit.idx, hit.hbound⟩ + | none => + let g := decls.size + let cache := cache.insert decls decl + let decls := decls.push decl + have inv := by + intro i lhs rhs linv rinv h1 h2 + simp only [Array.get_push] at h2 + split at h2 + . apply inv <;> assumption + . contradiction + ⟨⟨decls, cache, inv⟩, ⟨g, by simp [g, decls]⟩⟩ + +/-- +A version of `AIG.mkConst` that uses the subterm cache in `AIG`. This version is meant for +programmming, for proving purposes use `AIG.mkGate` and equality theorems to this one. +-/ +def mkConstCached (aig : AIG α) (val : Bool) : Entrypoint α := + let ⟨decls, cache, inv⟩ := aig + let decl := .const val + match cache.get? decl with + | some hit => + ⟨⟨decls, cache, inv⟩, hit.idx, hit.hbound⟩ + | none => + let g := decls.size + let cache := cache.insert decls decl + let decls := decls.push decl + have inv := by + intro i lhs rhs linv rinv h1 h2 + simp only [Array.get_push] at h2 + split at h2 + . apply inv <;> assumption + . contradiction + ⟨⟨decls, cache, inv⟩, ⟨g, by simp [g, decls]⟩⟩ + +/-- +A version of `AIG.mkGate` that uses the subterm cache in `AIG`. This version is meant for +programmming, for proving purposes use `AIG.mkGate` and equality theorems to this one. 
+ +Beyond caching this function also implements a subset of the optimizations presented in: +-/ +def mkGateCached (aig : AIG α) (input : GateInput aig) : Entrypoint α := + let lhs := input.lhs.ref.gate + let rhs := input.rhs.ref.gate + if lhs < rhs then + go aig ⟨input.lhs, input.rhs⟩ + else + go aig ⟨input.rhs, input.lhs⟩ +where + go (aig : AIG α) (input : GateInput aig) : Entrypoint α := + let ⟨decls, cache, inv⟩ := aig + let lhs := input.lhs.ref.gate + let rhs := input.rhs.ref.gate + let linv := input.lhs.inv + let rinv := input.rhs.inv + have := input.lhs.ref.hgate + have := input.rhs.ref.hgate + let decl := .gate lhs rhs linv rinv + match cache.get? decl with + | some hit => + ⟨⟨decls, cache, inv⟩, ⟨hit.idx, hit.hbound⟩⟩ + | none => + /- + Here we implement the constant propagating subset of: + https://fmv.jku.at/papers/BrummayerBiere-MEMICS06.pdf + TODO: rest of the table + -/ + match decls[lhs], decls[rhs], linv, rinv with + -- Boundedness + | .const true, _, true, _ | .const false, _, false, _ + | _, .const true, _, true | _, .const false, _, false => + mkConstCached ⟨decls, cache, inv⟩ false + -- Left Neutrality + | .const true, _, false, false | .const false, _, true, false => + ⟨⟨decls, cache, inv⟩, rhs, (by assumption)⟩ + -- Right Neutrality + | _, .const true, false, false | _, .const false, false, true => + ⟨⟨decls, cache, inv⟩, lhs, (by assumption)⟩ + | _, _, _, _ => + if lhs == rhs && linv == false && rinv == false then + -- Idempotency rule + ⟨⟨decls, cache, inv⟩, lhs, (by assumption)⟩ + else if lhs == rhs && linv == !rinv then + -- Contradiction rule + mkConstCached ⟨decls, cache, inv⟩ false + else + let g := decls.size + let cache := cache.insert decls decl + let decls := decls.push decl + have inv := by + intro i lhs rhs linv rinv h1 h2 + simp only [decls] at * + simp only [Array.get_push] at h2 + split at h2 + . apply inv <;> assumption + . injections; omega + ⟨⟨decls, cache, inv⟩, ⟨g, by simp [g, decls]⟩⟩ + +end AIG + +end Sat +end Std diff --git a/src/Std/Sat/AIG/CachedGates.lean b/src/Std/Sat/AIG/CachedGates.lean new file mode 100644 index 000000000000..b81c0f1e1f41 --- /dev/null +++ b/src/Std/Sat/AIG/CachedGates.lean @@ -0,0 +1,175 @@ +/- +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Henrik Böving +-/ +import Std.Sat.AIG.Cached +import Std.Sat.AIG.CachedLemmas + +/-! +This module contains functions to construct basic logic gates while making use of the sub-circuit +cache if possible. +-/ + +namespace Std +namespace Sat + +namespace AIG + +variable {α : Type} [Hashable α] [DecidableEq α] + +/-- +Create a not gate in the input AIG. This uses the builtin cache to enable automated subterm sharing. +-/ +def mkNotCached (aig : AIG α) (gate : Ref aig) : Entrypoint α := + -- ¬x = true && invert x + let res := aig.mkConstCached true + let aig := res.aig + let constRef := res.ref + aig.mkGateCached { + lhs := { + ref := constRef + inv := false + } + rhs := { + ref := gate.cast <| by + intros + apply LawfulOperator.le_size_of_le_aig_size (f := mkConstCached) + omega + inv := true + } + } + +@[inline] +def BinaryInput.asGateInput {aig : AIG α} (input : BinaryInput aig) (linv rinv : Bool) : + GateInput aig := + { lhs := { ref := input.lhs, inv := linv }, rhs := { ref := input.rhs, inv := rinv } } + +/-- +Create an and gate in the input AIG. This uses the builtin cache to enable automated subterm +sharing. 
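+
+Since conjunction is the primitive connective of an AIG, this is a direct call to `mkGateCached` with
+both inputs uninverted; the other gates in this module are assembled from `mkGateCached` with suitable
+inversions, some additionally using `mkConstCached true`, following the boolean encodings spelled out
+in the comments below.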
+-/ +def mkAndCached (aig : AIG α) (input : BinaryInput aig) : Entrypoint α := + aig.mkGateCached <| input.asGateInput false false + +/-- +Create an or gate in the input AIG. This uses the builtin cache to enable automated subterm sharing. +-/ +def mkOrCached (aig : AIG α) (input : BinaryInput aig) : Entrypoint α := + -- x or y = true && (invert (invert x && invert y)) + let res := aig.mkGateCached <| input.asGateInput true true + let aig := res.aig + let auxRef := res.ref + let res := aig.mkConstCached true + let aig := res.aig + let constRef := res.ref + aig.mkGateCached { + lhs := { + ref := constRef + inv := false + }, + rhs := { + ref := auxRef.cast <| by + intros + simp (config := { zetaDelta := true }) only + apply LawfulOperator.le_size_of_le_aig_size (f := mkConstCached) + omega + inv := true + } + } + +/-- +Create an xor gate in the input AIG. This uses the builtin cache to enable automated subterm +sharing. +-/ +def mkXorCached (aig : AIG α) (input : BinaryInput aig) : Entrypoint α := + -- x xor y = (invert (invert (x && y))) && (invert ((invert x) && (invert y))) + let res := aig.mkGateCached <| input.asGateInput false false + let aig := res.aig + let aux1Ref := res.ref + let rinput := + (input.asGateInput true true).cast + (by + intros + apply LawfulOperator.le_size_of_le_aig_size (f := mkGateCached) + omega) + let res := aig.mkGateCached rinput + let aig := res.aig + let aux2Ref := res.ref + aig.mkGateCached { + lhs := { + ref := aux1Ref.cast <| by + simp (config := { zetaDelta := true }) only + apply LawfulOperator.le_size_of_le_aig_size (f := mkGateCached) + omega + inv := true + }, + rhs := { + ref := aux2Ref + inv := true + } + } + +/-- +Create an equality gate in the input AIG. This uses the builtin cache to enable automated subterm +sharing. +-/ +def mkBEqCached (aig : AIG α) (input : BinaryInput aig) : Entrypoint α := + -- a == b = (invert (a && (invert b))) && (invert ((invert a) && b)) + let res := aig.mkGateCached <| input.asGateInput false true + let aig := res.aig + let aux1Ref := res.ref + let rinput := + (input.asGateInput true false).cast + (by + intros + apply LawfulOperator.le_size_of_le_aig_size (f := mkGateCached) + omega) + let res := aig.mkGateCached rinput + let aig := res.aig + let aux2Ref := res.ref + aig.mkGateCached { + lhs := { + ref := aux1Ref.cast <| by + simp (config := { zetaDelta := true }) only + apply LawfulOperator.le_size_of_le_aig_size (f := mkGateCached) + omega + inv := true + }, + rhs := { + ref := aux2Ref + inv := true + } + } + +/-- +Create an implication gate in the input AIG. This uses the builtin cache to enable automated subterm +sharing. +-/ +def mkImpCached (aig : AIG α) (input : BinaryInput aig) : Entrypoint α := + -- a -> b = true && (invert (a and (invert b))) + let res := aig.mkGateCached <| input.asGateInput false true + let aig := res.aig + let auxRef := res.ref + let res := aig.mkConstCached true + let aig := res.aig + let constRef := res.ref + aig.mkGateCached { + lhs := { + ref := constRef + inv := false + }, + rhs := { + ref := auxRef.cast <| by + intros + simp (config := { zetaDelta := true }) only + apply LawfulOperator.le_size_of_le_aig_size (f := mkConstCached) + omega + inv := true + } + } + +end AIG + +end Sat +end Std diff --git a/src/Std/Sat/AIG/CachedGatesLemmas.lean b/src/Std/Sat/AIG/CachedGatesLemmas.lean new file mode 100644 index 000000000000..e6e9255b4507 --- /dev/null +++ b/src/Std/Sat/AIG/CachedGatesLemmas.lean @@ -0,0 +1,268 @@ +/- +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. 
+Released under Apache 2.0 license as described in the file LICENSE. +Authors: Henrik Böving +-/ +import Std.Sat.AIG.CachedGates +import Std.Sat.AIG.LawfulOperator + +/-! +This module contains the theory of the cached gate creation functions, mostly enabled by the +`LawfulOperator` framework. It is mainly concerned with proving lemmas about the denotational +semantics of the gate functions in different scenarios. +-/ + +namespace Std +namespace Sat + +namespace AIG + +/-- +Encoding of not as boolean expression in AIG form. +-/ +private theorem not_as_aig : ∀ (b : Bool), (true && !b) = !b := by + decide + +/-- +Encoding of or as boolean expression in AIG form. +-/ +private theorem or_as_aig : ∀ (a b : Bool), (!(!a && !b)) = (a || b) := by + decide + +/-- +Encoding of XOR as boolean expression in AIG form. +-/ +private theorem xor_as_aig : ∀ (a b : Bool), (!(a && b) && !(!a && !b)) = (xor a b) := by + decide + +/-- +Encoding of BEq as boolean expression in AIG form. +-/ +private theorem beq_as_aig : ∀ (a b : Bool), (!(a && !b) && !(!a && b)) = (a == b) := by + decide + +/-- +Encoding of implication as boolean expression in AIG form. +-/ +private theorem imp_as_aig : ∀ (a b : Bool), (!(a && !b)) = (!a || b) := by + decide + +variable {α : Type} [Hashable α] [DecidableEq α] + +@[simp] +theorem BinaryInput_asGateInput_lhs {aig : AIG α} (input : BinaryInput aig) (linv rinv : Bool) : + (input.asGateInput linv rinv).lhs = ⟨input.lhs, linv⟩ := rfl + +@[simp] +theorem BinaryInput_asGateInput_rhs {aig : AIG α} (input : BinaryInput aig) (linv rinv : Bool) : + (input.asGateInput linv rinv).rhs = ⟨input.rhs, rinv⟩ := rfl + +theorem mkNotCached_le_size (aig : AIG α) (gate : Ref aig) : + aig.decls.size ≤ (aig.mkNotCached gate).aig.decls.size := by + simp only [mkNotCached] + apply LawfulOperator.le_size_of_le_aig_size + apply mkConstCached_le_size + +theorem mkNotCached_decl_eq idx (aig : AIG α) (gate : Ref aig) {h : idx < aig.decls.size} {h2} : + (aig.mkNotCached gate).aig.decls[idx]'h2 = aig.decls[idx] := by + simp only [mkNotCached] + rw [AIG.LawfulOperator.decl_eq (f := mkGateCached)] + rw [AIG.LawfulOperator.decl_eq (f := mkConstCached)] + apply LawfulOperator.lt_size_of_lt_aig_size (f := mkConstCached) + assumption + +instance : LawfulOperator α Ref mkNotCached where + le_size := mkNotCached_le_size + decl_eq := by + intros + apply mkNotCached_decl_eq + +@[simp] +theorem denote_mkNotCached {aig : AIG α} {gate : Ref aig} : + ⟦aig.mkNotCached gate, assign⟧ + = + !⟦aig, ⟨gate.gate, gate.hgate⟩, assign⟧ := by + rw [← not_as_aig] + simp [mkNotCached, LawfulOperator.denote_mem_prefix (f := mkConstCached) gate.hgate] + +theorem mkAndCached_le_size (aig : AIG α) (input : BinaryInput aig) : + aig.decls.size ≤ (aig.mkAndCached input).aig.decls.size := by + simp only [mkAndCached] + apply LawfulOperator.le_size_of_le_aig_size + omega + +theorem mkAndCached_decl_eq idx (aig : AIG α) (input : BinaryInput aig) {h : idx < aig.decls.size} + {h2} : + (aig.mkAndCached input).aig.decls[idx]'h2 = aig.decls[idx] := by + simp only [mkAndCached] + rw [AIG.LawfulOperator.decl_eq (f := mkGateCached)] + +instance : LawfulOperator α BinaryInput mkAndCached where + le_size := mkAndCached_le_size + decl_eq := by intros; apply mkAndCached_decl_eq + +@[simp] +theorem denote_mkAndCached {aig : AIG α} {input : BinaryInput aig} : + ⟦aig.mkAndCached input, assign⟧ + = + (⟦aig, input.lhs, assign⟧ + && + ⟦aig, input.rhs, assign⟧) := by + simp [mkAndCached] + +theorem mkOrCached_le_size (aig : AIG α) (input : BinaryInput aig) : + 
aig.decls.size ≤ (aig.mkOrCached input).aig.decls.size := by + simp only [mkOrCached] + apply LawfulOperator.le_size_of_le_aig_size + apply LawfulOperator.le_size_of_le_aig_size (f := mkConstCached) + apply LawfulOperator.le_size_of_le_aig_size + omega + +theorem mkOrCached_decl_eq idx (aig : AIG α) (input : BinaryInput aig) {h : idx < aig.decls.size} + {h2} : + (aig.mkOrCached input).aig.decls[idx]'h2 = aig.decls[idx] := by + simp only [mkOrCached] + rw [AIG.LawfulOperator.decl_eq (f := mkGateCached)] + rw [AIG.LawfulOperator.decl_eq (f := mkConstCached)] + . rw [AIG.LawfulOperator.decl_eq (f := mkGateCached)] + apply LawfulOperator.lt_size_of_lt_aig_size + assumption + . apply LawfulOperator.lt_size_of_lt_aig_size (f := mkConstCached) + apply LawfulOperator.lt_size_of_lt_aig_size + assumption + +instance : LawfulOperator α BinaryInput mkOrCached where + le_size := mkOrCached_le_size + decl_eq := by intros; apply mkOrCached_decl_eq + +@[simp] +theorem denote_mkOrCached {aig : AIG α} {input : BinaryInput aig} : + ⟦aig.mkOrCached input, assign⟧ + = + (⟦aig, input.lhs, assign⟧ + || + ⟦aig, input.rhs, assign⟧) := by + rw [← or_as_aig] + simp [mkOrCached, LawfulOperator.denote_input_entry (f := mkConstCached)] + + +theorem mkXorCached_le_size (aig : AIG α) {input : BinaryInput aig} : + aig.decls.size ≤ (aig.mkXorCached input).aig.decls.size := by + simp only [mkXorCached] + apply LawfulOperator.le_size_of_le_aig_size + apply LawfulOperator.le_size_of_le_aig_size + apply LawfulOperator.le_size_of_le_aig_size + omega + +theorem mkXorCached_decl_eq idx (aig : AIG α) (input : BinaryInput aig) {h : idx < aig.decls.size} + {h2} : + (aig.mkXorCached input).aig.decls[idx]'h2 = aig.decls[idx] := by + simp only [mkXorCached] + rw [AIG.LawfulOperator.decl_eq (f := mkGateCached)] + rw [AIG.LawfulOperator.decl_eq (f := mkGateCached)] + . rw [AIG.LawfulOperator.decl_eq (f := mkGateCached)] + apply LawfulOperator.lt_size_of_lt_aig_size + assumption + . apply LawfulOperator.lt_size_of_lt_aig_size + apply LawfulOperator.lt_size_of_lt_aig_size + assumption + +instance : LawfulOperator α BinaryInput mkXorCached where + le_size := mkXorCached_le_size + decl_eq := by intros; apply mkXorCached_decl_eq + +@[simp] +theorem denote_mkXorCached {aig : AIG α} {input : BinaryInput aig} : + ⟦aig.mkXorCached input, assign⟧ + = + xor + ⟦aig, input.lhs, assign⟧ + ⟦aig, input.rhs, assign⟧ + := by + rw [← xor_as_aig] + simp [ + mkXorCached, + LawfulOperator.denote_mem_prefix (f := mkGateCached) input.lhs.hgate, + LawfulOperator.denote_mem_prefix (f := mkGateCached) input.rhs.hgate + ] + +theorem mkBEqCached_le_size (aig : AIG α) {input : BinaryInput aig} : + aig.decls.size ≤ (aig.mkBEqCached input).aig.decls.size := by + simp only [mkBEqCached] + apply LawfulOperator.le_size_of_le_aig_size + apply LawfulOperator.le_size_of_le_aig_size + apply LawfulOperator.le_size_of_le_aig_size + omega + +theorem mkBEqCached_decl_eq idx (aig : AIG α) (input : BinaryInput aig) {h : idx < aig.decls.size} + {h2} : + (aig.mkBEqCached input).aig.decls[idx]'h2 = aig.decls[idx] := by + simp only [mkBEqCached] + rw [AIG.LawfulOperator.decl_eq (f := mkGateCached)] + rw [AIG.LawfulOperator.decl_eq (f := mkGateCached)] + . rw [AIG.LawfulOperator.decl_eq (f := mkGateCached)] + apply LawfulOperator.lt_size_of_lt_aig_size + assumption + . 
apply LawfulOperator.lt_size_of_lt_aig_size + apply LawfulOperator.lt_size_of_lt_aig_size + assumption + +instance : LawfulOperator α BinaryInput mkBEqCached where + le_size := mkBEqCached_le_size + decl_eq := by intros; apply mkBEqCached_decl_eq + +@[simp] +theorem denote_mkBEqCached {aig : AIG α} {input : BinaryInput aig} : + ⟦aig.mkBEqCached input, assign⟧ + = + (⟦aig, input.lhs, assign⟧ + == + ⟦aig, input.rhs, assign⟧) := by + rw [← beq_as_aig] + simp [ + mkBEqCached, + LawfulOperator.denote_mem_prefix (f := mkGateCached) input.lhs.hgate, + LawfulOperator.denote_mem_prefix (f := mkGateCached) input.rhs.hgate + ] + +theorem mkImpCached_le_size (aig : AIG α) (input : BinaryInput aig) : + aig.decls.size ≤ (aig.mkImpCached input).aig.decls.size := by + simp only [mkImpCached] + apply LawfulOperator.le_size_of_le_aig_size + apply LawfulOperator.le_size_of_le_aig_size (f := mkConstCached) + apply LawfulOperator.le_size_of_le_aig_size + omega + +theorem mkImpCached_decl_eq idx (aig : AIG α) (input : BinaryInput aig) {h : idx < aig.decls.size} + {h2} : + (aig.mkImpCached input).aig.decls[idx]'h2 = aig.decls[idx] := by + simp only [mkImpCached] + rw [AIG.LawfulOperator.decl_eq (f := mkGateCached)] + rw [AIG.LawfulOperator.decl_eq (f := mkConstCached)] + · rw [AIG.LawfulOperator.decl_eq (f := mkGateCached)] + apply LawfulOperator.lt_size_of_lt_aig_size + assumption + . apply LawfulOperator.lt_size_of_lt_aig_size (f := mkConstCached) + apply LawfulOperator.lt_size_of_lt_aig_size + assumption + +instance : LawfulOperator α BinaryInput mkImpCached where + le_size := mkImpCached_le_size + decl_eq := by intros; apply mkImpCached_decl_eq + +@[simp] +theorem denote_mkImpCached {aig : AIG α} {input : BinaryInput aig} : + ⟦aig.mkImpCached input, assign⟧ + = + ( + !⟦aig, ⟨input.lhs.gate, input.lhs.hgate⟩, assign⟧ + || + ⟦aig, ⟨input.rhs.gate, input.rhs.hgate⟩, assign⟧ + ) := by + rw [← imp_as_aig] + simp [mkImpCached, LawfulOperator.denote_input_entry (f := mkConstCached)] + +end AIG + +end Sat +end Std diff --git a/src/Std/Sat/AIG/CachedLemmas.lean b/src/Std/Sat/AIG/CachedLemmas.lean new file mode 100644 index 000000000000..fd75ec2bf469 --- /dev/null +++ b/src/Std/Sat/AIG/CachedLemmas.lean @@ -0,0 +1,340 @@ +/- +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Henrik Böving +-/ +import Std.Sat.AIG.Cached + +/-! +This module contains the theory of the cached AIG node creation functions. +It is mainly concerned with proving lemmas about the denotational semantics of the gate functions +in different scenarios, in particular reductions to the semantics of the non cached versions. +-/ + +namespace Std +namespace Sat + +namespace AIG + +variable {α : Type} [Hashable α] [DecidableEq α] + +/-- +If we find a cached atom declaration in the AIG, denoting it is equivalent to denoting `AIG.mkAtom`. +-/ +theorem denote_mkAtom_cached {aig : AIG α} {hit} : + aig.cache.get? (.atom v) = some hit + → + ⟦aig, ⟨hit.idx, hit.hbound⟩, assign⟧ = ⟦aig.mkAtom v, assign⟧ := by + have := hit.hvalid + simp only [denote_mkAtom] + unfold denote denote.go + split <;> simp_all + +/-- +`mkAtomCached` does not modify the input AIG upon a cache hit. +-/ +theorem mkAtomCached_hit_aig (aig : AIG α) {hit} (hcache : aig.cache.get? (.atom var) = some hit) : + (aig.mkAtomCached var).aig = aig := by + simp only [mkAtomCached] + split <;> simp_all + +/-- +`mkAtomCached` pushes to the input AIG upon a cache miss. 
+-/ +theorem mkAtomCached_miss_aig (aig : AIG α) (hcache : aig.cache.get? (.atom var) = none) : + (aig.mkAtomCached var).aig.decls = aig.decls.push (.atom var) := by + simp only [mkAtomCached] + split <;> simp_all + +/-- +The AIG produced by `AIG.mkAtomCached` agrees with the input AIG on all indices that are valid for +both. +-/ +theorem mkAtomCached_decl_eq (aig : AIG α) (var : α) (idx : Nat) {h : idx < aig.decls.size} + {hbound} : + (aig.mkAtomCached var).aig.decls[idx]'hbound = aig.decls[idx] := by + match hcache : aig.cache.get? (.atom var) with + | some gate => + have := mkAtomCached_hit_aig aig hcache + simp [this] + | none => + have := mkAtomCached_miss_aig aig hcache + simp only [this, Array.get_push] + split + . rfl + . contradiction + +/-- +`AIG.mkAtomCached` never shrinks the underlying AIG. +-/ +theorem mkAtomCached_le_size (aig : AIG α) (var : α) : + aig.decls.size ≤ (aig.mkAtomCached var).aig.decls.size := by + dsimp only [mkAtomCached] + split + . simp + . simp_arith + +instance : LawfulOperator α (fun _ => α) mkAtomCached where + le_size := mkAtomCached_le_size + decl_eq := mkAtomCached_decl_eq + +/-- +The central equality theorem between `mkAtomCached` and `mkAtom`. +-/ +@[simp] +theorem mkAtomCached_eval_eq_mkAtom_eval {aig : AIG α} : + ⟦aig.mkAtomCached var, assign⟧ = ⟦aig.mkAtom var, assign⟧ := by + simp only [mkAtomCached] + split + . next heq1 => + rw [denote_mkAtom_cached heq1] + . simp [mkAtom, denote] + +/-- +If we find a cached const declaration in the AIG, denoting it is equivalent to denoting +`AIG.mkConst`. +-/ +theorem denote_mkConst_cached {aig : AIG α} {hit} : + aig.cache.get? (.const b) = some hit + → + ⟦aig, ⟨hit.idx, hit.hbound⟩, assign⟧ = ⟦aig.mkConst b, assign⟧ := by + have := hit.hvalid + simp only [denote_mkConst] + unfold denote denote.go + split <;> simp_all + +/-- +`mkConstCached` does not modify the input AIG upon a cache hit. +-/ +theorem mkConstCached_hit_aig (aig : AIG α) {hit} + (hcache : aig.cache.get? (.const val) = some hit) : + (aig.mkConstCached val).aig = aig := by + simp only [mkConstCached] + split <;> simp_all + +/-- +`mkConstCached` pushes to the input AIG upon a cache miss. +-/ +theorem mkConstCached_miss_aig (aig : AIG α) (hcache : aig.cache.get? (.const val) = none) : + (aig.mkConstCached val).aig.decls = aig.decls.push (.const val) := by + simp only [mkConstCached] + split <;> simp_all + +/-- +The AIG produced by `AIG.mkConstCached` agrees with the input AIG on all indices that are valid for +both. +-/ +theorem mkConstCached_decl_eq (aig : AIG α) (val : Bool) (idx : Nat) {h : idx < aig.decls.size} + {hbound} : + (aig.mkConstCached val).aig.decls[idx]'hbound = aig.decls[idx] := by + match hcache : aig.cache.get? (.const val) with + | some gate => + have := mkConstCached_hit_aig aig hcache + simp [this] + | none => + have := mkConstCached_miss_aig aig hcache + simp only [this, Array.get_push] + split + . rfl + . contradiction + +/-- +`AIG.mkConstCached` never shrinks the underlying AIG. +-/ +theorem mkConstCached_le_size (aig : AIG α) (val : Bool) : + aig.decls.size ≤ (aig.mkConstCached val).aig.decls.size := by + dsimp only [mkConstCached] + split + . simp + . simp_arith + +instance : LawfulOperator α (fun _ => Bool) mkConstCached where + le_size := mkConstCached_le_size + decl_eq := by + intros + apply mkConstCached_decl_eq + +/-- +The central equality theorem between `mkConstCached` and `mkConst`. 
+-/ +@[simp] +theorem mkConstCached_eval_eq_mkConst_eval {aig : AIG α} : + ⟦aig.mkConstCached val, assign⟧ = ⟦aig.mkConst val, assign⟧ := by + simp only [mkConstCached] + split + . next heq1 => + rw [denote_mkConst_cached heq1] + . simp [mkConst, denote] + +/-- +If we find a cached gate declaration in the AIG, denoting it is equivalent to denoting `AIG.mkGate`. +-/ +theorem denote_mkGate_cached {aig : AIG α} {input} {hit} : + aig.cache.get? (.gate input.lhs.ref.gate input.rhs.ref.gate input.lhs.inv input.rhs.inv) = some hit + → + ⟦⟨aig, hit.idx, hit.hbound⟩, assign⟧ + = + ⟦aig.mkGate input, assign⟧ := by + intros + have := hit.hvalid + simp only [denote_mkGate] + conv => + lhs + unfold denote denote.go + split <;> simp_all[denote] + +theorem mkGateCached.go_le_size (aig : AIG α) (input : GateInput aig) : + aig.decls.size ≤ (go aig input).aig.decls.size := by + dsimp only [go] + split + . simp + . split + . simp_arith [mkConstCached_le_size] + . simp_arith [mkConstCached_le_size] + . simp_arith [mkConstCached_le_size] + . simp_arith [mkConstCached_le_size] + . simp_arith + . simp_arith + . simp_arith + . simp_arith + . split + . simp_arith + . split <;> simp_arith [mkConstCached_le_size] + +/-- +`AIG.mkGateCached` never shrinks the underlying AIG. +-/ +theorem mkGateCached_le_size (aig : AIG α) (input : GateInput aig) + : aig.decls.size ≤ (aig.mkGateCached input).aig.decls.size := by + dsimp only [mkGateCached] + split + . apply mkGateCached.go_le_size + . apply mkGateCached.go_le_size + +theorem mkGateCached.go_decl_eq (aig : AIG α) (input : GateInput aig) : + ∀ (idx : Nat) (h1) (h2), (go aig input).aig.decls[idx]'h1 = aig.decls[idx]'h2 := by + generalize hres : go aig input = res + unfold go at hres + dsimp only at hres + split at hres + . rw [← hres] + intros + simp + . split at hres + . rw [← hres] + intros + rw [LawfulOperator.decl_eq (f := AIG.mkConstCached)] + . rw [← hres] + intros + rw [LawfulOperator.decl_eq (f := AIG.mkConstCached)] + . rw [← hres] + intros + rw [LawfulOperator.decl_eq (f := AIG.mkConstCached)] + . rw [← hres] + intros + rw [LawfulOperator.decl_eq (f := AIG.mkConstCached)] + . rw [← hres] + intros + simp + . rw [← hres] + intros + simp + . rw [← hres] + intros + simp + . rw [← hres] + intros + simp + . split at hres + . rw [← hres] + intros + simp + . split at hres + . rw [← hres] + intros + rw [AIG.LawfulOperator.decl_eq (f := AIG.mkConstCached)] + . rw [← hres] + dsimp only + intro idx h1 h2 + rw [Array.get_push] + simp [h2] + +/-- +The AIG produced by `AIG.mkGateCached` agrees with the input AIG on all indices that are valid for +both. +-/ +theorem mkGateCached_decl_eq (aig : AIG α) (input : GateInput aig) : + ∀ (idx : Nat) (h1) (h2), (aig.mkGateCached input).aig.decls[idx]'h1 = aig.decls[idx]'h2 := by + generalize hres : mkGateCached aig input = res + unfold mkGateCached at hres + dsimp only at hres + split at hres + all_goals + rw [← hres] + intros + rw [mkGateCached.go_decl_eq] + +instance : LawfulOperator α GateInput mkGateCached where + le_size := mkGateCached_le_size + decl_eq := by + intros + apply mkGateCached_decl_eq + +theorem mkGateCached.go_eval_eq_mkGate_eval {aig : AIG α} {input : GateInput aig} : + ⟦go aig input, assign⟧ = ⟦aig.mkGate input, assign⟧ := by + simp only [go] + split + . next heq1 => + rw [denote_mkGate_cached heq1] + . split + . next heq _ => + simp_all [denote_idx_const heq] + . next heq _ => + simp_all [denote_idx_const heq] + . next heq _ _ _ => + simp_all [denote_idx_const heq] + . 
next heq _ _ _ =>
+      simp_all [denote_idx_const heq]
+    . next heq _ _ _ =>
+      simp_all [denote_idx_const heq]
+    . next heq _ _ _ =>
+      simp_all [denote_idx_const heq]
+    . next heq _ _ _ _ =>
+      simp_all [denote_idx_const heq]
+    . next heq _ _ _ =>
+      simp_all [denote_idx_const heq]
+    . split
+      . next hif =>
+        simp only [beq_false, Bool.and_eq_true, beq_iff_eq, Bool.not_eq_true'] at hif
+        rcases hif with ⟨⟨hifeq, hlinv⟩, hrinv⟩
+        replace hifeq : input.lhs.ref = input.rhs.ref := by
+          rcases input with ⟨⟨⟨_, _⟩, _⟩, ⟨⟨_, _⟩, _⟩⟩
+          simpa using hifeq
+        simp [hlinv, hrinv, hifeq]
+      . split
+        . next hif =>
+          simp only [Bool.and_eq_true, beq_iff_eq] at hif
+          rcases hif with ⟨hifeq, hinv⟩
+          replace hifeq : input.lhs.ref = input.rhs.ref := by
+            rcases input with ⟨⟨⟨_, _⟩, _⟩, ⟨⟨_, _⟩, _⟩⟩
+            simpa using hifeq
+          simp [hifeq, hinv]
+        . simp [mkGate, denote]
+
+/--
+The central equality theorem between `mkGateCached` and `mkGate`.
+-/
+@[simp]
+theorem mkGateCached_eval_eq_mkGate_eval {aig : AIG α} {input : GateInput aig} :
+    ⟦aig.mkGateCached input, assign⟧ = ⟦aig.mkGate input, assign⟧ := by
+  simp only [mkGateCached]
+  split
+  . rw [mkGateCached.go_eval_eq_mkGate_eval]
+  . rw [mkGateCached.go_eval_eq_mkGate_eval]
+    simp only [denote_mkGate]
+    rw [Bool.and_comm]
+
+end AIG
+
+end Sat
+end Std
diff --git a/src/Std/Sat/AIG/If.lean b/src/Std/Sat/AIG/If.lean
new file mode 100644
index 000000000000..ff3ab227361d
--- /dev/null
+++ b/src/Std/Sat/AIG/If.lean
@@ -0,0 +1,300 @@
+/-
+Copyright (c) 2024 Lean FRO, LLC. All rights reserved.
+Released under Apache 2.0 license as described in the file LICENSE.
+Authors: Henrik Böving
+-/
+import Std.Sat.AIG.CachedGatesLemmas
+import Std.Sat.AIG.LawfulVecOperator
+
+/-!
+Besides introducing a way to construct an if statement in an `AIG`, this module also demonstrates
+a style of writing Lean code that minimizes the risk of linearity issues on the `AIG`.
+
+The idea is to always keep one `aig` variable around that contains the `AIG` and continuously
+shadow it. However, applying multiple operations to the `AIG` often requires `Ref.cast` to use
+other inputs or `Ref`s created by previous operations in later ones. Applying a `Ref.cast` would
+usually require keeping around the old `AIG` to state the theorem statement. Luckily, in this
+situation Lean is usually able to infer the theorem statement on its own. For this reason the
+style goes as follows:
+```
+let res := someLawfulOperator aig input
+let aig := res.aig
+let ref := res.ref
+have := LawfulOperator.le_size (f := someLawfulOperator) ..
+let input1 := input1.cast this
+let input2 := input2.cast this
+-- ...
+-- Next `LawfulOperator` application
+```
+This style also generalizes to applications of `LawfulVecOperator`s.
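+
+As a concrete instance, the beginning of `mkIfCached` below follows exactly this pattern
+(a sketch, with the surrounding definition elided):
+```
+let res := aig.mkAndCached ⟨input.discr, input.lhs⟩
+let aig := res.aig
+let lhsRef := res.ref
+-- cast the original input into the grown AIG before applying the next operator
+let input := input.cast <| by apply AIG.LawfulOperator.le_size (f := mkAndCached)
+let res := aig.mkNotCached input.discr
+```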
+-/ + +namespace Std +namespace Sat + +namespace AIG + +variable {α : Type} [Hashable α] [DecidableEq α] + +open AIG + +def mkIfCached (aig : AIG α) (input : TernaryInput aig) : Entrypoint α := + -- if d then l else r = ((d && l) || (!d && r)) + let res := aig.mkAndCached ⟨input.discr, input.lhs⟩ + let aig := res.aig + let lhsRef := res.ref + let input := input.cast <| by apply AIG.LawfulOperator.le_size (f := mkAndCached) + let res := aig.mkNotCached input.discr + let aig := res.aig + let notDiscr := res.ref + let input := input.cast <| by apply AIG.LawfulOperator.le_size (f := mkNotCached) + let res := aig.mkAndCached ⟨notDiscr, input.rhs⟩ + let aig := res.aig + let rhsRef := res.ref + let lhsRef := lhsRef.cast <| by + apply AIG.LawfulOperator.le_size_of_le_aig_size (f := mkAndCached) + apply AIG.LawfulOperator.le_size (f := mkNotCached) + aig.mkOrCached ⟨lhsRef, rhsRef⟩ + +instance : LawfulOperator α TernaryInput mkIfCached where + le_size := by + intros + unfold mkIfCached + dsimp only + apply LawfulOperator.le_size_of_le_aig_size (f := mkOrCached) + apply LawfulOperator.le_size_of_le_aig_size (f := mkAndCached) + apply LawfulOperator.le_size_of_le_aig_size (f := mkNotCached) + apply LawfulOperator.le_size (f := mkAndCached) + decl_eq := by + intros + unfold mkIfCached + dsimp only + rw [LawfulOperator.decl_eq (f := mkOrCached)] + rw [LawfulOperator.decl_eq (f := mkAndCached)] + rw [LawfulOperator.decl_eq (f := mkNotCached)] + rw [LawfulOperator.decl_eq (f := mkAndCached)] + . apply LawfulOperator.lt_size_of_lt_aig_size (f := mkAndCached) + omega + . apply LawfulOperator.lt_size_of_lt_aig_size (f := mkNotCached) + apply LawfulOperator.lt_size_of_lt_aig_size (f := mkAndCached) + omega + . apply LawfulOperator.lt_size_of_lt_aig_size (f := mkAndCached) + apply LawfulOperator.lt_size_of_lt_aig_size (f := mkNotCached) + apply LawfulOperator.lt_size_of_lt_aig_size (f := mkAndCached) + omega + +theorem if_as_bool (d l r : Bool) : (if d then l else r) = ((d && l) || (!d && r)) := by + revert d l r + decide + +@[simp] +theorem denote_mkIfCached {aig : AIG α} {input : TernaryInput aig} : + ⟦aig.mkIfCached input, assign⟧ + = + if ⟦aig, input.discr, assign⟧ then ⟦aig, input.lhs, assign⟧ else ⟦aig, input.rhs, assign⟧ := by + rw [if_as_bool] + unfold mkIfCached + dsimp only + simp only [TernaryInput.cast, Ref_cast', id_eq, Int.reduceNeg, denote_mkOrCached, + denote_projected_entry, denote_mkAndCached, denote_mkNotCached] + congr 2 + . rw [LawfulOperator.denote_mem_prefix] + rw [LawfulOperator.denote_mem_prefix] + . simp + . simp [Ref.hgate] + . rw [LawfulOperator.denote_mem_prefix] + . rw [LawfulOperator.denote_mem_prefix] + rw [LawfulOperator.denote_mem_prefix] + +namespace RefVec + +structure IfInput (aig : AIG α) (w : Nat) where + discr : Ref aig + lhs : RefVec aig w + rhs : RefVec aig w + +def ite (aig : AIG α) (input : IfInput aig w) : RefVecEntry α w := + let ⟨discr, lhs, rhs⟩ := input + go aig 0 (by omega) discr lhs rhs .empty +where + go {w : Nat} (aig : AIG α) (curr : Nat) (hcurr : curr ≤ w) (discr : Ref aig) + (lhs rhs : RefVec aig w) (s : RefVec aig curr) : RefVecEntry α w := + if hcurr : curr < w then + let input := ⟨discr, lhs.get curr hcurr, rhs.get curr hcurr⟩ + let res := mkIfCached aig input + let aig := res.aig + let ref := res.ref + have := LawfulOperator.le_size (f := mkIfCached) .. 
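+      -- `this` bounds the size of the previous AIG by the size of the shadowed `aig`,
+      -- which is exactly the side condition the `cast` calls below need.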
+ let discr := discr.cast this + let lhs := lhs.cast this + let rhs := rhs.cast this + let s := s.cast this + let s := s.push ref + go aig (curr + 1) (by omega) discr lhs rhs s + else + have : curr = w := by omega + ⟨aig, this ▸ s⟩ +termination_by w - curr + +namespace ite + +theorem go_le_size (aig : AIG α) (curr : Nat) (hcurr : curr ≤ w) (discr : Ref aig) + (lhs rhs : RefVec aig w) (s : RefVec aig curr) : + aig.decls.size ≤ (go aig curr hcurr discr lhs rhs s).aig.decls.size := by + unfold go + dsimp only + split + . refine Nat.le_trans ?_ (by apply go_le_size) + apply LawfulOperator.le_size (f := mkIfCached) + . simp +termination_by w - curr + +theorem go_decl_eq (aig : AIG α) (curr : Nat) (hcurr : curr ≤ w) (discr : Ref aig) + (lhs rhs : RefVec aig w) (s : RefVec aig curr) : + ∀ (idx : Nat) (h1) (h2), + (go aig curr hcurr discr lhs rhs s).aig.decls[idx]'h2 = aig.decls[idx]'h1 := by + generalize hgo : go aig curr hcurr discr lhs rhs s = res + unfold go at hgo + dsimp only at hgo + split at hgo + . rw [← hgo] + intro idx h1 h2 + rw [go_decl_eq] + rw [AIG.LawfulOperator.decl_eq (f := AIG.mkIfCached)] + apply AIG.LawfulOperator.lt_size_of_lt_aig_size (f := AIG.mkIfCached) + assumption + . simp [← hgo] +termination_by w - curr + +end ite + +instance : LawfulVecOperator α IfInput ite where + le_size := by + intros + unfold ite + apply ite.go_le_size + decl_eq := by + intros + unfold ite + rw [ite.go_decl_eq] + +namespace ite + +theorem go_get_aux {w : Nat} (aig : AIG α) (curr : Nat) (hcurr : curr ≤ w) (discr : Ref aig) + (lhs rhs : RefVec aig w) (s : RefVec aig curr) : + ∀ (idx : Nat) (hidx : idx < curr) (hfoo), + (go aig curr hcurr discr lhs rhs s).vec.get idx (by omega) + = + (s.get idx hidx).cast hfoo := by + intro idx hidx + generalize hgo : go aig curr hcurr discr lhs rhs s = res + unfold go at hgo + dsimp only at hgo + split at hgo + . rw [← hgo] + intros + rw [go_get_aux] + rw [AIG.RefVec.get_push_ref_lt] + . simp only [Ref.cast, Ref.mk.injEq] + rw [AIG.RefVec.get_cast] + . simp + . assumption + . apply go_le_size + . rw [← hgo] + simp only [Nat.le_refl, get, Ref_cast', Ref.mk.injEq, true_implies] + have : curr = w := by omega + subst this + simp +termination_by w - curr + +theorem go_get {w : Nat} (aig : AIG α) (curr : Nat) (hcurr : curr ≤ w) (discr : Ref aig) + (lhs rhs : RefVec aig w) (s : RefVec aig curr) : + ∀ (idx : Nat) (hidx : idx < curr), + (go aig curr hcurr discr lhs rhs s).vec.get idx (by omega) + = + (s.get idx hidx).cast (by apply go_le_size) := by + intro idx hidx + apply go_get_aux + +theorem go_denote_mem_prefix {w : Nat} (aig : AIG α) (curr : Nat) (hcurr : curr ≤ w) + (discr : Ref aig) (lhs rhs : RefVec aig w) (s : RefVec aig curr) (start : Nat) (hstart) : + ⟦ + (go aig curr hcurr discr lhs rhs s).aig, + ⟨start, by apply Nat.lt_of_lt_of_le; exact hstart; apply go_le_size⟩, + assign + ⟧ + = + ⟦aig, ⟨start, hstart⟩, assign⟧ := by + apply denote.eq_of_isPrefix (entry := ⟨aig, start,hstart⟩) + apply IsPrefix.of + . intros + apply go_decl_eq + . 
intros + apply go_le_size + +theorem denote_go {w : Nat} (aig : AIG α) (curr : Nat) (hcurr : curr ≤ w) (discr : Ref aig) + (lhs rhs : RefVec aig w) (s : RefVec aig curr) : + ∀ (idx : Nat) (hidx1 : idx < w), + curr ≤ idx + → + ⟦ + (go aig curr hcurr discr lhs rhs s).aig, + (go aig curr hcurr discr lhs rhs s).vec.get idx hidx1, + assign + ⟧ + = + if ⟦aig, discr, assign⟧ then + ⟦aig, lhs.get idx hidx1, assign⟧ + else + ⟦aig, rhs.get idx hidx1, assign⟧ := by + intro idx hidx1 hidx2 + generalize hgo : go aig curr hcurr discr lhs rhs s = res + unfold go at hgo + dsimp only at hgo + split at hgo + . cases Nat.eq_or_lt_of_le hidx2 with + | inl heq => + subst heq + rw [← hgo] + rw [go_get] + rw [AIG.RefVec.get_push_ref_eq'] + . rw [go_denote_mem_prefix] + . simp + . simp [Ref.hgate] + . omega + | inr heq => + rw [← hgo] + rw [denote_go] + . rw [LawfulOperator.denote_mem_prefix (f := mkIfCached)] + rw [LawfulOperator.denote_mem_prefix (f := mkIfCached)] + rw [LawfulOperator.denote_mem_prefix (f := mkIfCached)] + . simp + . simp [Ref.hgate] + . simp [Ref.hgate] + . simp [Ref.hgate] + . omega + . omega +termination_by w - curr + +end ite + +@[simp] +theorem denote_ite {aig : AIG α} {input : IfInput aig w} : + ∀ (idx : Nat) (hidx : idx < w), + ⟦(ite aig input).aig, (ite aig input).vec.get idx hidx, assign⟧ + = + if ⟦aig, input.discr, assign⟧ then + ⟦aig, input.lhs.get idx hidx, assign⟧ + else + ⟦aig, input.rhs.get idx hidx, assign⟧ := by + intro idx hidx + unfold ite + dsimp only + rw [ite.denote_go] + omega +end RefVec + +end AIG + +end Sat +end Std diff --git a/src/Std/Sat/AIG/LawfulOperator.lean b/src/Std/Sat/AIG/LawfulOperator.lean new file mode 100644 index 000000000000..a2338aa6c810 --- /dev/null +++ b/src/Std/Sat/AIG/LawfulOperator.lean @@ -0,0 +1,155 @@ +/- +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Henrik Böving +-/ +import Std.Sat.AIG.Basic + +/-! +The lawful operator framework provides free theorems around the typeclass `LawfulOperator`. +Its definition is based on section 3.3 of the AIGNET paper. +-/ + +namespace Std +namespace Sat + +namespace AIG + +/-- +`decls1` is a prefix of `decls2` +-/ +structure IsPrefix (decls1 decls2 : Array (Decl α)) : Prop where + of :: + /-- + The prefix may never be longer than the other array. + -/ + size_le : decls1.size ≤ decls2.size + /-- + The prefix and the other array must agree on all elements up until the bound of the prefix + -/ + idx_eq : ∀ idx (h : idx < decls1.size), decls2[idx]'(by omega) = decls1[idx]'h + +/-- +If `decls1` is a prefix of `decls2` and we start evaluating `decls2` at an +index in bounds of `decls1` we can evaluate at `decls1`. +-/ +theorem denote.go_eq_of_isPrefix (decls1 decls2 : Array (Decl α)) (start : Nat) {hdag1} {hdag2} + {hbounds1} {hbounds2} (hprefix : IsPrefix decls1 decls2) : + denote.go start decls2 assign hbounds2 hdag2 + = + denote.go start decls1 assign hbounds1 hdag1 := by + unfold denote.go + have hidx1 := hprefix.idx_eq start hbounds1 + split + . next heq => + rw [hidx1] at heq + split <;> simp_all + . next heq => + rw [hidx1] at heq + split <;> simp_all + . next lhs rhs linv rinv heq => + rw [hidx1] at heq + have := hdag1 hbounds1 heq + have hidx2 := hprefix.idx_eq lhs (by omega) + have hidx3 := hprefix.idx_eq rhs (by omega) + split + . simp_all + . simp_all + . simp_all + congr 2 + . apply denote.go_eq_of_isPrefix + assumption + . 
apply denote.go_eq_of_isPrefix + assumption +termination_by start + +variable {α : Type} [Hashable α] [DecidableEq α] + +@[inherit_doc denote.go_eq_of_isPrefix] +theorem denote.eq_of_isPrefix (entry : Entrypoint α) (newAIG : AIG α) + (hprefix : IsPrefix entry.aig.decls newAIG.decls) : + ⟦newAIG, ⟨entry.ref.gate, (by have := entry.ref.hgate; have := hprefix.size_le; omega)⟩, assign⟧ + = + ⟦entry, assign⟧ + := by + unfold denote + apply denote.go_eq_of_isPrefix + assumption + +abbrev ExtendingEntrypoint (aig : AIG α) : Type := + { entry : Entrypoint α // aig.decls.size ≤ entry.aig.decls.size } + +abbrev ExtendingRefVecEntry (aig : AIG α) (len : Nat) : Type := + { ref : RefVecEntry α len // aig.decls.size ≤ ref.aig.decls.size } + +/-- +A function `f` that takes some `aig : AIG α` and an argument of type `β aig` is called a lawful +AIG operator if it only extends the `AIG` but never modifies already existing nodes. + +This guarantees that applying such a function will not change the semantics of any existing parts +of the circuit, allowing us to perform local reasoning on the AIG. +-/ +class LawfulOperator (α : Type) [Hashable α] [DecidableEq α] + (β : AIG α → Type) (f : (aig : AIG α) → β aig → Entrypoint α) where + le_size : ∀ (aig : AIG α) (input : β aig), aig.decls.size ≤ (f aig input).aig.decls.size + decl_eq : ∀ (aig : AIG α) (input : β aig) (idx : Nat) (h1 : idx < aig.decls.size) (h2), + (f aig input).aig.decls[idx]'h2 = aig.decls[idx]'h1 + +namespace LawfulOperator + +variable {β : AIG α → Type} +variable {f : (aig : AIG α) → β aig → Entrypoint α} [LawfulOperator α β f] + +theorem isPrefix_aig (aig : AIG α) (input : β aig) : + IsPrefix aig.decls (f aig input).aig.decls := by + apply IsPrefix.of + . intro idx h + apply decl_eq + . apply le_size + +theorem lt_size (entry : Entrypoint α) (input : β entry.aig) : + entry.ref.gate < (f entry.aig input).aig.decls.size := by + have h1 := entry.ref.hgate + have h2 : entry.aig.decls.size ≤ (f entry.aig input).aig.decls.size := by + apply le_size + omega + +theorem lt_size_of_lt_aig_size (aig : AIG α) (input : β aig) (h : x < aig.decls.size) : + x < (f aig input).aig.decls.size := by + apply Nat.lt_of_lt_of_le + . exact h + . exact le_size aig input + +theorem le_size_of_le_aig_size (aig : AIG α) (input : β aig) (h : x ≤ aig.decls.size) : + x ≤ (f aig input).aig.decls.size := by + apply Nat.le_trans + . exact h + . exact le_size aig input + +@[simp] +theorem denote_input_entry (entry : Entrypoint α) {input} {h} : + ⟦(f entry.aig input).aig, ⟨entry.ref.gate, h⟩, assign⟧ + = + ⟦entry, assign⟧ := by + apply denote.eq_of_isPrefix + apply isPrefix_aig + +@[simp] +theorem denote_cast_entry (entry : Entrypoint α) {input} {h} : + ⟦(f entry.aig input).aig, entry.ref.cast h, assign⟧ + = + ⟦entry, assign⟧ := by + simp [Ref.cast] + +theorem denote_mem_prefix {aig : AIG α} {input} (h) : + ⟦(f aig input).aig, ⟨start, by apply lt_size_of_lt_aig_size; omega⟩, assign⟧ + = + ⟦aig, ⟨start, h⟩, assign⟧ := by + rw [denote_input_entry ⟨aig, start, h⟩] + +end LawfulOperator + +end AIG + +end Sat +end Std diff --git a/src/Std/Sat/AIG/LawfulVecOperator.lean b/src/Std/Sat/AIG/LawfulVecOperator.lean new file mode 100644 index 000000000000..8432c2fbdf06 --- /dev/null +++ b/src/Std/Sat/AIG/LawfulVecOperator.lean @@ -0,0 +1,87 @@ +/- +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. 
+Authors: Henrik Böving +-/ +import Std.Sat.AIG.LawfulOperator +import Std.Sat.AIG.RefVec + +namespace Std +namespace Sat + +namespace AIG + +variable {α : Type} [Hashable α] [DecidableEq α] + +class LawfulVecOperator (α : Type) [Hashable α] [DecidableEq α] + (β : AIG α → Nat → Type) (f : {len : Nat} → (aig : AIG α) → β aig len → RefVecEntry α len) where + le_size : ∀ (aig : AIG α) (input : β aig len), aig.decls.size ≤ (f aig input).aig.decls.size + decl_eq : ∀ (aig : AIG α) (input : β aig len) (idx : Nat) (h1 : idx < aig.decls.size) (h2), + (f aig input).aig.decls[idx]'h2 = aig.decls[idx]'h1 + +namespace LawfulVecOperator + +variable {β : AIG α → Nat → Type} +variable {f : {len : Nat} → (aig : AIG α) → β aig len → RefVecEntry α len} +variable [LawfulVecOperator α β f] + +theorem isPrefix_aig (aig : AIG α) (input : β aig len) : + IsPrefix aig.decls (f aig input).aig.decls := by + apply IsPrefix.of + . intro idx h + apply decl_eq + . apply le_size + +theorem lt_size (entry : Entrypoint α) (input : β entry.aig len) : + entry.ref.gate < (f entry.aig input).aig.decls.size := by + have h1 := entry.ref.hgate + have h2 : entry.aig.decls.size ≤ (f entry.aig input).aig.decls.size := by + apply le_size + omega + +theorem lt_size_of_lt_aig_size (aig : AIG α) (input : β aig len) (h : x < aig.decls.size) : + x < (f aig input).aig.decls.size := by + apply Nat.lt_of_lt_of_le + . exact h + . exact le_size aig input + +theorem le_size_of_le_aig_size (aig : AIG α) (input : β aig len) (h : x ≤ aig.decls.size) : + x ≤ (f aig input).aig.decls.size := by + apply Nat.le_trans + . exact h + . exact le_size aig input + +@[simp] +theorem denote_input_entry (entry : Entrypoint α) {input : β entry.aig len} {h} : + ⟦(f entry.aig input).aig, ⟨entry.ref.gate, h⟩, assign ⟧ + = + ⟦entry, assign⟧ := by + apply denote.eq_of_isPrefix + apply isPrefix_aig + +@[simp] +theorem denote_cast_entry (entry : Entrypoint α) {input : β entry.aig len} {h} : + ⟦(f entry.aig input).aig, entry.ref.cast h, assign⟧ + = + ⟦entry, assign⟧ := by + simp [Ref.cast] + +theorem denote_mem_prefix {aig : AIG α} {input : β aig len} (h) : + ⟦(f aig input).aig, ⟨start, by apply lt_size_of_lt_aig_size; omega⟩, assign⟧ + = + ⟦aig, ⟨start, h⟩, assign⟧ := by + rw [denote_input_entry ⟨aig, start, h⟩] + +@[simp] +theorem denote_input_vec (s : RefVecEntry α len) {input : β s.aig len} {hcast} : + ⟦(f s.aig input).aig, (s.vec.get idx hidx).cast hcast, assign⟧ + = + ⟦s.aig, s.vec.get idx hidx, assign⟧ := by + rw [denote_mem_prefix] + rfl + +end LawfulVecOperator +end AIG + +end Sat +end Std diff --git a/src/Std/Sat/AIG/Lemmas.lean b/src/Std/Sat/AIG/Lemmas.lean new file mode 100644 index 000000000000..c14108f301af --- /dev/null +++ b/src/Std/Sat/AIG/Lemmas.lean @@ -0,0 +1,290 @@ +/- +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Henrik Böving +-/ +import Std.Sat.AIG.Basic +import Std.Sat.AIG.LawfulOperator + +/-! +This module provides a basic theory around the naive AIG node creation functions. It is mostly +fundamental work for the cached versions later on. 
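+
+For example, `denote_mkAtom` below reduces the denotation of a freshly created atom node to a
+plain assignment lookup: `⟦aig.mkAtom var, assign⟧ = assign var`.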
+-/ + +namespace Std +namespace Sat + +namespace AIG + +variable {α : Type} [Hashable α] [DecidableEq α] + +@[simp] +theorem Ref_cast {aig1 aig2 : AIG α} (ref : Ref aig1) + (h : aig1.decls.size ≤ aig2.decls.size) : + (ref.cast h).gate = ref.gate := rfl + +@[simp] +theorem Ref_cast' {aig1 aig2 : AIG α} (ref : Ref aig1) + (h : aig1.decls.size ≤ aig2.decls.size) : + (ref.cast h) = ⟨ref.gate, by have := ref.hgate; omega⟩ := rfl + +@[simp] +theorem Fanin_cast_ref {aig1 aig2 : AIG α} (fanin : Fanin aig1) + (h : aig1.decls.size ≤ aig2.decls.size) : + (fanin.cast h).ref = fanin.ref.cast h := rfl + +@[simp] +theorem Fanin_cast_inv {aig1 aig2 : AIG α} (fanin : Fanin aig1) + (h : aig1.decls.size ≤ aig2.decls.size) : + (fanin.cast h).inv = fanin.inv := rfl + +@[simp] +theorem GateInput_cast_lhs {aig1 aig2 : AIG α} (input : GateInput aig1) + (h : aig1.decls.size ≤ aig2.decls.size) : + (input.cast h).lhs = input.lhs.cast h := rfl + +@[simp] +theorem GateInput_cast_rhs {aig1 aig2 : AIG α} (input : GateInput aig1) + (h : aig1.decls.size ≤ aig2.decls.size) : + (input.cast h).rhs = input.rhs.cast h := rfl + +@[simp] +theorem BinaryInput.cast_each {aig1 aig2 : AIG α} (lhs rhs : Ref aig1) + (h1 h2 : aig1.decls.size ≤ aig2.decls.size) : + BinaryInput.mk (lhs.cast h1) (rhs.cast h2) = (BinaryInput.mk lhs rhs).cast h2 := by + simp [BinaryInput.cast] + +@[simp] +theorem denote_projected_entry {entry : Entrypoint α} : + ⟦entry.aig, entry.ref, assign⟧ = ⟦entry, assign⟧ := by + cases entry; simp + +@[simp] +theorem denote_projected_entry' {entry : Entrypoint α} : + ⟦entry.aig, ⟨entry.ref.gate, entry.ref.hgate⟩, assign⟧ = ⟦entry, assign⟧ := by + cases entry; simp + +/-- +`AIG.mkGate` never shrinks the underlying AIG. +-/ +theorem mkGate_le_size (aig : AIG α) (input : GateInput aig) : + aig.decls.size ≤ (aig.mkGate input).aig.decls.size := by + simp_arith [mkGate] + +/-- +The AIG produced by `AIG.mkGate` agrees with the input AIG on all indices that are valid for both. +-/ +theorem mkGate_decl_eq idx (aig : AIG α) (input : GateInput aig) {h : idx < aig.decls.size} : + have := mkGate_le_size aig input + (aig.mkGate input).aig.decls[idx]'(by omega) = aig.decls[idx] := by + simp only [mkGate, Array.get_push] + split + . rfl + . contradiction + +instance : LawfulOperator α GateInput mkGate where + le_size := mkGate_le_size + decl_eq := by + intros + apply mkGate_decl_eq + +@[simp] +theorem denote_mkGate {aig : AIG α} {input : GateInput aig} : + ⟦aig.mkGate input, assign⟧ + = + ( + (xor ⟦aig, input.lhs.ref, assign⟧ input.lhs.inv) + && + (xor ⟦aig, input.rhs.ref, assign⟧ input.rhs.inv) + ) := by + conv => + lhs + unfold denote denote.go + split + . next heq => + rw [mkGate, Array.get_push_eq] at heq + contradiction + . next heq => + rw [mkGate, Array.get_push_eq] at heq + contradiction + . next heq => + rw [mkGate, Array.get_push_eq] at heq + injection heq with heq1 heq2 heq3 heq4 + dsimp only + congr 2 + . unfold denote + simp only [heq1] + apply denote.go_eq_of_isPrefix + apply LawfulOperator.isPrefix_aig + . simp [heq3] + . unfold denote + simp only [heq2] + apply denote.go_eq_of_isPrefix + apply LawfulOperator.isPrefix_aig + . simp [heq4] + +/-- +`AIG.mkAtom` never shrinks the underlying AIG. +-/ +theorem mkAtom_le_size (aig : AIG α) (var : α) : + aig.decls.size ≤ (aig.mkAtom var).aig.decls.size := by + simp_arith [mkAtom] + +/-- +The AIG produced by `AIG.mkAtom` agrees with the input AIG on all indices that are valid for both. 
+-/ +theorem mkAtom_decl_eq (aig : AIG α) (var : α) (idx : Nat) {h : idx < aig.decls.size} {hbound} : + (aig.mkAtom var).aig.decls[idx]'hbound = aig.decls[idx] := by + simp only [mkAtom, Array.get_push] + split + . rfl + . contradiction + +instance : LawfulOperator α (fun _ => α) mkAtom where + le_size := mkAtom_le_size + decl_eq := by + intros + apply mkAtom_decl_eq + +@[simp] +theorem denote_mkAtom {aig : AIG α} : + ⟦(aig.mkAtom var), assign⟧ = assign var := by + unfold denote denote.go + split + . next heq => + rw [mkAtom, Array.get_push_eq] at heq + contradiction + . next heq => + rw [mkAtom, Array.get_push_eq] at heq + injection heq with heq + rw [heq] + . next heq => + rw [mkAtom, Array.get_push_eq] at heq + contradiction + +/-- +`AIG.mkConst` never shrinks the underlying AIG. +-/ +theorem mkConst_le_size (aig : AIG α) (val : Bool) : + aig.decls.size ≤ (aig.mkConst val).aig.decls.size := by + simp_arith [mkConst] + +/-- +The AIG produced by `AIG.mkConst` agrees with the input AIG on all indices that are valid for both. +-/ +theorem mkConst_decl_eq (aig : AIG α) (val : Bool) (idx : Nat) {h : idx < aig.decls.size} : + have := mkConst_le_size aig val + (aig.mkConst val).aig.decls[idx]'(by omega) = aig.decls[idx] := by + simp only [mkConst, Array.get_push] + split + . rfl + . contradiction + +instance : LawfulOperator α (fun _ => Bool) mkConst where + le_size := mkConst_le_size + decl_eq := by + intros + apply mkConst_decl_eq + +@[simp] +theorem denote_mkConst {aig : AIG α} : ⟦(aig.mkConst val), assign⟧ = val := by + unfold denote denote.go + split + . next heq => + rw [mkConst, Array.get_push_eq] at heq + injection heq with heq + rw [heq] + . next heq => + rw [mkConst, Array.get_push_eq] at heq + contradiction + . next heq => + rw [mkConst, Array.get_push_eq] at heq + contradiction + +/-- +If an index contains a `Decl.const` we know how to denote it. +-/ +theorem denote_idx_const {aig : AIG α} {hstart} (h : aig.decls[start]'hstart = .const b) : + ⟦aig, ⟨start, hstart⟩, assign⟧ = b := by + unfold denote denote.go + split <;> simp_all + +/-- +If an index contains a `Decl.atom` we know how to denote it. +-/ +theorem denote_idx_atom {aig : AIG α} {hstart} (h : aig.decls[start] = .atom a) : + ⟦aig, ⟨start, hstart⟩, assign⟧ = assign a := by + unfold denote denote.go + split <;> simp_all + +/-- +If an index contains a `Decl.gate` we know how to denote it. +-/ +theorem denote_idx_gate {aig : AIG α} {hstart} (h : aig.decls[start] = .gate lhs rhs linv rinv) : + ⟦aig, ⟨start, hstart⟩, assign⟧ + = + ( + (xor ⟦aig, ⟨lhs, by have := aig.invariant hstart h; omega⟩, assign⟧ linv) + && + (xor ⟦aig, ⟨rhs, by have := aig.invariant hstart h; omega⟩, assign⟧ rinv) + ) := by + unfold denote + conv => + lhs + unfold denote.go + split + . simp_all + . simp_all + . 
next heq => + rw [h] at heq + simp_all + +theorem idx_trichotomy (aig : AIG α) (hstart : start < aig.decls.size) {prop : Prop} + (hconst : ∀ b, aig.decls[start]'hstart = .const b → prop) + (hatom : ∀ a, aig.decls[start]'hstart = .atom a → prop) + (hgate : ∀ lhs rhs linv rinv, aig.decls[start]'hstart = .gate lhs rhs linv rinv → prop) + : prop := by + match h : aig.decls[start]'hstart with + | .const b => apply hconst; assumption + | .atom a => apply hatom; assumption + | .gate lhs rhs linv rinv => apply hgate; assumption + +theorem denote_idx_trichotomy {aig : AIG α} {hstart : start < aig.decls.size} + (hconst : ∀ b, aig.decls[start]'hstart = .const b → ⟦aig, ⟨start, hstart⟩, assign⟧ = res) + (hatom : ∀ a, aig.decls[start]'hstart = .atom a → ⟦aig, ⟨start, hstart⟩, assign⟧ = res) + (hgate : + ∀ lhs rhs linv rinv, + aig.decls[start]'hstart = .gate lhs rhs linv rinv + → + ⟦aig, ⟨start, hstart⟩, assign⟧ = res + ) : + ⟦aig, ⟨start, hstart⟩, assign⟧ = res := by + apply idx_trichotomy aig hstart + . exact hconst + . exact hatom + . exact hgate + +theorem mem_def {aig : AIG α} {a : α} : (a ∈ aig) ↔ ((.atom a) ∈ aig.decls) := by + simp [Membership.mem, Mem] + +theorem denote_congr (assign1 assign2 : α → Bool) (aig : AIG α) (idx : Nat) + (hidx : idx < aig.decls.size) (h : ∀ a, a ∈ aig → assign1 a = assign2 a) : + ⟦aig, ⟨idx, hidx⟩, assign1⟧ = ⟦aig, ⟨idx, hidx⟩, assign2⟧ := by + apply denote_idx_trichotomy + . intro b heq + simp [denote_idx_const heq] + . intro a heq + simp only [denote_idx_atom heq] + apply h + rw [mem_def, ← heq, Array.mem_def] + apply Array.getElem_mem_data + . intro lhs rhs linv rinv heq + simp only [denote_idx_gate heq] + have := aig.invariant hidx heq + rw [denote_congr assign1 assign2 aig lhs (by omega) h] + rw [denote_congr assign1 assign2 aig rhs (by omega) h] + +end AIG + +end Sat +end Std diff --git a/src/Std/Sat/AIG/RefVec.lean b/src/Std/Sat/AIG/RefVec.lean new file mode 100644 index 000000000000..9bfe96382c45 --- /dev/null +++ b/src/Std/Sat/AIG/RefVec.lean @@ -0,0 +1,188 @@ +/- +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Henrik Böving +-/ +import Std.Sat.AIG.LawfulOperator +import Std.Sat.AIG.CachedGatesLemmas + +namespace Std +namespace Sat + +namespace AIG + +variable {α : Type} [Hashable α] [DecidableEq α] {aig : AIG α} + +namespace RefVec + +def empty : RefVec aig 0 where + refs := #[] + hlen := by simp + hrefs := by intros; contradiction + +@[inline] +def cast' {aig1 aig2 : AIG α} (s : RefVec aig1 len) + (h : + (∀ {i : Nat} (h : i < len), s.refs[i]'(by have := s.hlen; omega) < aig1.decls.size) + → ∀ {i : Nat} (h : i < len), s.refs[i]'(by have := s.hlen; omega) < aig2.decls.size) : + RefVec aig2 len := + { s with + hrefs := by + intros + apply h + · intros + apply s.hrefs + assumption + · assumption + } + +@[inline] +def cast {aig1 aig2 : AIG α} (s : RefVec aig1 len) (h : aig1.decls.size ≤ aig2.decls.size) : + RefVec aig2 len := + s.cast' <| by + intro hall i hi + specialize hall hi + omega + +@[inline] +def get (s : RefVec aig len) (idx : Nat) (hidx : idx < len) : Ref aig := + let ⟨refs, hlen, hrefs⟩ := s + let ref := refs[idx]'(by rw [hlen]; assumption) + ⟨ref, by apply hrefs; assumption⟩ + +@[inline] +def push (s : RefVec aig len) (ref : AIG.Ref aig) : RefVec aig (len + 1) := + let ⟨refs, hlen, hrefs⟩ := s + ⟨ + refs.push ref.gate, + by simp [hlen], + by + intro i hi + simp only [Array.get_push] + split + . apply hrefs + omega + . 
apply AIG.Ref.hgate + ⟩ + +@[simp] +theorem get_push_ref_eq (s : RefVec aig len) (ref : AIG.Ref aig) : + (s.push ref).get len (by omega) = ref := by + have := s.hlen + simp [get, push, ← this] + +-- This variant exists because it is sometimes hard to rewrite properly with DTT. +theorem get_push_ref_eq' (s : RefVec aig len) (ref : AIG.Ref aig) (idx : Nat) + (hidx : idx = len) : + (s.push ref).get idx (by omega) = ref := by + have := s.hlen + simp [get, push, ← this, hidx] + +theorem get_push_ref_lt (s : RefVec aig len) (ref : AIG.Ref aig) (idx : Nat) + (hidx : idx < len) : + (s.push ref).get idx (by omega) = s.get idx hidx := by + simp only [get, push, Ref.mk.injEq] + cases ref + simp only [Ref.mk.injEq] + rw [Array.get_push_lt] + +@[simp] +theorem get_cast {aig1 aig2 : AIG α} (s : RefVec aig1 len) (idx : Nat) (hidx : idx < len) + (hcast : aig1.decls.size ≤ aig2.decls.size) : + (s.cast hcast).get idx hidx + = + (s.get idx hidx).cast hcast := by + simp [cast, cast', get] + +@[inline] +def append (lhs : RefVec aig lw) (rhs : RefVec aig rw) : RefVec aig (lw + rw) := + let ⟨lrefs, hl1, hl2⟩ := lhs + let ⟨rrefs, hr1, hr2⟩ := rhs + ⟨ + lrefs ++ rrefs, + by simp [Array.size_append, hl1, hr1], + by + intro i h + by_cases hsplit : i < lrefs.size + . rw [Array.get_append_left] + apply hl2 + omega + . rw [Array.get_append_right] + . apply hr2 + omega + . omega + ⟩ + +theorem get_append (lhs : RefVec aig lw) (rhs : RefVec aig rw) (idx : Nat) + (hidx : idx < lw + rw) : + (lhs.append rhs).get idx hidx + = + if h : idx < lw then + lhs.get idx h + else + rhs.get (idx - lw) (by omega) := by + simp only [get, append] + split + . simp [Ref.mk.injEq] + rw [Array.get_append_left] + . simp only [Ref.mk.injEq] + rw [Array.get_append_right] + . simp [lhs.hlen] + . rw [lhs.hlen] + omega + +@[inline] +def getD (s : RefVec aig len) (idx : Nat) (alt : Ref aig) : Ref aig := + if hidx : idx < len then + s.get idx hidx + else + alt + +theorem get_in_bound (s : RefVec aig len) (idx : Nat) (alt : Ref aig) (hidx : idx < len) : + s.getD idx alt = s.get idx hidx := by + unfold getD + simp [hidx] + +theorem get_out_bound (s : RefVec aig len) (idx : Nat) (alt : Ref aig) (hidx : len ≤ idx) : + s.getD idx alt = alt := by + unfold getD + split + . omega + . rfl + +end RefVec + +structure BinaryRefVec (aig : AIG α) (len : Nat) where + lhs : RefVec aig len + rhs : RefVec aig len + +namespace BinaryRefVec + +@[inline] +def cast {aig1 aig2 : AIG α} (s : BinaryRefVec aig1 len) + (h : aig1.decls.size ≤ aig2.decls.size) : + BinaryRefVec aig2 len := + let ⟨lhs, rhs⟩ := s + ⟨lhs.cast h, rhs.cast h⟩ + +@[simp] +theorem lhs_get_cast {aig1 aig2 : AIG α} (s : BinaryRefVec aig1 len) (idx : Nat) + (hidx : idx < len) (hcast : aig1.decls.size ≤ aig2.decls.size) : + (s.cast hcast).lhs.get idx hidx + = + (s.lhs.get idx hidx).cast hcast := by + simp [cast] + +@[simp] +theorem rhs_get_cast {aig1 aig2 : AIG α} (s : BinaryRefVec aig1 len) (idx : Nat) + (hidx : idx < len) (hcast : aig1.decls.size ≤ aig2.decls.size) : + (s.cast hcast).rhs.get idx hidx + = + (s.rhs.get idx hidx).cast hcast := by + simp [cast] + +end BinaryRefVec +end AIG + +end Sat +end Std diff --git a/src/Std/Sat/AIG/RefVecOperator.lean b/src/Std/Sat/AIG/RefVecOperator.lean new file mode 100644 index 000000000000..611f5c6a0ebb --- /dev/null +++ b/src/Std/Sat/AIG/RefVecOperator.lean @@ -0,0 +1,8 @@ +/- +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. 
+Authors: Henrik Böving +-/ +import Std.Sat.AIG.RefVecOperator.Map +import Std.Sat.AIG.RefVecOperator.Zip +import Std.Sat.AIG.RefVecOperator.Fold diff --git a/src/Std/Sat/AIG/RefVecOperator/Fold.lean b/src/Std/Sat/AIG/RefVecOperator/Fold.lean new file mode 100644 index 000000000000..61648766cfbd --- /dev/null +++ b/src/Std/Sat/AIG/RefVecOperator/Fold.lean @@ -0,0 +1,195 @@ +/- +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Henrik Böving +-/ +import Std.Sat.AIG.RefVec +import Std.Sat.AIG.LawfulVecOperator + +namespace Std +namespace Sat + +namespace AIG +namespace RefVec + +variable {α : Type} [Hashable α] [DecidableEq α] {aig : AIG α} + +structure FoldTarget (aig : AIG α) where + {len : Nat} + vec : RefVec aig len + func : (aig : AIG α) → BinaryInput aig → Entrypoint α + [lawful : LawfulOperator α BinaryInput func] + +attribute [instance] FoldTarget.lawful + +@[inline] +def FoldTarget.mkAnd {aig : AIG α} (vec : RefVec aig w) : FoldTarget aig where + vec := vec + func := mkAndCached + +@[specialize] +def fold (aig : AIG α) (target : FoldTarget aig) : Entrypoint α := + let res := aig.mkConstCached true + let aig := res.aig + let acc := res.ref + let input := target.vec.cast <| by + intros + apply LawfulOperator.le_size_of_le_aig_size (f := mkConstCached) + omega + go aig acc 0 target.len input target.func +where + @[specialize] + go (aig : AIG α) (acc : Ref aig) (idx : Nat) (len : Nat) (input : RefVec aig len) + (f : (aig : AIG α) → BinaryInput aig → Entrypoint α) [LawfulOperator α BinaryInput f] : + Entrypoint α := + if hidx : idx < len then + let res := f aig ⟨acc, input.get idx hidx⟩ + let aig := res.aig + let newAcc := res.ref + let input := input.cast <| by + intros + apply LawfulOperator.le_size_of_le_aig_size (f := f) + omega + go aig newAcc (idx + 1) len input f + else + ⟨aig, acc⟩ + termination_by len - idx + +theorem fold.go_le_size {aig : AIG α} (acc : Ref aig) (idx : Nat) (s : RefVec aig len) + (f : (aig : AIG α) → BinaryInput aig → Entrypoint α) [LawfulOperator α BinaryInput f] : + aig.decls.size ≤ (go aig acc idx len s f).1.decls.size := by + unfold go + split + . next h => + dsimp only + refine Nat.le_trans ?_ (by apply fold.go_le_size) + apply LawfulOperator.le_size + . simp + termination_by len - idx + +theorem fold_le_size {aig : AIG α} (target : FoldTarget aig) : + aig.decls.size ≤ (fold aig target).1.decls.size := by + unfold fold + dsimp only + refine Nat.le_trans ?_ (by apply fold.go_le_size) + apply LawfulOperator.le_size (f := mkConstCached) + +theorem fold.go_decl_eq {aig : AIG α} (acc : Ref aig) (i : Nat) (s : RefVec aig len) + (f : (aig : AIG α) → BinaryInput aig → Entrypoint α) [LawfulOperator α BinaryInput f] : + ∀ (idx : Nat) (h1) (h2), + (go aig acc i len s f).1.decls[idx]'h2 = aig.decls[idx]'h1 := by + generalize hgo : go aig acc i len s f = res + unfold go at hgo + split at hgo + . dsimp only at hgo + rw [← hgo] + intros + rw [go_decl_eq] + rw [LawfulOperator.decl_eq] + apply LawfulOperator.lt_size_of_lt_aig_size + assumption + . 
rw [← hgo] + intros + simp +termination_by len - i + +theorem fold_decl_eq {aig : AIG α} (target : FoldTarget aig) : + ∀ idx (h1 : idx < aig.decls.size) (h2), + (fold aig target).1.decls[idx]'h2 + = + aig.decls[idx]'h1 := by + intros + unfold fold + dsimp only + rw [fold.go_decl_eq] + rw [LawfulOperator.decl_eq (f := mkConstCached)] + apply LawfulOperator.lt_size_of_lt_aig_size (f := mkConstCached) + assumption + +instance : LawfulOperator α FoldTarget fold where + le_size := by intros; apply fold_le_size + decl_eq := by intros; apply fold_decl_eq + +namespace fold + +theorem denote_go_and {aig : AIG α} (acc : AIG.Ref aig) (curr : Nat) (hcurr : curr ≤ len) + (input : RefVec aig len) : + ⟦ + (go aig acc curr len input mkAndCached).aig, + (go aig acc curr len input mkAndCached).ref, + assign + ⟧ + = + ( + ⟦aig, acc, assign⟧ + ∧ + (∀ (idx : Nat) (hidx1 : idx < len), curr ≤ idx → ⟦aig, input.get idx hidx1, assign⟧) + ) := by + generalize hgo : go aig acc curr len input mkAndCached = res + unfold go at hgo + split at hgo + . dsimp only at hgo + rw [← hgo] + rw [denote_go_and] + . simp only [denote_projected_entry, denote_mkAndCached, Bool.and_eq_true, get_cast, + eq_iff_iff] + constructor + . intro h + rcases h with ⟨⟨h1, h2⟩, h3⟩ + constructor + . assumption + . intro idx hidx1 hidx2 + cases Nat.eq_or_lt_of_le hidx2 with + | inl heq => simpa [heq] using h2 + | inr hlt => + specialize h3 idx hidx1 (by omega) + rw [← h3] + rw [AIG.LawfulOperator.denote_mem_prefix (f := AIG.mkAndCached)] + . simp + . simp [Ref.hgate] + . simp only [and_imp] + intro hacc hrest + constructor + . simp [hacc, hrest] + . intro idx hidx1 hidx2 + specialize hrest idx hidx1 (by omega) + rw [← hrest] + rw [AIG.LawfulOperator.denote_mem_prefix (f := AIG.mkAndCached)] + . simp + . simp [Ref.hgate] + . omega + . rw [← hgo] + simp only [eq_iff_iff, iff_self_and] + omega +termination_by len - curr + +end fold + +@[simp] +theorem denote_fold_and {aig : AIG α} (s : RefVec aig len) : + ⟦(fold aig (FoldTarget.mkAnd s)), assign⟧ + ↔ + (∀ (idx : Nat) (hidx : idx < len), ⟦aig, s.get idx hidx, assign⟧) := by + unfold fold + simp only [FoldTarget.mkAnd] + rw [fold.denote_go_and] + . simp only [denote_projected_entry, mkConstCached_eval_eq_mkConst_eval, denote_mkConst, + Nat.zero_le, get_cast, Ref_cast', true_implies, true_and] + constructor + . intro h idx hidx + specialize h idx hidx + rw [AIG.LawfulOperator.denote_mem_prefix (f := mkConstCached)] at h + rw [← h] + . intro h idx hidx + specialize h idx hidx + rw [AIG.LawfulOperator.denote_mem_prefix (f := mkConstCached)] + . simp only [← h] + . apply RefVec.hrefs + simp [FoldTarget.mkAnd, hidx] + . omega + +end RefVec +end AIG + +end Sat +end Std diff --git a/src/Std/Sat/AIG/RefVecOperator/Map.lean b/src/Std/Sat/AIG/RefVecOperator/Map.lean new file mode 100644 index 000000000000..7a705eab85c6 --- /dev/null +++ b/src/Std/Sat/AIG/RefVecOperator/Map.lean @@ -0,0 +1,239 @@ +/- +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. 
+Authors: Henrik Böving +-/ +import Std.Sat.AIG.RefVec +import Std.Sat.AIG.LawfulVecOperator + +namespace Std +namespace Sat + +namespace AIG +namespace RefVec + +variable {α : Type} [Hashable α] [DecidableEq α] {aig : AIG α} + +class LawfulMapOperator (α : Type) [Hashable α] [DecidableEq α] + (f : (aig : AIG α) → Ref aig → Entrypoint α) [LawfulOperator α Ref f] : Prop + where + chainable : ∀ (aig : AIG α) (input1 input2 : Ref aig) (h) (assign), + ⟦f (f aig input1).aig (input2.cast h), assign⟧ + = + ⟦f aig input2, assign⟧ + +namespace LawfulMapOperator + +@[simp] +theorem denote_prefix_cast_ref {aig : AIG α} {input1 input2 : Ref aig} + {f : (aig : AIG α) → Ref aig → Entrypoint α} [LawfulOperator α Ref f] [LawfulMapOperator α f] + {h} : + ⟦f (f aig input1).aig (input2.cast h), assign⟧ + = + ⟦f aig input2, assign⟧ := by + rw [LawfulMapOperator.chainable] + +instance : LawfulMapOperator α mkNotCached where + chainable := by + intros + simp only [Ref_cast', denote_mkNotCached] + rw [LawfulOperator.denote_mem_prefix (f := mkNotCached)] + +end LawfulMapOperator + +structure MapTarget (aig : AIG α) (len : Nat) where + vec : RefVec aig len + func : (aig : AIG α) → Ref aig → Entrypoint α + [lawful : LawfulOperator α Ref func] + [chainable : LawfulMapOperator α func] + +attribute [instance] MapTarget.lawful +attribute [instance] MapTarget.chainable + +@[specialize] +def map (aig : AIG α) (target : MapTarget aig len) : RefVecEntry α len := + go aig 0 (by omega) .empty target.vec target.func +where + @[specialize] + go {len : Nat} (aig : AIG α) (idx : Nat) (hidx : idx ≤ len) (s : RefVec aig idx) + (input : RefVec aig len) (f : (aig : AIG α) → Ref aig → Entrypoint α) + [LawfulOperator α Ref f] [LawfulMapOperator α f] : + RefVecEntry α len := + if hidx : idx < len then + let res := f aig (input.get idx hidx) + let aig := res.aig + let newRef := res.ref + have := by + intros + apply LawfulOperator.le_size_of_le_aig_size + omega + let input := input.cast this + let s := s.cast this + let s := s.push newRef + go aig (idx + 1) (by omega) s input f + else + have : idx = len := by omega + ⟨aig, this ▸ s⟩ + termination_by len - idx + +theorem map.go_le_size {aig : AIG α} (idx : Nat) (hidx) (s : RefVec aig idx) + (input : RefVec aig len) (f : (aig : AIG α) → Ref aig → Entrypoint α) + [LawfulOperator α Ref f] [LawfulMapOperator α f] : + aig.decls.size ≤ (go aig idx hidx s input f).aig.decls.size := by + unfold go + split + . next h => + dsimp only + refine Nat.le_trans ?_ (by apply map.go_le_size) + apply LawfulOperator.le_size + . simp + termination_by len - idx + +theorem map_le_size {aig : AIG α} (target : MapTarget aig len) : + aig.decls.size ≤ (map aig target).aig.decls.size := by + unfold map + apply map.go_le_size + +theorem map.go_decl_eq {aig : AIG α} (i) (hi) + (s : RefVec aig i) (input : RefVec aig len) (f : (aig : AIG α) → Ref aig → Entrypoint α) + [LawfulOperator α Ref f] [LawfulMapOperator α f] : + ∀ (idx : Nat) (h1) (h2), (go aig i hi s input f).1.decls[idx]'h2 = aig.decls[idx]'h1 := by + generalize hgo : go aig i hi s input f = res + unfold go at hgo + split at hgo + . dsimp only at hgo + rw [← hgo] + intros + rw [go_decl_eq] + rw [LawfulOperator.decl_eq] + apply LawfulOperator.lt_size_of_lt_aig_size + assumption + . 
dsimp only at hgo + rw [← hgo] + intros + simp +termination_by len - i + +theorem map_decl_eq {aig : AIG α} (target : MapTarget aig len) : + ∀ idx (h1 : idx < aig.decls.size) (h2), + (map aig target).1.decls[idx]'h2 + = + aig.decls[idx]'h1 := by + intros + unfold map + apply map.go_decl_eq + +instance : LawfulVecOperator α MapTarget map where + le_size := by intros; apply map_le_size + decl_eq := by intros; apply map_decl_eq + +namespace map + +theorem go_get_aux {aig : AIG α} (curr : Nat) (hcurr : curr ≤ len) (s : RefVec aig curr) + (input : RefVec aig len) (f : (aig : AIG α) → Ref aig → Entrypoint α) + [LawfulOperator α Ref f] [LawfulMapOperator α f] : + -- The hfoo here is a trick to make the dependent type gods happy. + ∀ (idx : Nat) (hidx : idx < curr) (hfoo), + (go aig curr hcurr s input f).vec.get idx (by omega) + = + (s.get idx hidx).cast hfoo := by + intro idx hidx + generalize hgo : go aig curr hcurr s input f = res + unfold go at hgo + split at hgo + . dsimp only at hgo + rw [← hgo] + intro hfoo + rw [go_get_aux] + rw [AIG.RefVec.get_push_ref_lt] + . simp only [Ref.cast, Ref.mk.injEq] + rw [AIG.RefVec.get_cast] + . simp + . assumption + . apply go_le_size + . dsimp only at hgo + rw [← hgo] + simp only [Nat.le_refl, get, Ref_cast', Ref.mk.injEq, true_implies] + have : curr = len := by omega + subst this + rfl +termination_by len - curr + +theorem go_get {aig : AIG α} (curr : Nat) (hcurr : curr ≤ len) (s : RefVec aig curr) + (input : RefVec aig len) (f : (aig : AIG α) → Ref aig → Entrypoint α) + [LawfulOperator α Ref f] [LawfulMapOperator α f] : + ∀ (idx : Nat) (hidx : idx < curr), + (go aig curr hcurr s input f).vec.get idx (by omega) + = + (s.get idx hidx).cast (by apply go_le_size) := by + intros + apply go_get_aux + +theorem go_denote_mem_prefix {aig : AIG α} (curr : Nat) (hcurr : curr ≤ len) + (s : RefVec aig curr) (input : RefVec aig len) + (f : (aig : AIG α) → Ref aig → Entrypoint α) [LawfulOperator α Ref f] [LawfulMapOperator α f] + (start : Nat) (hstart) : + ⟦ + (go aig curr hcurr s input f).aig, + ⟨start, by apply Nat.lt_of_lt_of_le; exact hstart; apply go_le_size⟩, + assign + ⟧ + = + ⟦aig, ⟨start, hstart⟩, assign⟧ := by + apply denote.eq_of_isPrefix (entry := ⟨aig, start,hstart⟩) + apply IsPrefix.of + . intros + apply go_decl_eq + . intros + apply go_le_size + +theorem denote_go {aig : AIG α} (curr : Nat) (hcurr : curr ≤ len) (s : RefVec aig curr) + (input : RefVec aig len) (f : (aig : AIG α) → Ref aig → Entrypoint α) + [LawfulOperator α Ref f] [LawfulMapOperator α f] : + ∀ (idx : Nat) (hidx1 : idx < len), + curr ≤ idx + → + ⟦(go aig curr hcurr s input f).aig, (go aig curr hcurr s input f).vec.get idx hidx1, assign⟧ + = + ⟦f aig (input.get idx hidx1), assign⟧ := by + intro idx hidx1 hidx2 + generalize hgo : go aig curr hcurr s input f = res + unfold go at hgo + split at hgo + . dsimp only at hgo + cases Nat.eq_or_lt_of_le hidx2 with + | inl heq => + rw [← hgo] + rw [go_get] + rw [AIG.RefVec.get_push_ref_eq'] + . simp only [← heq] + rw [go_denote_mem_prefix] + . simp + . simp [Ref.hgate] + . rw [heq] + | inr hlt => + rw [← hgo] + rw [denote_go] + . simp [get_cast, -Ref_cast'] + . omega + . 
omega +termination_by len - curr + +end map + +@[simp] +theorem denote_map {aig : AIG α} (target : MapTarget aig len) : + ∀ (idx : Nat) (hidx : idx < len), + ⟦(map aig target).aig, (map aig target).vec.get idx hidx, assign⟧ + = + ⟦target.func aig (target.vec.get idx hidx), assign⟧ := by + intro idx hidx + unfold map + apply map.denote_go + omega + +end RefVec +end AIG + +end Sat +end Std diff --git a/src/Std/Sat/AIG/RefVecOperator/Zip.lean b/src/Std/Sat/AIG/RefVecOperator/Zip.lean new file mode 100644 index 000000000000..e15a72a3ed43 --- /dev/null +++ b/src/Std/Sat/AIG/RefVecOperator/Zip.lean @@ -0,0 +1,269 @@ +/- +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Henrik Böving +-/ +import Std.Sat.AIG.RefVec +import Std.Sat.AIG.LawfulVecOperator + +namespace Std +namespace Sat + +namespace AIG +namespace RefVec + +variable {α : Type} [Hashable α] [DecidableEq α] {aig : AIG α} + +class LawfulZipOperator (α : Type) [Hashable α] [DecidableEq α] + (f : (aig : AIG α) → BinaryInput aig → Entrypoint α) [LawfulOperator α BinaryInput f] : Prop + where + chainable : ∀ (aig : AIG α) (input1 input2 : BinaryInput aig) (h) (assign), + ⟦f (f aig input1).aig (input2.cast h), assign⟧ + = + ⟦f aig input2, assign⟧ + +namespace LawfulZipOperator + +@[simp] +theorem denote_prefix_cast_ref {aig : AIG α} {input1 input2 : BinaryInput aig} + {f : (aig : AIG α) → BinaryInput aig → Entrypoint α} [LawfulOperator α BinaryInput f] + [LawfulZipOperator α f] {h} : + ⟦f (f aig input1).aig (input2.cast h), assign⟧ + = + ⟦f aig input2, assign⟧ := by + rw [LawfulZipOperator.chainable] + +instance : LawfulZipOperator α mkAndCached where + chainable := by + intros + simp only [BinaryInput.cast, Ref_cast', denote_mkAndCached] + rw [LawfulOperator.denote_mem_prefix (f := mkAndCached)] + rw [LawfulOperator.denote_mem_prefix (f := mkAndCached)] + +instance : LawfulZipOperator α mkOrCached where + chainable := by + intros + simp only [BinaryInput.cast, Ref_cast', denote_mkOrCached] + rw [LawfulOperator.denote_mem_prefix (f := mkOrCached)] + rw [LawfulOperator.denote_mem_prefix (f := mkOrCached)] + +instance : LawfulZipOperator α mkXorCached where + chainable := by + intros + simp only [BinaryInput.cast, Ref_cast', denote_mkXorCached] + rw [LawfulOperator.denote_mem_prefix (f := mkXorCached)] + rw [LawfulOperator.denote_mem_prefix (f := mkXorCached)] + +instance : LawfulZipOperator α mkBEqCached where + chainable := by + intros + simp only [BinaryInput.cast, Ref_cast', denote_mkBEqCached] + rw [LawfulOperator.denote_mem_prefix (f := mkBEqCached)] + rw [LawfulOperator.denote_mem_prefix (f := mkBEqCached)] + +instance : LawfulZipOperator α mkImpCached where + chainable := by + intros + simp only [BinaryInput.cast, Ref_cast', denote_mkImpCached] + rw [LawfulOperator.denote_mem_prefix (f := mkImpCached)] + rw [LawfulOperator.denote_mem_prefix (f := mkImpCached)] + +end LawfulZipOperator + +structure ZipTarget (aig : AIG α) (len : Nat) where + input : BinaryRefVec aig len + func : (aig : AIG α) → BinaryInput aig → Entrypoint α + [lawful : LawfulOperator α BinaryInput func] + [chainable : LawfulZipOperator α func] + +attribute [instance] ZipTarget.lawful +attribute [instance] ZipTarget.chainable + +@[specialize] +def zip (aig : AIG α) (target : ZipTarget aig len) : RefVecEntry α len := + go aig 0 .empty (by omega) target.input.lhs target.input.rhs target.func +where + @[specialize] + go (aig : AIG α) (idx : Nat) (s : RefVec aig idx) (hidx : idx ≤ len) + 
(lhs rhs : RefVec aig len) (f : (aig : AIG α) → BinaryInput aig → Entrypoint α) + [LawfulOperator α BinaryInput f] [chainable : LawfulZipOperator α f] : + RefVecEntry α len := + if hidx : idx < len then + let res := f aig ⟨lhs.get idx hidx, rhs.get idx hidx⟩ + let aig := res.aig + let newRef := res.ref + have := by + intros + apply LawfulOperator.le_size_of_le_aig_size + omega + let s := s.cast this + let s := s.push newRef + go aig (idx + 1) s (by omega) (lhs.cast this) (rhs.cast this) f + else + have : idx = len := by omega + ⟨aig, this ▸ s⟩ + termination_by len - idx + +theorem zip.go_le_size {aig : AIG α} (idx : Nat) (hidx) (s : RefVec aig idx) + (lhs rhs : RefVec aig len) + (f : (aig : AIG α) → BinaryInput aig → Entrypoint α) [LawfulOperator α BinaryInput f] + [chainable : LawfulZipOperator α f] : + aig.decls.size ≤ (go aig idx s hidx lhs rhs f).1.decls.size := by + unfold go + split + . dsimp only + refine Nat.le_trans ?_ (by apply zip.go_le_size) + apply LawfulOperator.le_size + . simp + termination_by len - idx + +theorem zip_le_size {aig : AIG α} (target : ZipTarget aig len) : + aig.decls.size ≤ (zip aig target).1.decls.size := by + unfold zip + apply zip.go_le_size + +theorem zip.go_decl_eq {aig : AIG α} (i) (hi) (lhs rhs : RefVec aig len) + (s : RefVec aig i) (f : (aig : AIG α) → BinaryInput aig → Entrypoint α) + [LawfulOperator α BinaryInput f] [chainable : LawfulZipOperator α f] : + ∀ (idx : Nat) (h1) (h2), (go aig i s hi lhs rhs f).1.decls[idx]'h2 = aig.decls[idx]'h1 := by + generalize hgo : go aig i s hi lhs rhs f = res + unfold go at hgo + split at hgo + . dsimp only at hgo + rw [← hgo] + intros + intros + rw [go_decl_eq] + rw [LawfulOperator.decl_eq] + apply LawfulOperator.lt_size_of_lt_aig_size + assumption + . dsimp only at hgo + rw [← hgo] + intros + simp +termination_by len - i + +theorem zip_decl_eq {aig : AIG α} (target : ZipTarget aig len) : + ∀ idx (h1 : idx < aig.decls.size) (h2), + (zip aig target).1.decls[idx]'h2 = aig.decls[idx]'h1 := by + intros + unfold zip + apply zip.go_decl_eq + +instance : LawfulVecOperator α ZipTarget zip where + le_size := by intros; apply zip_le_size + decl_eq := by intros; apply zip_decl_eq + +namespace zip + +theorem go_get_aux {aig : AIG α} (curr : Nat) (hcurr : curr ≤ len) (s : RefVec aig curr) + (lhs rhs : RefVec aig len) (f : (aig : AIG α) → BinaryInput aig → Entrypoint α) + [LawfulOperator α BinaryInput f] [chainable : LawfulZipOperator α f] : + -- The hfoo here is a trick to make the dependent type gods happy + ∀ (idx : Nat) (hidx : idx < curr) (hfoo), + (go aig curr s hcurr lhs rhs f).vec.get idx (by omega) + = + (s.get idx hidx).cast hfoo := by + intro idx hidx + generalize hgo : go aig curr s hcurr lhs rhs f = res + unfold go at hgo + split at hgo + . dsimp only at hgo + rw [← hgo] + intro hfoo + rw [go_get_aux] + rw [AIG.RefVec.get_push_ref_lt] + . simp only [Ref.cast, Ref.mk.injEq] + rw [AIG.RefVec.get_cast] + . simp + . assumption + . apply go_le_size + . 
dsimp only at hgo + rw [← hgo] + simp only [Nat.le_refl, get, Ref_cast', Ref.mk.injEq, true_implies] + have : curr = len := by omega + subst this + simp +termination_by len - curr + +theorem go_get {aig : AIG α} (curr : Nat) (hcurr : curr ≤ len) (s : RefVec aig curr) + (lhs rhs : RefVec aig len) (f : (aig : AIG α) → BinaryInput aig → Entrypoint α) + [LawfulOperator α BinaryInput f] [chainable : LawfulZipOperator α f] : + ∀ (idx : Nat) (hidx : idx < curr), + (go aig curr s hcurr lhs rhs f).vec.get idx (by omega) + = + (s.get idx hidx).cast (by apply go_le_size) := by + intros + apply go_get_aux + +theorem go_denote_mem_prefix {aig : AIG α} (curr : Nat) (hcurr : curr ≤ len) + (s : RefVec aig curr) (lhs rhs : RefVec aig len) + (f : (aig : AIG α) → BinaryInput aig → Entrypoint α) [LawfulOperator α BinaryInput f] + [chainable : LawfulZipOperator α f] (start : Nat) (hstart) : + ⟦ + (go aig curr s hcurr lhs rhs f).aig, + ⟨start, by apply Nat.lt_of_lt_of_le; exact hstart; apply go_le_size⟩, + assign + ⟧ + = + ⟦aig, ⟨start, hstart⟩, assign⟧ := by + apply denote.eq_of_isPrefix (entry := ⟨aig, start,hstart⟩) + apply IsPrefix.of + . intros + apply go_decl_eq + . intros + apply go_le_size + +theorem denote_go {aig : AIG α} (curr : Nat) (hcurr : curr ≤ len) (s : RefVec aig curr) + (lhs rhs : RefVec aig len) (f : (aig : AIG α) → BinaryInput aig → Entrypoint α) + [LawfulOperator α BinaryInput f] [chainable : LawfulZipOperator α f] : + ∀ (idx : Nat) (hidx1 : idx < len), + curr ≤ idx + → + ⟦ + (go aig curr s hcurr lhs rhs f).aig, + (go aig curr s hcurr lhs rhs f).vec.get idx hidx1, + assign + ⟧ + = + ⟦f aig ⟨lhs.get idx hidx1, rhs.get idx hidx1⟩, assign⟧ := by + intro idx hidx1 hidx2 + generalize hgo : go aig curr s hcurr lhs rhs f = res + unfold go at hgo + split at hgo + . dsimp only at hgo + cases Nat.eq_or_lt_of_le hidx2 with + | inl heq => + rw [← hgo] + rw [go_get] + rw [AIG.RefVec.get_push_ref_eq'] + . simp only [← heq] + rw [go_denote_mem_prefix] + . simp + . simp [Ref.hgate] + . rw [heq] + | inr hlt => + rw [← hgo] + rw [denote_go] + . simp [-Ref_cast'] + . omega + . omega +termination_by len - curr + +end zip + +@[simp] +theorem denote_zip {aig : AIG α} (target : ZipTarget aig len) : + ∀ (idx : Nat) (hidx : idx < len), + ⟦(zip aig target).aig, (zip aig target).vec.get idx hidx, assign⟧ + = + ⟦target.func aig ⟨target.input.lhs.get idx hidx, target.input.rhs.get idx hidx⟩, assign⟧ := by + intros + apply zip.denote_go + omega + +end RefVec +end AIG + +end Sat +end Std diff --git a/src/Std/Sat/AIG/Relabel.lean b/src/Std/Sat/AIG/Relabel.lean new file mode 100644 index 000000000000..bb8c6c243aff --- /dev/null +++ b/src/Std/Sat/AIG/Relabel.lean @@ -0,0 +1,175 @@ +/- +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. 
+Authors: Henrik Böving +-/ +import Std.Sat.AIG.Basic +import Std.Sat.AIG.Lemmas + +namespace Std +namespace Sat + +namespace AIG + +namespace Decl + +def relabel (r : α → β) (decl : Decl α) : Decl β := + match decl with + | .const b => .const b + | .atom a => .atom (r a) + | .gate lhs rhs linv rinv => .gate lhs rhs linv rinv + +theorem relabel_id_map (decl : Decl α) : relabel id decl = decl := by + simp only [relabel, id_eq] + cases decl <;> rfl + +theorem relabel_comp (decl : Decl α) (g : α → β) (h : β → γ) : + relabel (h ∘ g) decl = relabel h (relabel g decl) := by + cases decl <;> rfl + +theorem relabel_const {decls : Array (Decl α)} {r : α → β} {hidx : idx < decls.size} + (h : relabel r decls[idx] = .const b) : + decls[idx] = (.const b) := by + unfold relabel at h + split at h <;> simp_all + +theorem relabel_atom {decls : Array (Decl α)} {r : α → β} {hidx : idx < decls.size} + (h : relabel r decls[idx] = .atom a) : + ∃ x, decls[idx] = .atom x ∧ a = r x := by + unfold relabel at h + split at h + . contradiction + . next x heq => + injection h with h + exists x + simp [heq, h] + . contradiction + +theorem relabel_gate {decls : Array (Decl α)} {r : α → β} {hidx : idx < decls.size} + (h : relabel r decls[idx] = .gate lhs rhs linv rinv) : + decls[idx] = (.gate lhs rhs linv rinv : Decl α) := by + unfold relabel at h + split at h <;> simp_all + +end Decl + +variable {α : Type} [Hashable α] [DecidableEq α] +variable {β : Type} [Hashable β] [DecidableEq β] + +def relabel (r : α → β) (aig : AIG α) : AIG β := + let decls := aig.decls.map (Decl.relabel r) + let cache := Cache.empty decls + { + decls, + cache, + invariant := by + intro idx lhs rhs linv rinv hbound hgate + simp [decls] at hgate + have := Decl.relabel_gate hgate + apply aig.invariant + assumption + } + +@[simp] +theorem relabel_size_eq_size {aig : AIG α} {r : α → β} : + (aig.relabel r).decls.size = aig.decls.size := by + simp [relabel] + +theorem relabel_const {aig : AIG α} {r : α → β} {hidx : idx < (relabel r aig).decls.size} + (h : (relabel r aig).decls[idx]'hidx = .const b) : + aig.decls[idx]'(by rw [← relabel_size_eq_size (r := r)]; omega) = .const b := by + apply Decl.relabel_const + simpa [relabel] using h + + +theorem relabel_atom {aig : AIG α} {r : α → β} {hidx : idx < (relabel r aig).decls.size} + (h : (relabel r aig).decls[idx]'hidx = .atom a) : + ∃ x, aig.decls[idx]'(by rw [← relabel_size_eq_size (r := r)]; omega) = .atom x ∧ a = r x := by + apply Decl.relabel_atom + simpa [relabel] using h + +theorem relabel_gate {aig : AIG α} {r : α → β} {hidx : idx < (relabel r aig).decls.size} + (h : (relabel r aig).decls[idx]'hidx = .gate lhs rhs linv rinv) : + aig.decls[idx]'(by rw [← relabel_size_eq_size (r := r)]; omega) = .gate lhs rhs linv rinv := by + apply Decl.relabel_gate + simpa [relabel] using h + +@[simp] +theorem denote_relabel (aig : AIG α) (r : α → β) (start : Nat) {hidx} + (assign : β → Bool) : + ⟦aig.relabel r, ⟨start, hidx⟩, assign⟧ + = + ⟦aig, ⟨start, by rw [← relabel_size_eq_size (r := r)]; omega⟩, (assign ∘ r)⟧ := by + apply denote_idx_trichotomy + . intro b heq1 + have heq2 := relabel_const heq1 + rw [denote_idx_const heq1] + rw [denote_idx_const heq2] + . intro a heq1 + rw [denote_idx_atom heq1] + rcases relabel_atom heq1 with ⟨x, ⟨hlx, hrx⟩⟩ + rw [hrx] at heq1 + rw [denote_idx_atom hlx] + simp [hrx] + . 
intro lhs rhs linv rinv heq1
+    have heq2 := relabel_gate heq1
+    rw [denote_idx_gate heq1]
+    rw [denote_idx_gate heq2]
+    have := aig.invariant (by rw [← relabel_size_eq_size (r := r)]; omega) heq2
+    rw [denote_relabel aig r lhs assign]
+    rw [denote_relabel aig r rhs assign]
+
+theorem unsat_relabel {aig : AIG α} (r : α → β) {hidx} :
+    aig.UnsatAt idx hidx → (aig.relabel r).UnsatAt idx (by simp [hidx]) := by
+  intro h assign
+  specialize h (assign ∘ r)
+  simp [h]
+
+theorem relabel_unsat_iff [Nonempty α] {aig : AIG α} {r : α → β} {hidx1} {hidx2}
+    (hinj : ∀ x y, x ∈ aig → y ∈ aig → r x = r y → x = y) :
+    (aig.relabel r).UnsatAt idx hidx1 ↔ aig.UnsatAt idx hidx2 := by
+  constructor
+  . intro h assign
+    let g : β → α := fun b =>
+      have em := Classical.propDecidable
+      if h : ∃ a, a ∈ aig ∧ r a = b then h.choose else Classical.choice inferInstance
+    have h' := unsat_relabel g h
+    specialize h' assign
+    simp only [denote_relabel] at h'
+    rw [← h']
+    apply denote_congr
+    . intro a hmem
+      simp only [Function.comp_apply, g]
+      split
+      . next h =>
+        rcases Exists.choose_spec h with ⟨_, heq⟩
+        specialize hinj _ _ (by assumption) (by assumption) heq
+        simp [hinj]
+      . next h =>
+        simp only [not_exists, not_and] at h
+        specialize h a hmem
+        contradiction
+  . apply unsat_relabel
+
+namespace Entrypoint
+
+def relabel (r : α → β) (entry : Entrypoint α) : Entrypoint β :=
+  { entry with
+    aig := entry.aig.relabel r
+    ref.hgate := by simp [entry.ref.hgate]
+  }
+
+@[simp]
+theorem relabel_size_eq {entry : Entrypoint α} {r : α → β} :
+    (entry.relabel r).aig.decls.size = entry.aig.decls.size := by
+  simp [relabel]
+
+theorem relabel_unsat_iff [Nonempty α] {entry : Entrypoint α} {r : α → β}
+    (hinj : ∀ x y, x ∈ entry.aig → y ∈ entry.aig → r x = r y → x = y) :
+    (entry.relabel r).Unsat ↔ entry.Unsat := by
+  simp [relabel, Unsat]
+  rw [AIG.relabel_unsat_iff]
+  assumption
+
+end Entrypoint
+end AIG
diff --git a/src/Std/Sat/AIG/RelabelNat.lean b/src/Std/Sat/AIG/RelabelNat.lean
new file mode 100644
index 000000000000..88e1d07287e0
--- /dev/null
+++ b/src/Std/Sat/AIG/RelabelNat.lean
@@ -0,0 +1,386 @@
+/-
+Copyright (c) 2024 Lean FRO, LLC. All rights reserved.
+Released under Apache 2.0 license as described in the file LICENSE.
+Authors: Henrik Böving
+-/
+import Std.Sat.AIG.Relabel
+
+
+namespace Std
+namespace Sat
+
+variable {α : Type} [DecidableEq α] [Hashable α]
+
+namespace AIG
+namespace RelabelNat
+namespace State
+
+/--
+This invariant ensures that we only insert an atom at most once and with a monotonically increasing
+index.
+-/
+inductive Inv1 : Nat → HashMap α Nat → Prop where
+| empty : Inv1 0 {}
+| insert (hinv : Inv1 n map) (hfind : map[x]? = none) : Inv1 (n + 1) (map.insert x n)
+
+theorem Inv1.lt_of_get?_eq_some [EquivBEq α] {n m : Nat} (map : HashMap α Nat) (x : α)
+    (hinv : Inv1 n map) :
+    map[x]? = some m → m < n := by
+  induction hinv with
+  | empty => simp
+  | insert ih1 ih2 ih3 =>
+    rename_i y
+    rw [Std.HashMap.getElem?_insert]
+    match hx : x == y with
+    | true =>
+      simp only [beq_iff_eq] at hx
+      simp only [hx, beq_self_eq_true, ↓reduceIte, Option.some.injEq]
+      omega
+    | false =>
+      simp only [BEq.symm_false hx, Bool.false_eq_true, ↓reduceIte]
+      intro h
+      specialize ih3 h
+      omega
+
+/--
+If a HashMap fulfills `Inv1`, it is an injection.
+-/
+theorem Inv1.property [EquivBEq α] {n m : Nat} (x y : α) (map : HashMap α Nat) (hinv : Inv1 n map)
+    (hfound1 : map[x]? = some m) (hfound2 : map[y]?
= some m) : x = y := by + induction hinv with + | empty => simp at hfound1 + | insert ih1 _ih2 ih3 => + rename_i z + rw [HashMap.getElem?_insert] at hfound1 + rw [HashMap.getElem?_insert] at hfound2 + match hx : z == x with + | false => + simp only [beq_eq_false_iff_ne, ne_eq] at hx + simp only [beq_iff_eq, hx, ↓reduceIte] at hfound1 + match hy : z == y with + | false => + simp only [beq_eq_false_iff_ne, ne_eq] at hy + simp only [beq_iff_eq, hy, ↓reduceIte] at hfound2 + exact ih3 hfound1 hfound2 + | true => + simp only [hy, ↓reduceIte, Option.some.injEq] at hfound2 + have := Inv1.lt_of_get?_eq_some _ _ ih1 hfound1 + omega + | true => + simp only [hx, ↓reduceIte, Option.some.injEq] at hfound1 + rcases hfound1 with ⟨rfl⟩ + match hy : z == y with + | false => + simp only [beq_eq_false_iff_ne, ne_eq] at hy + simp only [beq_iff_eq, hy, ↓reduceIte] at hfound2 + have := Inv1.lt_of_get?_eq_some _ _ ih1 hfound2 + omega + | true => + simp only [beq_iff_eq] at hx hy + simp only [←hx, hy] + +/-- +This invariant says that we have already visited and inserted all nodes up to a certain index. +-/ +inductive Inv2 (decls : Array (Decl α)) : Nat → HashMap α Nat → Prop where +| empty : Inv2 decls 0 {} +| newAtom (hinv : Inv2 decls idx map) (hlt : idx < decls.size) (hatom : decls[idx] = .atom a) + (hmap : map[a]? = none) : Inv2 decls (idx + 1) (map.insert a val) +| oldAtom (hinv : Inv2 decls idx map) (hlt : idx < decls.size) (hatom : decls[idx] = .atom a) + (hmap : map[a]? = some n) : Inv2 decls (idx + 1) map +| const (hinv : Inv2 decls idx map) (hlt : idx < decls.size) (hatom : decls[idx] = .const b) : + Inv2 decls (idx + 1) map +| gate (hinv : Inv2 decls idx map) (hlt : idx < decls.size) (hatom : decls[idx] = .gate l r li ri) : + Inv2 decls (idx + 1) map + +theorem Inv2.upper_lt_size {decls : Array (Decl α)} (hinv : Inv2 decls upper map) : + upper ≤ decls.size := by + cases hinv <;> omega + +/-- +The key property provided by `Inv2`, if we have `Inv2` at a certain index, then all atoms below +that index have been inserted into the `HashMap`. +-/ +theorem Inv2.property (decls : Array (Decl α)) (idx upper : Nat) (map : HashMap α Nat) + (hidx : idx < upper) (a : α) (hinv : Inv2 decls upper map) + (heq : decls[idx]'(by have := upper_lt_size hinv; omega) = .atom a) : + ∃ n, map[a]? 
= some n := by + induction hinv with + | empty => omega + | newAtom ih1 ih2 ih3 ih4 ih5 => + next idx' _ a' _ => + replace hidx : idx ≤ idx' := by omega + rw [HashMap.getElem?_insert] + match heq2 : a' == a with + | false => + simp only [Bool.false_eq_true, ↓reduceIte] + cases Nat.eq_or_lt_of_le hidx with + | inl hidxeq => + subst hidxeq + simp_all only [beq_eq_false_iff_ne, Decl.atom.injEq] + | inr hlt => + exact ih5 hlt heq + | true => + exact Option.isSome_iff_exists.mp rfl + | oldAtom ih1 ih2 ih3 ih4 ih5 => + simp_all only [true_implies] + next idx' _ _ _ => + replace hidx : idx ≤ idx' := by omega + cases Nat.eq_or_lt_of_le hidx with + | inl hidxeq => + simp only [hidxeq, ih3, Decl.atom.injEq] at heq + rw [← heq] + apply Exists.intro + assumption + | inr hlt => apply ih5 <;> assumption + | const ih1 ih2 ih3 ih4 => + next idx' _ _ => + replace hidx : idx ≤ idx' := by omega + cases Nat.eq_or_lt_of_le hidx with + | inl hidxeq => simp only [hidxeq, ih3] at heq + | inr hlt => apply ih4 <;> assumption + | gate ih1 ih2 ih3 ih4 => + next idx' _ _ _ _ _ => + replace hidx : idx ≤ idx' := by omega + cases Nat.eq_or_lt_of_le hidx with + | inl hidxeq => simp [hidxeq, ih3] at heq + | inr hlt => apply ih4 <;> assumption + +end State + +/-- +The invariant carrying state structure for building the `HashMap` that translates from arbitrary +atom identifiers to `Nat`. +-/ +structure State (α : Type) [DecidableEq α] [Hashable α] (decls : Array (Decl α)) (idx : Nat) where + /-- + The next number to use for identifying an atom. + -/ + max : Nat + /-- + The translation `HashMap` + -/ + map : HashMap α Nat + /-- + Proof that we never reuse a number. + -/ + inv1 : State.Inv1 max map + /-- + Proof that we inserted all atoms until `idx`. + -/ + inv2 : State.Inv2 decls idx map + +namespace State + +/-- +The basic initial state. +-/ +def empty {decls : Array (Decl α)} : State α decls 0 := + { max := 0, map := {}, inv1 := Inv1.empty, inv2 := Inv2.empty } + +/-- +Insert a `Decl.atom` into the `State` structure. +-/ +def addAtom {decls : Array (Decl α)} {hidx} (state : State α decls idx) (a : α) + (h : decls[idx]'hidx = .atom a) : + State α decls (idx + 1) := + match hmap : state.map[a]? with + | some _ => + { state with + inv2 := by + apply Inv2.oldAtom + . exact state.inv2 + . assumption + . assumption + } + | none => + { + max := state.max + 1 + map := state.map.insert a state.max + inv1 := by + apply State.Inv1.insert + . exact state.inv1 + . assumption + inv2 := by + apply Inv2.newAtom + . exact state.inv2 + . assumption + . assumption + } + +/-- +Insert a `Decl.const` into the `State` structure. +-/ +def addConst {decls : Array (Decl α)} {hidx} (state : State α decls idx) (b : Bool) + (h : decls[idx]'hidx = .const b) : + State α decls (idx + 1) := + { state with + inv2 := by + apply Inv2.const + . exact state.inv2 + . assumption + } + +/-- +Insert a `Decl.gate` into the `State` structure. +-/ +def addGate {decls : Array (Decl α)} {hidx} (state : State α decls idx) (lhs rhs : Nat) + (linv rinv : Bool) (h : decls[idx]'hidx = .gate lhs rhs linv rinv) : + State α decls (idx + 1) := + { state with + inv2 := by + apply Inv2.gate + . exact state.inv2 + . assumption + } + +/-- +Build up a `State` that has all atoms of an `AIG` inserted. 
+-/
+def ofAIGAux (aig : AIG α) : State α aig.decls aig.decls.size :=
+  go aig.decls 0 .empty
+where
+  go (decls : Array (Decl α)) (idx : Nat) (state : State α decls idx) : State α decls decls.size :=
+    if hidx : idx < decls.size then
+      let decl := decls[idx]
+      match hdecl : decl with
+      | .atom a => go decls (idx + 1) (state.addAtom a hdecl)
+      | .const b => go decls (idx + 1) (state.addConst b hdecl)
+      | .gate lhs rhs linv rinv => go decls (idx + 1) (state.addGate lhs rhs linv rinv hdecl)
+    else
+      have : idx = decls.size := by
+        have := state.inv2.upper_lt_size
+        omega
+      this ▸ state
+  termination_by decls.size - idx
+
+/--
+Obtain the atom mapping from α to `Nat` for a given `AIG`.
+-/
+def ofAIG (aig : AIG α) : HashMap α Nat :=
+  ofAIGAux aig |>.map
+
+/--
+The map returned by `ofAIG` fulfills the `Inv1` property.
+-/
+theorem ofAIG.Inv1 (aig : AIG α) : ∃ n, Inv1 n (ofAIG aig) := by
+  exists (ofAIGAux aig).max
+  dsimp only [ofAIG]
+  exact (ofAIGAux aig).inv1
+
+/--
+The map returned by `ofAIG` fulfills the `Inv2` property.
+-/
+theorem ofAIG.Inv2 (aig : AIG α) : Inv2 aig.decls aig.decls.size (ofAIG aig) := by
+  have := (ofAIGAux aig).inv2
+  simp [ofAIG, this]
+
+/--
+Assuming that we find a `Nat` for an atom in the `ofAIG` map, that `Nat` is unique in the map.
+-/
+theorem ofAIG_find_unique {aig : AIG α} (a : α) (ha : (ofAIG aig)[a]? = some n) :
+    ∀ a', (ofAIG aig)[a']? = some n → a = a' := by
+  intro a' ha'
+  rcases ofAIG.Inv1 aig with ⟨n, hn⟩
+  apply Inv1.property <;> assumption
+
+/--
+We will find a `Nat` for every atom in the `AIG` that the `ofAIG` map was built from.
+-/
+theorem ofAIG_find_some {aig : AIG α} : ∀ a ∈ aig, ∃ n, (ofAIG aig)[a]? = some n := by
+  intro a ha
+  simp only [mem_def] at ha
+  rcases Array.getElem_of_mem ha with ⟨i, isLt, hi⟩
+  apply Inv2.property
+  . assumption
+  . exact aig.decls.size
+  . omega
+  · apply ofAIG.Inv2
+
+end State
+end RelabelNat
+
+def relabelNat' (aig : AIG α) : (AIG Nat × HashMap α Nat) :=
+  let map := RelabelNat.State.ofAIG aig
+  let aig := aig.relabel fun x =>
+    -- The none branch never gets hit, we prove this below.
+    match map[x]? with
+    | some var => var
+    | none => 0
+  (aig, map)
+
+/--
+Map an `AIG` with arbitrary atom identifiers to one that uses `Nat` as atom identifiers. This is
+useful for preparing an `AIG` for CNF translation if it doesn't already use `Nat` identifiers.
+-/
+def relabelNat (aig : AIG α) : AIG Nat :=
+  relabelNat' aig |>.fst
+
+@[simp]
+theorem relabelNat'_fst_eq_relabelNat {aig : AIG α} : aig.relabelNat'.fst = aig.relabelNat := by
+  rfl
+
+@[simp]
+theorem relabelNat_size_eq_size {aig : AIG α} : aig.relabelNat.decls.size = aig.decls.size := by
+  simp [relabelNat, relabelNat']
+
+/--
+`relabelNat` preserves unsatisfiability.
+-/
+theorem relabelNat_unsat_iff [Nonempty α] {aig : AIG α} {hidx1} {hidx2} :
+    (aig.relabelNat).UnsatAt idx hidx1 ↔ aig.UnsatAt idx hidx2 := by
+  dsimp only [relabelNat, relabelNat']
+  rw [relabel_unsat_iff]
+  intro x y hx hy heq
+  split at heq
+  . next hcase1 =>
+    split at heq
+    . next hcase2 =>
+      apply RelabelNat.State.ofAIG_find_unique
+      . assumption
+      . rw [heq]
+        assumption
+    . next hcase2 =>
+      exfalso
+      rcases RelabelNat.State.ofAIG_find_some y hy with ⟨n, hn⟩
+      simp [hcase2] at hn
+  . 
next hcase => + exfalso + rcases RelabelNat.State.ofAIG_find_some x hx with ⟨n, hn⟩ + simp [hcase] at hn + +namespace Entrypoint + +def relabelNat' (entry : Entrypoint α) : (Entrypoint Nat × HashMap α Nat) := + let res := entry.aig.relabelNat' + let entry := + { entry with + aig := res.fst, + ref.hgate := by simp [entry.ref.hgate, res] + } + (entry, res.snd) + +/-- +Map an `Entrypoint` with arbitrary atom identifiers to one that uses `Nat` as atom identifiers. +This is useful for preparing an `AIG` for CNF translation if it doesn't already use `Nat` +identifiers. +-/ +def relabelNat (entry : Entrypoint α) : Entrypoint Nat := + { entry with + aig := entry.aig.relabelNat + ref.hgate := by simp [entry.ref.hgate] + } + +/-- +`relabelNat` preserves unsatisfiablility. +-/ +theorem relabelNat_unsat_iff {entry : Entrypoint α} [Nonempty α] : + (entry.relabelNat).Unsat ↔ entry.Unsat:= by + simp only [Unsat, relabelNat] + rw [AIG.relabelNat_unsat_iff] + +end Entrypoint +end AIG + +end Sat +end Std diff --git a/src/cmake/Modules/FindLibUV.cmake b/src/cmake/Modules/FindLibUV.cmake new file mode 100644 index 000000000000..96cebd82bd94 --- /dev/null +++ b/src/cmake/Modules/FindLibUV.cmake @@ -0,0 +1,12 @@ +if (LIBUV_INCLUDE_DIR AND LIBUV_LIBRARIES) + # Already in cache, be silent + set(LIBUV_FIND_QUIETLY TRUE) +endif (LIBUV_INCLUDE_DIR AND LIBUV_LIBRARIES) + +find_path(LIBUV_INCLUDE_DIR NAMES uv.h) +find_library(LIBUV_LIBRARIES NAMES uv libuv REQUIRED) +MESSAGE(STATUS "LIBUV: " ${LIBUV_LIBRARIES}) + +include(FindPackageHandleStandardArgs) +FIND_PACKAGE_HANDLE_STANDARD_ARGS(LibUV DEFAULT_MSG LIBUV_INCLUDE_DIR LIBUV_LIBRARIES) +mark_as_advanced(LIBUV_INCLUDE_DIR LIBUV_LIBRARIES) diff --git a/src/include/lean/lean_libuv.h b/src/include/lean/lean_libuv.h new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/src/kernel/environment.h b/src/kernel/environment.h index eaa077dbf16a..d0d61a8eed9b 100644 --- a/src/kernel/environment.h +++ b/src/kernel/environment.h @@ -59,7 +59,7 @@ class scoped_diagnostics { diagnostics * get() const { return m_diag; } }; -class environment : public object_ref { +class LEAN_EXPORT environment : public object_ref { friend class add_inductive_fn; void check_name(name const & n) const; diff --git a/src/kernel/expr_eq_fn.cpp b/src/kernel/expr_eq_fn.cpp index 7078565217ad..c0b0d55a7df7 100644 --- a/src/kernel/expr_eq_fn.cpp +++ b/src/kernel/expr_eq_fn.cpp @@ -23,7 +23,7 @@ template class expr_eq_fn { struct key_hasher { std::size_t operator()(std::pair const & p) const { - return hash((size_t)p.first >> 3, (size_t)p.first >> 3); + return hash((size_t)p.first >> 3, (size_t)p.second >> 3); } }; typedef std::unordered_set, key_hasher> cache; diff --git a/src/kernel/init_module.cpp b/src/kernel/init_module.cpp index d676c46faac0..39ebd81de16a 100644 --- a/src/kernel/init_module.cpp +++ b/src/kernel/init_module.cpp @@ -4,6 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE. Author: Leonardo de Moura */ +#include "kernel/init_module.h" #include "kernel/environment.h" #include "kernel/type_checker.h" #include "kernel/expr.h" diff --git a/src/kernel/init_module.h b/src/kernel/init_module.h index f15ea5f24e94..c7f1bffa59f2 100644 --- a/src/kernel/init_module.h +++ b/src/kernel/init_module.h @@ -5,7 +5,9 @@ Released under Apache 2.0 license as described in the file LICENSE. 
Author: Leonardo de Moura */ #pragma once +#include "runtime/object.h" + namespace lean { -void initialize_kernel_module(); -void finalize_kernel_module(); +LEAN_EXPORT void initialize_kernel_module(); +LEAN_EXPORT void finalize_kernel_module(); } diff --git a/src/kernel/instantiate_mvars.cpp b/src/kernel/instantiate_mvars.cpp index 9aafd17449ee..b834bf26920f 100644 --- a/src/kernel/instantiate_mvars.cpp +++ b/src/kernel/instantiate_mvars.cpp @@ -267,44 +267,48 @@ class instantiate_mvars_fn { return visit_app_default(e); } else { name const & mid = mvar_name(f); + /* + Regular assignments take precedence over delayed ones. + When an error occurs, Lean assigns `sorry` to unassigned metavariables. + The idea is to ensure we can submit the declaration to the kernel and proceed. + Some of the metavariables may have been delayed assigned. + */ + if (auto f_new = get_assignment(mid)) { + // `f` is an assigned metavariable. + buffer args; + return visit_args_and_beta(*f_new, e, args); + } option_ref d = get_delayed_mvar_assignment(m_mctx, mid); if (!d) { // mvar is not delayed assigned - expr f_new = visit(f); - if (is_eqp(f, f_new)) { - return visit_mvar_app_args(e); - } else { - buffer args; - return visit_args_and_beta(f_new, e, args); - } - } else { + return visit_mvar_app_args(e); + } + /* + Apply "delayed substitution" (i.e., delayed assignment + application). + That is, `f` is some metavariable `?m`, that is delayed assigned to `val`. + If after instantiating `val`, we obtain `newVal`, and `newVal` does not contain + metavariables, we replace the free variables `fvars` in `newVal` with the first + `fvars.size` elements of `args`. + */ + array_ref fvars(cnstr_get(d.get_val().raw(), 0), true); + name mid_pending(cnstr_get(d.get_val().raw(), 1), true); + if (fvars.size() > get_app_num_args(e)) { /* - Apply "delayed substitution" (i.e., delayed assignment + application). - That is, `f` is some metavariable `?m`, that is delayed assigned to `val`. - If after instantiating `val`, we obtain `newVal`, and `newVal` does not contain - metavariables, we replace the free variables `fvars` in `newVal` with the first - `fvars.size` elements of `args`. - */ - array_ref fvars(cnstr_get(d.get_val().raw(), 0), true); - name mid_pending(cnstr_get(d.get_val().raw(), 1), true); - if (fvars.size() > get_app_num_args(e)) { - /* - We don't have sufficient arguments for instantiating the free variables `fvars`. - This can only happen if a tactic or elaboration function is not implemented correctly. - We decided to not use `panic!` here and report it as an error in the frontend - when we are checking for unassigned metavariables in an elaborated term. */ - return visit_mvar_app_args(e); - } - optional val = get_assignment(mid_pending); - if (!val) - // mid_pending has not been assigned yet. - return visit_mvar_app_args(e); - if (has_expr_mvar(*val)) - // mid_pending has been assigned, but assignment contains mvars. - return visit_mvar_app_args(e); - buffer args; - return visit_delayed(fvars, *val, e, args); + We don't have sufficient arguments for instantiating the free variables `fvars`. + This can only happen if a tactic or elaboration function is not implemented correctly. + We decided to not use `panic!` here and report it as an error in the frontend + when we are checking for unassigned metavariables in an elaborated term. */ + return visit_mvar_app_args(e); } + optional val = get_assignment(mid_pending); + if (!val) + // mid_pending has not been assigned yet. 
+ return visit_mvar_app_args(e); + if (has_expr_mvar(*val)) + // mid_pending has been assigned, but assignment contains mvars. + return visit_mvar_app_args(e); + buffer args; + return visit_delayed(fvars, *val, e, args); } } diff --git a/src/lake/examples/reverse-ffi/Makefile b/src/lake/examples/reverse-ffi/Makefile index 1942659b1b5e..67e4d538320d 100644 --- a/src/lake/examples/reverse-ffi/Makefile +++ b/src/lake/examples/reverse-ffi/Makefile @@ -23,7 +23,7 @@ endif $(OUT_DIR)/main: main.c lake | $(OUT_DIR) # Add library paths for Lake package and for Lean itself - cc -o $@ $< -I $(LEAN_SYSROOT)/include -L $(LEAN_LIBDIR) -L lib/.lake/build/lib -lRFFI -lInit_shared -lleanshared $(LINK_FLAGS) + cc -o $@ $< -I $(LEAN_SYSROOT)/include -L $(LEAN_LIBDIR) -L lib/.lake/build/lib -lRFFI -lInit_shared -lleanshared_1 -lleanshared $(LINK_FLAGS) run: $(OUT_DIR)/main ifeq ($(OS),Windows_NT) @@ -55,7 +55,7 @@ endif $(OUT_DIR)/main-local: main.c lake | $(OUT_DIR) cp -f $(LEAN_SHLIB_ROOT)/*.$(SHLIB_EXT) lib/.lake/build/lib/$(SHLIB_PREFIX)RFFI.$(SHLIB_EXT) $(OUT_DIR) - cc -o $@ $< -I $(LEAN_SYSROOT)/include -L $(OUT_DIR) -lRFFI -lInit_shared -lleanshared $(LINK_FLAGS_LOCAL) + cc -o $@ $< -I $(LEAN_SYSROOT)/include -L $(OUT_DIR) -lRFFI -lInit_shared -lleanshared_1 -lleanshared $(LINK_FLAGS_LOCAL) run-local: $(OUT_DIR)/main-local $(OUT_DIR)/main-local diff --git a/src/lean.mk.in b/src/lean.mk.in index 32527c6fffde..1a37a59cf9e6 100644 --- a/src/lean.mk.in +++ b/src/lean.mk.in @@ -143,7 +143,10 @@ $(LIB_OUT)/$(STATIC_LIB_NAME): $(NAT_OBJS) | $(LIB_OUT) $(TEMP_OUT)/$(STATIC_LIB_NAME).export: $(addsuffix .export,$(NAT_OBJS)) | $(LIB_OUT) @rm -f $@ $(file >$@.in) $(foreach O,$^,$(file >>$@.in,"$O")) - @$(LEAN_AR) rcs $@ @$@.in + # "T"hin archive seems necessary to preserve paths so that we can distinguish + # between object files with the same file name when manipulating the archive for + # `libleanshared_1` + @$(LEAN_AR) rcsT $@ @$@.in @rm -f $@.in else $(LIB_OUT)/$(STATIC_LIB_NAME): $(NAT_OBJS) | $(LIB_OUT) diff --git a/src/library/compiler/init_module.cpp b/src/library/compiler/init_module.cpp index 3270b4687bd1..851a90f611b7 100644 --- a/src/library/compiler/init_module.cpp +++ b/src/library/compiler/init_module.cpp @@ -4,6 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE. Author: Leonardo de Moura */ +#include "library/compiler/init_module.h" #include "library/compiler/util.h" #include "library/compiler/lcnf.h" #include "library/compiler/elim_dead_let.h" diff --git a/src/library/compiler/init_module.h b/src/library/compiler/init_module.h index 575bce62400a..fd9bf60c341b 100644 --- a/src/library/compiler/init_module.h +++ b/src/library/compiler/init_module.h @@ -5,7 +5,9 @@ Released under Apache 2.0 license as described in the file LICENSE. 
Author: Leonardo de Moura */ #pragma once +#include "runtime/object.h" + namespace lean { -void initialize_compiler_module(); -void finalize_compiler_module(); +LEAN_EXPORT void initialize_compiler_module(); +LEAN_EXPORT void finalize_compiler_module(); } diff --git a/src/library/compiler/ir.h b/src/library/compiler/ir.h index 0049d4ee5701..07b395acca0e 100644 --- a/src/library/compiler/ir.h +++ b/src/library/compiler/ir.h @@ -36,7 +36,7 @@ std::string decl_to_string(decl const & d); void test(decl const & d); environment compile(environment const & env, options const & opts, comp_decls const & decls); environment add_extern(environment const & env, name const & fn); -string_ref emit_c(environment const & env, name const & mod_name); +LEAN_EXPORT string_ref emit_c(environment const & env, name const & mod_name); void emit_llvm(environment const & env, name const & mod_name, std::string const &filepath); } void initialize_ir(); diff --git a/src/library/compiler/ir_interpreter.cpp b/src/library/compiler/ir_interpreter.cpp index 5a394566cca2..12c657cd4dcc 100644 --- a/src/library/compiler/ir_interpreter.cpp +++ b/src/library/compiler/ir_interpreter.cpp @@ -34,6 +34,7 @@ functions, which have a (relatively) homogeneous ABI that we can use without run #else #include #endif +#include "library/compiler/ir_interpreter.h" #include "runtime/flet.h" #include "runtime/apply.h" #include "runtime/interrupt.h" diff --git a/src/library/compiler/ir_interpreter.h b/src/library/compiler/ir_interpreter.h index 722d809c0bad..00a65a34fe62 100644 --- a/src/library/compiler/ir_interpreter.h +++ b/src/library/compiler/ir_interpreter.h @@ -12,7 +12,7 @@ namespace lean { namespace ir { /** \brief Run `n` using the "boxed" ABI, i.e. with all-owned parameters. */ object * run_boxed(environment const & env, options const & opts, name const & fn, unsigned n, object **args); -uint32 run_main(environment const & env, options const & opts, int argv, char * argc[]); +LEAN_EXPORT uint32 run_main(environment const & env, options const & opts, int argv, char * argc[]); } void initialize_ir_interpreter(); void finalize_ir_interpreter(); diff --git a/src/library/constructions/init_module.cpp b/src/library/constructions/init_module.cpp index e57719543192..32fc32aa351a 100644 --- a/src/library/constructions/init_module.cpp +++ b/src/library/constructions/init_module.cpp @@ -4,6 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE. Author: Leonardo de Moura */ +#include "library/constructions/init_module.h" #include "library/constructions/projection.h" #include "library/constructions/util.h" diff --git a/src/library/constructions/init_module.h b/src/library/constructions/init_module.h index 902bdb1aac85..04d27cd1ee9b 100644 --- a/src/library/constructions/init_module.h +++ b/src/library/constructions/init_module.h @@ -5,8 +5,9 @@ Released under Apache 2.0 license as described in the file LICENSE. Author: Leonardo de Moura */ #pragma once +#include "runtime/object.h" namespace lean { -void initialize_constructions_module(); -void finalize_constructions_module(); +LEAN_EXPORT void initialize_constructions_module(); +LEAN_EXPORT void finalize_constructions_module(); } diff --git a/src/library/init_module.cpp b/src/library/init_module.cpp index ccb9edbcfcaf..7f4baba1abbf 100644 --- a/src/library/init_module.cpp +++ b/src/library/init_module.cpp @@ -4,6 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE. 
Author: Leonardo de Moura */ +#include "library/init_module.h" #include "library/constants.h" #include "library/class.h" #include "library/num.h" diff --git a/src/library/init_module.h b/src/library/init_module.h index 31e56a5bc2b5..45149946a85e 100644 --- a/src/library/init_module.h +++ b/src/library/init_module.h @@ -5,10 +5,11 @@ Released under Apache 2.0 license as described in the file LICENSE. Author: Leonardo de Moura */ #pragma once +#include namespace lean { -void initialize_library_core_module(); -void finalize_library_core_module(); -void initialize_library_module(); -void finalize_library_module(); +LEAN_EXPORT void initialize_library_core_module(); +LEAN_EXPORT void finalize_library_core_module(); +LEAN_EXPORT void initialize_library_module(); +LEAN_EXPORT void finalize_library_module(); } diff --git a/src/library/module.h b/src/library/module.h index 9ab3b51b2e6a..ae6e508c0d82 100644 --- a/src/library/module.h +++ b/src/library/module.h @@ -14,5 +14,5 @@ Authors: Leonardo de Moura, Gabriel Ebner, Sebastian Ullrich namespace lean { /** \brief Store module using \c env. */ -void write_module(environment const & env, std::string const & olean_fn); +LEAN_EXPORT void write_module(environment const & env, std::string const & olean_fn); } diff --git a/src/library/print.h b/src/library/print.h index 40e696a493c8..7bf6bab09b47 100644 --- a/src/library/print.h +++ b/src/library/print.h @@ -23,7 +23,7 @@ pair binding_body_fresh(expr const & b, bool preserve_type = false); pair let_body_fresh(expr const & b, bool preserve_type = false); /** \brief Use simple formatter as the default print function */ -void init_default_print_fn(); +LEAN_EXPORT void init_default_print_fn(); void initialize_print(); void finalize_print(); diff --git a/src/library/profiling.h b/src/library/profiling.h index 73950cefca5a..a375b292bfd7 100644 --- a/src/library/profiling.h +++ b/src/library/profiling.h @@ -12,8 +12,8 @@ namespace lean { using second_duration = std::chrono::duration; -bool get_profiler(options const &); -second_duration get_profiling_threshold(options const &); +LEAN_EXPORT bool get_profiler(options const &); +LEAN_EXPORT second_duration get_profiling_threshold(options const &); void initialize_profiling(); void finalize_profiling(); diff --git a/src/library/time_task.h b/src/library/time_task.h index 3c2ffb55b1da..992ece66feb7 100644 --- a/src/library/time_task.h +++ b/src/library/time_task.h @@ -11,11 +11,11 @@ Author: Sebastian Ullrich #include "util/message_definitions.h" namespace lean { -void report_profiling_time(std::string const & category, second_duration time); -void display_cumulative_profiling_times(std::ostream & out); +LEAN_EXPORT void report_profiling_time(std::string const & category, second_duration time); +LEAN_EXPORT void display_cumulative_profiling_times(std::ostream & out); /** Measure time of some task and report it for the final cumulative profile. 
*/ -class time_task { +class LEAN_EXPORT time_task { std::string m_category; optional m_timeit; time_task * m_parent_task; diff --git a/src/library/util.h b/src/library/util.h index c9bb0a18c654..89215739903d 100644 --- a/src/library/util.h +++ b/src/library/util.h @@ -234,7 +234,7 @@ name mk_unsafe_rec_name(name const & n); /** Return some(n') if \c n is a name created using mk_unsafe_rec_name(n') */ optional is_unsafe_rec_name(name const & n); -std::string const & get_version_string(); +LEAN_EXPORT std::string const & get_version_string(); expr const & extract_mdata(expr const &); diff --git a/src/runtime/CMakeLists.txt b/src/runtime/CMakeLists.txt index f69610853169..367c7c298038 100644 --- a/src/runtime/CMakeLists.txt +++ b/src/runtime/CMakeLists.txt @@ -2,7 +2,7 @@ set(RUNTIME_OBJS debug.cpp thread.cpp mpz.cpp utf8.cpp object.cpp apply.cpp exception.cpp interrupt.cpp memory.cpp stackinfo.cpp compact.cpp init_module.cpp load_dynlib.cpp io.cpp hash.cpp platform.cpp alloc.cpp allocprof.cpp sharecommon.cpp stack_overflow.cpp -process.cpp object_ref.cpp mpn.cpp mutex.cpp) +process.cpp object_ref.cpp mpn.cpp mutex.cpp libuv.cpp) add_library(leanrt_initial-exec STATIC ${RUNTIME_OBJS}) set_target_properties(leanrt_initial-exec PROPERTIES ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) diff --git a/src/runtime/libuv.cpp b/src/runtime/libuv.cpp new file mode 100644 index 000000000000..627eb848d3ef --- /dev/null +++ b/src/runtime/libuv.cpp @@ -0,0 +1,22 @@ +/* +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. + +Author: Markus Himmel +*/ +#include "runtime/libuv.h" + +#ifndef LEAN_EMSCRIPTEN +#include + +extern "C" LEAN_EXPORT lean_obj_res lean_libuv_version(lean_obj_arg o) { + return lean_unsigned_to_nat(uv_version()); +} + +#else + +extern "C" LEAN_EXPORT lean_obj_res lean_libuv_version(lean_obj_arg o) { + return lean_box(0); +} + +#endif diff --git a/src/runtime/libuv.h b/src/runtime/libuv.h new file mode 100644 index 000000000000..4c53786a59d6 --- /dev/null +++ b/src/runtime/libuv.h @@ -0,0 +1,10 @@ +/* +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. 
+ +Author: Markus Himmel +*/ +#pragma once +#include + +extern "C" LEAN_EXPORT lean_obj_res lean_libuv_version(lean_obj_arg); diff --git a/src/runtime/object.cpp b/src/runtime/object.cpp index 1d2a74c76511..06128fafe507 100644 --- a/src/runtime/object.cpp +++ b/src/runtime/object.cpp @@ -46,12 +46,18 @@ Author: Leonardo de Moura namespace lean { +static bool should_abort_on_panic() { +#ifdef LEAN_EMSCRIPTEN + return false; +#else + return std::getenv("LEAN_ABORT_ON_PANIC"); +#endif +} + static void abort_on_panic() { -#ifndef LEAN_EMSCRIPTEN - if (std::getenv("LEAN_ABORT_ON_PANIC")) { + if (should_abort_on_panic()) { abort(); } -#endif } extern "C" LEAN_EXPORT void lean_internal_panic(char const * msg) { @@ -83,27 +89,41 @@ extern "C" LEAN_EXPORT void lean_set_panic_messages(bool flag) { g_panic_messages = flag; } +static void panic_eprintln(char const * line) { + if (g_exit_on_panic || should_abort_on_panic()) { + // If we are about to kill the process, we should skip the Lean stderr buffer + std::cerr << line << "\n"; + } else { + io_eprintln(lean_mk_string(line)); + } +} + static void print_backtrace() { #ifdef __GLIBC__ void * bt_buf[100]; int nptrs = backtrace(bt_buf, sizeof(bt_buf) / sizeof(void *)); - backtrace_symbols_fd(bt_buf, nptrs, STDERR_FILENO); - if (nptrs == sizeof(bt_buf)) { - std::cerr << "...\n"; + if (char ** symbols = backtrace_symbols(bt_buf, nptrs)) { + for (int i = 0; i < nptrs; i++) { + panic_eprintln(symbols[i]); + } + // According to `man backtrace`, each `symbols[i]` should NOT be freed + free(symbols); + if (nptrs == sizeof(bt_buf)) { + panic_eprintln("..."); + } } #else - std::cerr << "(stack trace unavailable)\n"; + panic_eprintln("(stack trace unavailable)"); #endif } extern "C" LEAN_EXPORT object * lean_panic_fn(object * default_val, object * msg) { - // TODO(Leo, Kha): add thread local buffer for interpreter. 
if (g_panic_messages) { - std::cerr << lean_string_cstr(msg) << "\n"; + panic_eprintln(lean_string_cstr(msg)); #ifdef __GLIBC__ char * bt_env = getenv("LEAN_BACKTRACE"); if (!bt_env || strcmp(bt_env, "0") != 0) { - std::cerr << "backtrace:\n"; + panic_eprintln("backtrace:"); print_backtrace(); } #endif diff --git a/src/runtime/stack_overflow.cpp b/src/runtime/stack_overflow.cpp index f1a81f0a00bd..262ebab73982 100644 --- a/src/runtime/stack_overflow.cpp +++ b/src/runtime/stack_overflow.cpp @@ -107,6 +107,7 @@ void initialize_stack_overflow() { action.sa_flags = SA_SIGINFO | SA_ONSTACK; action.sa_sigaction = segv_handler; sigaction(SIGSEGV, &action, nullptr); + sigaction(SIGBUS, &action, nullptr); #endif } diff --git a/src/shell/CMakeLists.txt b/src/shell/CMakeLists.txt index 83de6ba78b4e..fa67b6d6c895 100644 --- a/src/shell/CMakeLists.txt +++ b/src/shell/CMakeLists.txt @@ -1,9 +1,14 @@ -set(SRC lean.cpp) -if (${CMAKE_SYSTEM_NAME} MATCHES "Windows") -set(SRC ${SRC} manifest.rc) -endif() - -add_library(shell OBJECT ${SRC}) +add_library(leanmain STATIC lean.cpp) +set_target_properties(leanmain PROPERTIES + ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib/temp + OUTPUT_NAME leanmain) + +# library must contain at least one non-manifest file +file(WRITE ${CMAKE_BINARY_DIR}/temp/empty.c) +add_library(leanmanifest STATIC ${CMAKE_BINARY_DIR}/temp/empty.c manifest.rc) +set_target_properties(leanmanifest PROPERTIES + ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib/lean + OUTPUT_NAME leanmanifest) if(LLVM) if(${CMAKE_SYSTEM_NAME} MATCHES "Linux") @@ -28,8 +33,8 @@ endif() add_custom_target(lean ALL WORKING_DIRECTORY ${LEAN_SOURCE_DIR} - DEPENDS leanshared shell - COMMAND $(MAKE) -f ${CMAKE_BINARY_DIR}/stdlib.make lean LEAN_SHELL="$" + DEPENDS leanshared leanmain + COMMAND $(MAKE) -f ${CMAKE_BINARY_DIR}/stdlib.make lean COMMAND_EXPAND_LISTS) # use executable of current stage for tests diff --git a/src/stdlib.make.in b/src/stdlib.make.in index 959bca8240b7..ad909c8268f8 100644 --- a/src/stdlib.make.in +++ b/src/stdlib.make.in @@ -52,23 +52,49 @@ ${LIB}/temp/empty.c: # we specify the precise file names here to avoid rebuilds ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libInit_shared${CMAKE_SHARED_LIBRARY_SUFFIX}: ${LIB}/temp/libInit.a.export ${LIB}/temp/libStd.a.export ${CMAKE_BINARY_DIR}/runtime/libleanrt_initial-exec.a ${LIB}/temp/empty.c -ifeq "${INIT_SHARED_LINKER_FLAGS}" "" - # create empty library on platforms without restrictive symbol limits; avoids costly indirections and troubles with cross-library exceptions - "${CMAKE_BINARY_DIR}/leanc.sh" -shared -o $@ ${LIB}/temp/empty.c ${TOOLCHAIN_SHARED_LINKER_FLAGS} ${LEANC_OPTS} -else @echo "[ ] Building $@" +ifeq "${CMAKE_SYSTEM_NAME}" "Windows" # on Windows, must remove file before writing a new one (since the old one may be in use) @rm -f $@ - "${CMAKE_BINARY_DIR}/leanc.sh" -shared -o $@ ${INIT_SHARED_LINKER_FLAGS} ${TOOLCHAIN_SHARED_LINKER_FLAGS} ${LEANC_OPTS} + "${CMAKE_BINARY_DIR}/leanc.sh" -shared -o $@ \ + -Wl,--whole-archive ${CMAKE_BINARY_DIR}/lib/temp/libInit.a.export ${CMAKE_BINARY_DIR}/lib/temp/libStd.a.export ${CMAKE_BINARY_DIR}/runtime/libleanrt_initial-exec.a -Wl,--no-whole-archive \ + -Wl,--out-implib,${CMAKE_BINARY_DIR}/lib/lean/libInit_shared.dll.a ${TOOLCHAIN_SHARED_LINKER_FLAGS} ${LEANC_OPTS} +else +# create empty library on platforms without restrictive symbol limits; avoids costly indirections and troubles with cross-library exceptions + "${CMAKE_BINARY_DIR}/leanc.sh" -shared -o $@ ${LIB}/temp/empty.c ${INIT_SHARED_LINKER_FLAGS} 
${TOOLCHAIN_SHARED_LINKER_FLAGS} ${LEANC_OPTS} endif Init_shared: ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libInit_shared${CMAKE_SHARED_LIBRARY_SUFFIX} -${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libleanshared${CMAKE_SHARED_LIBRARY_SUFFIX}: ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libInit_shared${CMAKE_SHARED_LIBRARY_SUFFIX} ${LIB}/temp/libLean.a.export ${LIB}/lean/libleancpp.a +${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libleanshared${CMAKE_SHARED_LIBRARY_SUFFIX}: ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libInit_shared${CMAKE_SHARED_LIBRARY_SUFFIX} ${LIB}/temp/libLean.a.export ${LIB}/lean/libleancpp.a ${LIB}/temp/libleanshell.a ${LIB}/temp/libleaninitialize.a @echo "[ ] Building $@" +ifeq "${CMAKE_SYSTEM_NAME}" "Windows" # on Windows, must remove file before writing a new one (since the old one may be in use) - @rm -f $@ - "${CMAKE_BINARY_DIR}/leanc.sh" -shared -o $@ ${LEANSHARED_LINKER_FLAGS} ${TOOLCHAIN_SHARED_LINKER_FLAGS} ${LEANC_OPTS} + @rm -f ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libleanshared_1${CMAKE_SHARED_LIBRARY_SUFFIX} +# "half-way point" DLL to avoid symbol limit +# include Lean.Meta.WHNF and leancpp except for `initialize.cpp` + "${CMAKE_BINARY_DIR}/leanc.sh" -shared -o ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libleanshared_1${CMAKE_SHARED_LIBRARY_SUFFIX} \ + ${LIB}/temp/Lean/Meta/WHNF.o.export -Wl,--start-group ${LIB}/temp/libLean.a.export -Wl,--whole-archive ${LIB}/temp/libleancpp_1.a -Wl,--no-whole-archive -Wl,--end-group -lInit_shared -Wl,--out-implib,${LIB}/lean/libleanshared_1.dll.a ${LEANSHARED_LINKER_FLAGS} ${TOOLCHAIN_SHARED_LINKER_FLAGS} ${LEANC_OPTS} -Wl,-Map=${LIB}/temp/libleanshared_1.map +# now delete included symbols from libLean.a + cp ${LIB}/temp/libLean.a.export ${LIB}/temp/diff.a + sed -En 's/.*(\.\.[^:]*\.o\.export):.*/\1/p' ${LIB}/temp/libleanshared_1.map > ${LIB}/temp/diff.a.in +# can't use bundled llvm-ar before LLVM 16, https://github.com/llvm/llvm-project/issues/55023 + ar dP ${LIB}/temp/diff.a @${LIB}/temp/diff.a.in + "${CMAKE_AR}" tP ${LIB}/temp/diff.a +# now include Lean and the diff library in `leanshared` + "${CMAKE_BINARY_DIR}/leanc.sh" -shared -o $@ \ + -Wl,--whole-archive ${LIB}/temp/diff.a ${LIB}/temp/libleanshell.a ${LIB}/temp/libleaninitialize.a -Wl,--no-whole-archive -lleanshared_1 -lInit_shared -Wl,--out-implib,${LIB}/lean/libleanshared.dll.a ${LEANSHARED_LINKER_FLAGS} ${TOOLCHAIN_SHARED_LINKER_FLAGS} ${LEANC_OPTS} +else +# create empty library on platforms without restrictive symbol limits; avoids costly indirections and troubles with cross-library exceptions + "${CMAKE_BINARY_DIR}/leanc.sh" -shared -o ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libleanshared_1${CMAKE_SHARED_LIBRARY_SUFFIX} ${LIB}/temp/empty.c ${TOOLCHAIN_SHARED_LINKER_FLAGS} ${LEANC_OPTS} +ifeq "${CMAKE_SYSTEM_NAME}" "Darwin" + "${CMAKE_BINARY_DIR}/leanc.sh" -shared -o $@ \ + ${LIB}/temp/Lean.o -Wl,-force_load,${LIB}/temp/libleanshell.a -lInit -lStd -lLean -lleancpp ${CMAKE_BINARY_DIR}/runtime/libleanrt_initial-exec.a ${LEANSHARED_LINKER_FLAGS} ${TOOLCHAIN_SHARED_LINKER_FLAGS} ${LEANC_OPTS} +else + "${CMAKE_BINARY_DIR}/leanc.sh" -shared -o $@ \ + -Wl,--whole-archive ${LIB}/temp/Lean.o ${LIB}/temp/libleanshell.a -Wl,--no-whole-archive -Wl,--start-group -lInit -lStd -lLean -lleancpp -Wl,--end-group ${CMAKE_BINARY_DIR}/runtime/libleanrt_initial-exec.a ${LEANSHARED_LINKER_FLAGS} ${TOOLCHAIN_SHARED_LINKER_FLAGS} ${LEANC_OPTS} +endif +endif leanshared: ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libleanshared${CMAKE_SHARED_LIBRARY_SUFFIX} @@ -76,11 +102,11 @@ Lake: # lake is in its own subdirectory, so must adjust relative paths... 
+"${LEAN_BIN}/leanmake" -C lake bin lib PKG=Lake BIN_NAME=lake${CMAKE_EXECUTABLE_SUFFIX} $(LEANMAKE_OPTS) LINK_OPTS='${CMAKE_EXE_LINKER_FLAGS_MAKE_MAKE}' OUT="../${LIB}" LIB_OUT="../${LIB}/lean" OLEAN_OUT="../${LIB}/lean" -${CMAKE_BINARY_DIR}/bin/lean${CMAKE_EXECUTABLE_SUFFIX}: ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libInit_shared${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libleanshared${CMAKE_SHARED_LIBRARY_SUFFIX} $(LEAN_SHELL) +${CMAKE_BINARY_DIR}/bin/lean${CMAKE_EXECUTABLE_SUFFIX}: ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libInit_shared${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libleanshared${CMAKE_SHARED_LIBRARY_SUFFIX} ${LIB}/temp/libleanmain.a @echo "[ ] Building $@" # on Windows, must remove file before writing a new one (since the old one may be in use) @rm -f $@ - "${CMAKE_BINARY_DIR}/leanc.sh" $(LEAN_SHELL) ${CMAKE_EXE_LINKER_FLAGS_MAKE} ${LEAN_EXE_LINKER_FLAGS} ${LEANC_OPTS} -o $@ + "${CMAKE_BINARY_DIR}/leanc.sh" ${LIB}/temp/libleanmain.a ${CMAKE_EXE_LINKER_FLAGS_MAKE} ${LEAN_EXE_LINKER_FLAGS} ${LEANC_OPTS} -o $@ lean: ${CMAKE_BINARY_DIR}/bin/lean${CMAKE_EXECUTABLE_SUFFIX} diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt index 06594de367de..186877387488 100644 --- a/src/util/CMakeLists.txt +++ b/src/util/CMakeLists.txt @@ -5,5 +5,5 @@ add_library(util OBJECT name.cpp name_set.cpp path.cpp lbool.cpp init_module.cpp list_fn.cpp timeit.cpp timer.cpp name_generator.cpp kvmap.cpp map_foreach.cpp - options.cpp option_declarations.cpp shell.cpp + options.cpp option_declarations.cpp "${CMAKE_BINARY_DIR}/util/ffi.cpp") diff --git a/src/util/ffi.cpp b/src/util/ffi.cpp index 6e53d7cca5e3..ee8dcf77288d 100644 --- a/src/util/ffi.cpp +++ b/src/util/ffi.cpp @@ -9,19 +9,19 @@ Author: Sebastian Ullrich #include "runtime/string_ref.h" namespace lean { -extern "C" object * lean_get_leanc_extra_flags(object *) { +LEAN_EXPORT extern "C" object * lean_get_leanc_extra_flags(object *) { return lean_mk_string("@LEANC_EXTRA_FLAGS@"); } -extern "C" object * lean_get_leanc_internal_flags(object *) { +LEAN_EXPORT extern "C" object * lean_get_leanc_internal_flags(object *) { return lean_mk_string("@LEANC_INTERNAL_FLAGS@"); } -extern "C" object * lean_get_linker_flags(uint8 link_static) { +LEAN_EXPORT extern "C" object * lean_get_linker_flags(uint8 link_static) { return lean_mk_string(link_static ? "@LEANC_STATIC_LINKER_FLAGS@ @LEAN_EXTRA_LINKER_FLAGS@" : "@LEANC_SHARED_LINKER_FLAGS@ @LEAN_EXTRA_LINKER_FLAGS@"); } -extern "C" object * lean_get_internal_linker_flags(object *) { +LEAN_EXPORT extern "C" object * lean_get_internal_linker_flags(object *) { return lean_mk_string("@LEANC_INTERNAL_LINKER_FLAGS@"); } } diff --git a/src/util/init_module.cpp b/src/util/init_module.cpp index caf60ed0cfbc..a2015c93f8a1 100644 --- a/src/util/init_module.cpp +++ b/src/util/init_module.cpp @@ -4,6 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE. Author: Leonardo de Moura */ +#include "util/init_module.h" #include "runtime/init_module.h" #include "util/ascii.h" #include "util/name.h" diff --git a/src/util/init_module.h b/src/util/init_module.h index c4fb11fb454d..d70e7be0ca71 100644 --- a/src/util/init_module.h +++ b/src/util/init_module.h @@ -5,8 +5,9 @@ Released under Apache 2.0 license as described in the file LICENSE. 
Author: Leonardo de Moura */ #pragma once +#include "runtime/object.h" namespace lean { -void initialize_util_module(); -void finalize_util_module(); +LEAN_EXPORT void initialize_util_module(); +LEAN_EXPORT void finalize_util_module(); } diff --git a/src/util/kvmap.h b/src/util/kvmap.h index ff16d98328a8..5c22c6133208 100644 --- a/src/util/kvmap.h +++ b/src/util/kvmap.h @@ -53,16 +53,16 @@ typedef list_ref kvmap; optional find(kvmap m, name const & k); -optional get_string(kvmap const & m, name const & k); -optional get_nat(kvmap const & m, name const & k); -optional get_bool(kvmap const & m, name const & k); -optional get_name(kvmap const & m, name const & k); +LEAN_EXPORT optional get_string(kvmap const & m, name const & k); +LEAN_EXPORT optional get_nat(kvmap const & m, name const & k); +LEAN_EXPORT optional get_bool(kvmap const & m, name const & k); +LEAN_EXPORT optional get_name(kvmap const & m, name const & k); -kvmap set_string(kvmap const & m, name const & k, string_ref const & v); +LEAN_EXPORT kvmap set_string(kvmap const & m, name const & k, string_ref const & v); inline kvmap set_string(kvmap const & m, name const & k, char const * v) { return set_string(m, k, string_ref(v)); } inline kvmap set_string(kvmap const & m, name const & k, std::string const & v) { return set_string(m, k, string_ref(v)); } -kvmap set_bool(kvmap const & m, name const & k, bool v); -kvmap set_name(kvmap const & m, name const & k, name const & v); -kvmap set_nat(kvmap const & m, name const & k, nat const & v); +LEAN_EXPORT kvmap set_bool(kvmap const & m, name const & k, bool v); +LEAN_EXPORT kvmap set_name(kvmap const & m, name const & k, name const & v); +LEAN_EXPORT kvmap set_nat(kvmap const & m, name const & k, nat const & v); inline kvmap set_nat(kvmap const & m, name const & k, unsigned v) { return set_nat(m, k, nat(v)); } } diff --git a/src/util/name.cpp b/src/util/name.cpp index 9813bdfc84d3..531ebe265c53 100644 --- a/src/util/name.cpp +++ b/src/util/name.cpp @@ -294,7 +294,7 @@ std::string name::escape(char const * sep) const { return s.str(); } -std::ostream & operator<<(std::ostream & out, name const & n) { +LEAN_EXPORT std::ostream & operator<<(std::ostream & out, name const & n) { display_name(out, n, false, lean_name_separator); return out; } diff --git a/src/util/name.h b/src/util/name.h index f15fa1e29d0b..b92b6a360824 100644 --- a/src/util/name.h +++ b/src/util/name.h @@ -49,7 +49,7 @@ inline uint64_t lean_name_hash_exported_b(b_lean_obj_arg n) { enum class name_kind { ANONYMOUS, STRING, NUMERAL }; /** \brief Hierarchical names. */ -class name : public object_ref { +class LEAN_EXPORT name : public object_ref { public: /* Low level primitives */ static bool eq(b_obj_arg n1, b_obj_arg n2) { return lean_name_eq(n1, n2); } @@ -138,7 +138,7 @@ class name : public object_ref { size_t utf8_size() const; /** \brief Return true iff the name contains only safe ASCII chars */ bool is_safe_ascii() const; - friend std::ostream & operator<<(std::ostream & out, name const & n); + friend LEAN_EXPORT std::ostream & operator<<(std::ostream & out, name const & n); /** \brief Concatenate the two given names. 
*/ friend name operator+(name const & n1, name const & n2); @@ -201,7 +201,7 @@ class name : public object_ref { } }; -name string_to_name(std::string const & str); +LEAN_EXPORT name string_to_name(std::string const & str); struct name_hash_fn { unsigned operator()(name const & n) const { return n.hash(); } }; struct name_eq_fn { bool operator()(name const & n1, name const & n2) const { return n1 == n2; } }; diff --git a/src/util/option_declarations.h b/src/util/option_declarations.h index 1dcf2d6a2100..d9eb178d11a4 100644 --- a/src/util/option_declarations.h +++ b/src/util/option_declarations.h @@ -35,7 +35,7 @@ class option_declaration { }; typedef name_map option_declarations; -option_declarations get_option_declarations(); +LEAN_EXPORT option_declarations get_option_declarations(); void register_option(name const & n, name const & decl_name, data_value_kind k, char const * default_value, char const * description); #define register_bool_option(n, v, d) register_option(n, {}, data_value_kind::Bool, LEAN_STR(v), d) #define register_unsigned_option(n, v, d) register_option(n, {}, data_value_kind::Nat, LEAN_STR(v), d) diff --git a/src/util/options.h b/src/util/options.h index 19cc8f1de897..a9240ab35988 100644 --- a/src/util/options.h +++ b/src/util/options.h @@ -71,10 +71,10 @@ class options { object * to_obj_arg() const { return m_value.to_obj_arg(); } }; -bool get_verbose(options const & opts); -name const & get_verbose_opt_name(); -name const & get_max_memory_opt_name(); -name const & get_timeout_opt_name(); +LEAN_EXPORT bool get_verbose(options const & opts); +LEAN_EXPORT name const & get_verbose_opt_name(); +LEAN_EXPORT name const & get_max_memory_opt_name(); +LEAN_EXPORT name const & get_timeout_opt_name(); inline options operator+(options const & opts1, options const & opts2) { return join(opts1, opts2); diff --git a/src/util/path.h b/src/util/path.h index 8170fe767cf2..690bc4e15d83 100644 --- a/src/util/path.h +++ b/src/util/path.h @@ -35,9 +35,9 @@ bool has_file_ext(std::string const & fname, char const * ext); std::string resolve(std::string const & rel_or_abs, std::string const & base); std::string dirname(std::string const & fn); /** \brief Get the file name without the extension. 
*/ -std::string stem(std::string const & fn); +LEAN_EXPORT std::string stem(std::string const & fn); -std::string read_file(std::string const & fname, std::ios_base::openmode mode = std::ios_base::in); +LEAN_EXPORT std::string read_file(std::string const & fname, std::ios_base::openmode mode = std::ios_base::in); bool is_directory(std::string const & fn); optional is_dir(std::string const & fn); @@ -45,5 +45,5 @@ std::vector read_dir(std::string const & dirname); time_t get_mtime(std::string const & fname); -std::string lrealpath(std::string const & fname); +LEAN_EXPORT std::string lrealpath(std::string const & fname); } diff --git a/stage0/stdlib/Lean/Compiler/IR/EmitC.c b/stage0/stdlib/Lean/Compiler/IR/EmitC.c index 9469cfd6fc03..1d6230be38f4 100644 --- a/stage0/stdlib/Lean/Compiler/IR/EmitC.c +++ b/stage0/stdlib/Lean/Compiler/IR/EmitC.c @@ -59,7 +59,6 @@ static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__2___closed__22; static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__1___closed__10; lean_object* l_Lean_IR_EmitC_emitArg___boxed(lean_object*, lean_object*, lean_object*); uint64_t lean_uint64_of_nat(lean_object*); -static lean_object* l_Lean_IR_EmitC_shouldExport___closed__6; lean_object* l_Lean_IR_EmitC_emitUProj(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__2; static lean_object* l_Lean_IR_EmitC_emitFileHeader___closed__4; @@ -139,13 +138,11 @@ static lean_object* l_Lean_IR_EmitC_emitFileHeader___closed__15; static lean_object* l_Lean_IR_EmitC_declareVar___closed__1; static lean_object* l_Lean_IR_EmitC_emitExternCall___closed__1; LEAN_EXPORT lean_object* l_List_foldl___at_Lean_IR_EmitC_emitFnDecls___spec__1___boxed(lean_object*, lean_object*); -static lean_object* l_Lean_IR_EmitC_shouldExport___closed__4; lean_object* l_Nat_nextPowerOfTwo_go(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_IR_EmitC_emitSSet(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_IR_EmitC_emitFnDecl(lean_object*, uint8_t, lean_object*, lean_object*); static lean_object* l_Lean_IR_EmitC_toCType___closed__4; static lean_object* l_Lean_IR_EmitC_emitReuse___closed__2; -static lean_object* l_Lean_IR_EmitC_shouldExport___closed__1; lean_object* l_Lean_IR_EmitC_emitCName(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_IR_EmitC_emitInitFn___closed__5; static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__2___closed__24; @@ -154,7 +151,6 @@ static lean_object* l_Lean_IR_EmitC_emitJmp___lambda__1___closed__2; LEAN_EXPORT lean_object* l_Nat_forM_loop___at_Lean_IR_EmitC_emitArgs___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_IR_EmitC_emitNumLit___closed__4; static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__1___closed__16; -uint8_t l_Lean_Name_isPrefixOf(lean_object*, lean_object*); static lean_object* l_Lean_IR_EmitC_emitFileHeader___closed__19; static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__1___closed__5; static lean_object* l_Nat_forM_loop___at_Lean_IR_EmitC_emitFnDeclAux___spec__1___closed__1; @@ -186,7 +182,6 @@ LEAN_EXPORT lean_object* l_Lean_RBNode_revFold___at_Lean_IR_EmitC_emitFnDecls___ static lean_object* l_Lean_IR_EmitC_emitMainFn___closed__2; lean_object* l_Lean_IR_EmitC_emitSet___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* 
l_Lean_IR_EmitC_emitFnBody(lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_Name_mkStr3(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_forM___at_Lean_IR_EmitC_emitLns___spec__1(lean_object*); static lean_object* l_Lean_IR_EmitC_emitSSet___closed__6; LEAN_EXPORT lean_object* l_Nat_foldM_loop___at_Lean_IR_EmitC_emitSimpleExternalCall___spec__1___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -208,7 +203,6 @@ static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__2___closed__13; lean_object* l_Lean_IR_EmitC_emitNumLit___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Nat_anyTR_loop___at_Lean_IR_EmitC_overwriteParam___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__3___closed__3; -uint8_t l_Lean_IR_EmitC_shouldExport(lean_object*); static lean_object* l_Lean_IR_EmitC_emitInc___closed__2; lean_object* lean_string_utf8_next(lean_object*, lean_object*); uint8_t lean_nat_dec_eq(lean_object*, lean_object*); @@ -237,7 +231,6 @@ static lean_object* l_Lean_IR_EmitC_toCName___closed__4; lean_object* l_Lean_IR_EmitC_toCType(lean_object*); lean_object* l_Lean_IR_EmitC_emitBoxFn(lean_object*, lean_object*, lean_object*); lean_object* lean_nat_div(lean_object*, lean_object*); -static lean_object* l_Lean_IR_EmitC_shouldExport___closed__9; static lean_object* l_Lean_IR_EmitC_emitSProj___closed__5; LEAN_EXPORT lean_object* l_List_forM___at_Lean_IR_EmitC_emitFns___spec__1(lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_closureMaxArgs; @@ -247,7 +240,6 @@ lean_object* l_Lean_IR_EmitC_emitUSet(lean_object*, lean_object*, lean_object*, static lean_object* l_String_foldlAux___at_Lean_IR_EmitC_quoteString___spec__1___closed__4; LEAN_EXPORT lean_object* l_Nat_forM_loop___at_Lean_IR_EmitC_emitArgs___spec__1___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_IR_EmitC_overwriteParam(lean_object*, lean_object*); -static lean_object* l_Lean_IR_EmitC_shouldExport___closed__12; extern lean_object* l_Lean_exportAttr; LEAN_EXPORT lean_object* l_List_forM___at_Lean_IR_EmitC_emitLns___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_IR_EmitC_emitTailCall___closed__1; @@ -297,7 +289,6 @@ static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__1___closed__18; static lean_object* l_Lean_IR_EmitC_emitFileHeader___closed__12; static lean_object* l_Lean_IR_EmitC_emitFileHeader___closed__23; static lean_object* l_Lean_IR_EmitC_emitCtor___closed__1; -static lean_object* l_Lean_IR_EmitC_shouldExport___closed__5; lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT uint8_t l_Nat_anyTR_loop___at_Lean_IR_EmitC_overwriteParam___spec__2(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_IR_EmitC_emitInitFn___closed__8; @@ -314,7 +305,6 @@ static lean_object* l_Lean_IR_EmitC_emitInitFn___closed__10; lean_object* l_Lean_IR_EmitC_emitMarkPersistent___boxed(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Expr_constName_x3f(lean_object*); static lean_object* l_Lean_IR_EmitC_emitInitFn___closed__11; -static lean_object* l_Lean_IR_EmitC_shouldExport___closed__10; static lean_object* l_Lean_IR_EmitC_emitApp___closed__1; static lean_object* 
l_Lean_IR_EmitC_emitMainFn___lambda__1___closed__8; LEAN_EXPORT lean_object* l_String_foldlAux___at_Lean_IR_EmitC_quoteString___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); @@ -331,12 +321,10 @@ lean_object* l_Lean_IR_EmitC_emitSet(lean_object*, lean_object*, lean_object*, l static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__2___closed__6; lean_object* l_Lean_IR_EmitC_emitTailCall___lambda__1(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_IR_EmitC_emitFnBody___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_IR_EmitC_shouldExport___boxed(lean_object*); lean_object* l_Lean_IR_EmitC_emitLns___at_Lean_IR_EmitC_emitMainFn___spec__1___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_IR_EmitC_emitFileHeader___closed__17; uint8_t lean_name_eq(lean_object*, lean_object*); static lean_object* l_Lean_IR_EmitC_toCType___closed__1; -static lean_object* l_Lean_IR_EmitC_shouldExport___closed__7; lean_object* l_Lean_Name_str___override(lean_object*, lean_object*); static lean_object* l_Lean_IR_EmitC_emitBoxFn___closed__1; static lean_object* l_Lean_IR_EmitC_emitFileHeader___closed__24; @@ -350,7 +338,6 @@ lean_object* l_Lean_IR_EmitC_emitIf___boxed(lean_object*, lean_object*, lean_obj lean_object* l_Lean_IR_EmitC_emitBlock(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_IR_EmitC_emitApp___closed__5; LEAN_EXPORT lean_object* l_Nat_forM_loop___at_Lean_IR_EmitC_emitTailCall___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_IR_EmitC_shouldExport___closed__3; static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__1___closed__6; lean_object* l_Lean_IR_EmitC_emitReset___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_IR_EmitC_emitPartialApp(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -422,7 +409,6 @@ static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__2___closed__30; static lean_object* l_Lean_IR_EmitC_emitInitFn___closed__7; extern lean_object* l_Lean_NameSet_empty; static lean_object* l_List_forM___at_Lean_IR_EmitC_emitFnDecls___spec__5___closed__1; -static lean_object* l_Lean_IR_EmitC_shouldExport___closed__13; static lean_object* l_Lean_IR_EmitC_emitPartialApp___closed__2; static lean_object* l_Lean_IR_EmitC_emitDeclAux___lambda__3___closed__2; lean_object* l_Lean_IR_EmitC_emitCtorScalarSize(lean_object*, lean_object*, lean_object*, lean_object*); @@ -462,8 +448,6 @@ lean_object* l_Lean_IR_EmitC_emit___rarg(lean_object*, lean_object*, lean_object LEAN_EXPORT lean_object* l_Nat_forM_loop___at_Lean_IR_EmitC_emitDeclAux___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_IR_EmitC_getJPParams___boxed(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_Name_mkStr2(lean_object*, lean_object*); -static lean_object* l_Lean_IR_EmitC_shouldExport___closed__8; lean_object* l_Lean_IR_getDecls(lean_object*); static lean_object* l_Lean_IR_EmitC_toCType___closed__5; lean_object* l_Lean_ParametricAttribute_getParam_x3f___rarg(lean_object*, lean_object*, lean_object*, lean_object*); @@ -539,7 +523,6 @@ static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__2___closed__11; size_t 
lean_usize_sub(size_t, size_t); lean_object* l_Lean_IR_EmitC_emitInitFn___boxed(lean_object*, lean_object*); static lean_object* l_String_foldlAux___at_Lean_IR_EmitC_quoteString___spec__1___closed__1; -static lean_object* l_Lean_IR_EmitC_shouldExport___closed__2; static lean_object* l_Lean_IR_EmitC_emitNumLit___closed__1; static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__2___closed__29; static lean_object* l_Lean_IR_EmitC_emitMainFn___closed__1; @@ -604,7 +587,6 @@ static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__2___closed__7; lean_object* l_Lean_IR_collectUsedDecls(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__1___closed__15; static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__2___closed__16; -static lean_object* l_Lean_IR_EmitC_shouldExport___closed__11; static lean_object* l_Lean_IR_EmitC_getDecl___closed__1; static lean_object* l_Lean_IR_EmitC_emitInitFn___closed__1; lean_object* l_Lean_IR_Decl_normalizeIds(lean_object*); @@ -637,6 +619,7 @@ static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__1___closed__1; static lean_object* l_Lean_IR_EmitC_toCType___closed__7; lean_object* l_Lean_IR_EmitC_emitOffset___boxed(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_foldlMUnsafe_fold___at_Lean_IR_EmitC_emitCase___spec__1___closed__2; +static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__4; lean_object* l_Lean_IR_EmitC_emitCInitName___boxed(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_IR_EmitC_emitDeclAux___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_IR_EmitC_emitMainFn___lambda__2___closed__14; @@ -1817,202 +1800,6 @@ lean_dec(x_2); return x_4; } } -static lean_object* _init_l_Lean_IR_EmitC_shouldExport___closed__1() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("Lean", 4, 4); -return x_1; -} -} -static lean_object* _init_l_Lean_IR_EmitC_shouldExport___closed__2() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("Compiler", 8, 8); -return x_1; -} -} -static lean_object* _init_l_Lean_IR_EmitC_shouldExport___closed__3() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("LCNF", 4, 4); -return x_1; -} -} -static lean_object* _init_l_Lean_IR_EmitC_shouldExport___closed__4() { -_start: -{ -lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; -x_1 = l_Lean_IR_EmitC_shouldExport___closed__1; -x_2 = l_Lean_IR_EmitC_shouldExport___closed__2; -x_3 = l_Lean_IR_EmitC_shouldExport___closed__3; -x_4 = l_Lean_Name_mkStr3(x_1, x_2, x_3); -return x_4; -} -} -static lean_object* _init_l_Lean_IR_EmitC_shouldExport___closed__5() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("IR", 2, 2); -return x_1; -} -} -static lean_object* _init_l_Lean_IR_EmitC_shouldExport___closed__6() { -_start: -{ -lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_IR_EmitC_shouldExport___closed__1; -x_2 = l_Lean_IR_EmitC_shouldExport___closed__5; -x_3 = l_Lean_Name_mkStr2(x_1, x_2); -return x_3; -} -} -static lean_object* _init_l_Lean_IR_EmitC_shouldExport___closed__7() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("Server", 6, 6); -return x_1; -} -} -static lean_object* _init_l_Lean_IR_EmitC_shouldExport___closed__8() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("Watchdog", 8, 8); -return x_1; -} -} -static lean_object* 
_init_l_Lean_IR_EmitC_shouldExport___closed__9() { -_start: -{ -lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; -x_1 = l_Lean_IR_EmitC_shouldExport___closed__1; -x_2 = l_Lean_IR_EmitC_shouldExport___closed__7; -x_3 = l_Lean_IR_EmitC_shouldExport___closed__8; -x_4 = l_Lean_Name_mkStr3(x_1, x_2, x_3); -return x_4; -} -} -static lean_object* _init_l_Lean_IR_EmitC_shouldExport___closed__10() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("ImportCompletion", 16, 16); -return x_1; -} -} -static lean_object* _init_l_Lean_IR_EmitC_shouldExport___closed__11() { -_start: -{ -lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; -x_1 = l_Lean_IR_EmitC_shouldExport___closed__1; -x_2 = l_Lean_IR_EmitC_shouldExport___closed__7; -x_3 = l_Lean_IR_EmitC_shouldExport___closed__10; -x_4 = l_Lean_Name_mkStr3(x_1, x_2, x_3); -return x_4; -} -} -static lean_object* _init_l_Lean_IR_EmitC_shouldExport___closed__12() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("Completion", 10, 10); -return x_1; -} -} -static lean_object* _init_l_Lean_IR_EmitC_shouldExport___closed__13() { -_start: -{ -lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; -x_1 = l_Lean_IR_EmitC_shouldExport___closed__1; -x_2 = l_Lean_IR_EmitC_shouldExport___closed__7; -x_3 = l_Lean_IR_EmitC_shouldExport___closed__12; -x_4 = l_Lean_Name_mkStr3(x_1, x_2, x_3); -return x_4; -} -} -uint8_t l_Lean_IR_EmitC_shouldExport(lean_object* x_1) { -_start: -{ -lean_object* x_2; uint8_t x_3; -x_2 = l_Lean_IR_EmitC_shouldExport___closed__4; -x_3 = l_Lean_Name_isPrefixOf(x_2, x_1); -if (x_3 == 0) -{ -lean_object* x_4; uint8_t x_5; -x_4 = l_Lean_IR_EmitC_shouldExport___closed__6; -x_5 = l_Lean_Name_isPrefixOf(x_4, x_1); -if (x_5 == 0) -{ -lean_object* x_6; uint8_t x_7; -x_6 = l_Lean_IR_EmitC_shouldExport___closed__9; -x_7 = l_Lean_Name_isPrefixOf(x_6, x_1); -if (x_7 == 0) -{ -lean_object* x_8; uint8_t x_9; -x_8 = l_Lean_IR_EmitC_shouldExport___closed__11; -x_9 = l_Lean_Name_isPrefixOf(x_8, x_1); -if (x_9 == 0) -{ -lean_object* x_10; uint8_t x_11; -x_10 = l_Lean_IR_EmitC_shouldExport___closed__13; -x_11 = l_Lean_Name_isPrefixOf(x_10, x_1); -if (x_11 == 0) -{ -uint8_t x_12; -x_12 = 1; -return x_12; -} -else -{ -uint8_t x_13; -x_13 = 0; -return x_13; -} -} -else -{ -uint8_t x_14; -x_14 = 0; -return x_14; -} -} -else -{ -uint8_t x_15; -x_15 = 0; -return x_15; -} -} -else -{ -uint8_t x_16; -x_16 = 0; -return x_16; -} -} -else -{ -uint8_t x_17; -x_17 = 0; -return x_17; -} -} -} -lean_object* l_Lean_IR_EmitC_shouldExport___boxed(lean_object* x_1) { -_start: -{ -uint8_t x_2; lean_object* x_3; -x_2 = l_Lean_IR_EmitC_shouldExport(x_1); -lean_dec(x_1); -x_3 = lean_box(x_2); -return x_3; -} -} LEAN_EXPORT lean_object* l_Nat_forM_loop___at_Lean_IR_EmitC_emitFnDeclAux___spec__1___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { _start: { @@ -2406,60 +2193,56 @@ if (x_10 == 0) { if (x_3 == 0) { -lean_object* x_11; uint8_t x_12; -x_11 = l_Lean_IR_Decl_name(x_1); -x_12 = l_Lean_IR_EmitC_shouldExport(x_11); -lean_dec(x_11); -if (x_12 == 0) -{ -lean_object* x_13; lean_object* x_14; +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; +x_11 = l_Lean_IR_EmitC_emitFnDeclAux___closed__1; +x_12 = lean_string_append(x_9, x_11); x_13 = lean_box(0); -x_14 = l_Lean_IR_EmitC_emitFnDeclAux___lambda__3(x_1, x_2, x_6, x_8, x_13, x_4, x_9); +x_14 = l_Lean_IR_EmitC_emitFnDeclAux___lambda__3(x_1, x_2, x_6, 
x_8, x_13, x_4, x_12); lean_dec(x_8); return x_14; } else { -lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; -x_15 = l_Lean_IR_EmitC_emitFnDeclAux___closed__1; -x_16 = lean_string_append(x_9, x_15); -x_17 = lean_box(0); -x_18 = l_Lean_IR_EmitC_emitFnDeclAux___lambda__3(x_1, x_2, x_6, x_8, x_17, x_4, x_16); +lean_object* x_15; lean_object* x_16; +x_15 = lean_box(0); +x_16 = l_Lean_IR_EmitC_emitFnDeclAux___lambda__3(x_1, x_2, x_6, x_8, x_15, x_4, x_9); lean_dec(x_8); -return x_18; +return x_16; } } else { -lean_object* x_19; lean_object* x_20; -x_19 = lean_box(0); -x_20 = l_Lean_IR_EmitC_emitFnDeclAux___lambda__3(x_1, x_2, x_6, x_8, x_19, x_4, x_9); +lean_object* x_17; uint8_t x_18; +x_17 = l_Lean_IR_Decl_name(x_1); +x_18 = l_Lean_isClosedTermName(x_8, x_17); +lean_dec(x_17); +if (x_18 == 0) +{ +if (x_3 == 0) +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; +x_19 = l_Lean_IR_EmitC_emitFnDeclAux___closed__1; +x_20 = lean_string_append(x_9, x_19); +x_21 = lean_box(0); +x_22 = l_Lean_IR_EmitC_emitFnDeclAux___lambda__3(x_1, x_2, x_6, x_8, x_21, x_4, x_20); lean_dec(x_8); -return x_20; -} +return x_22; } else { -lean_object* x_21; uint8_t x_22; -x_21 = l_Lean_IR_Decl_name(x_1); -x_22 = l_Lean_isClosedTermName(x_8, x_21); -lean_dec(x_21); -if (x_22 == 0) -{ -if (x_3 == 0) -{ lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; -x_23 = l_Lean_IR_EmitC_emitFnDeclAux___closed__1; +x_23 = l_Lean_IR_EmitC_emitFnDeclAux___closed__2; x_24 = lean_string_append(x_9, x_23); x_25 = lean_box(0); x_26 = l_Lean_IR_EmitC_emitFnDeclAux___lambda__3(x_1, x_2, x_6, x_8, x_25, x_4, x_24); lean_dec(x_8); return x_26; } +} else { lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; -x_27 = l_Lean_IR_EmitC_emitFnDeclAux___closed__2; +x_27 = l_Lean_IR_EmitC_emitFnDeclAux___closed__3; x_28 = lean_string_append(x_9, x_27); x_29 = lean_box(0); x_30 = l_Lean_IR_EmitC_emitFnDeclAux___lambda__3(x_1, x_2, x_6, x_8, x_29, x_4, x_28); @@ -2467,17 +2250,6 @@ lean_dec(x_8); return x_30; } } -else -{ -lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; -x_31 = l_Lean_IR_EmitC_emitFnDeclAux___closed__3; -x_32 = lean_string_append(x_9, x_31); -x_33 = lean_box(0); -x_34 = l_Lean_IR_EmitC_emitFnDeclAux___lambda__3(x_1, x_2, x_6, x_8, x_33, x_4, x_32); -lean_dec(x_8); -return x_34; -} -} } } LEAN_EXPORT lean_object* l_Nat_forM_loop___at_Lean_IR_EmitC_emitFnDeclAux___spec__1___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { @@ -3814,14 +3586,22 @@ return x_21; static lean_object* _init_l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__1() { _start: { +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Lean", 4, 4); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__2() { +_start: +{ lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); -x_2 = l_Lean_IR_EmitC_shouldExport___closed__1; +x_2 = l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__1; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__2() { +static lean_object* _init_l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__3() { _start: { lean_object* x_1; @@ -3829,7 +3609,7 @@ x_1 = lean_mk_string_unchecked("void lean_initialize_runtime_module();", 38, 38) return x_1; } } -static lean_object* 
_init_l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__3() { +static lean_object* _init_l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__4() { _start: { lean_object* x_1; @@ -3847,12 +3627,12 @@ lean_inc(x_7); x_8 = lean_ctor_get(x_6, 1); lean_inc(x_8); lean_dec(x_6); -x_9 = l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__1; +x_9 = l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__2; x_10 = l_Lean_IR_usesModuleFrom(x_7, x_9); if (x_10 == 0) { lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; -x_11 = l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__2; +x_11 = l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__3; x_12 = lean_string_append(x_8, x_11); x_13 = l_Lean_IR_EmitC_emitLn___rarg___closed__1; x_14 = lean_string_append(x_12, x_13); @@ -3863,7 +3643,7 @@ return x_16; else { lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; -x_17 = l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__3; +x_17 = l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__4; x_18 = lean_string_append(x_8, x_17); x_19 = l_Lean_IR_EmitC_emitLn___rarg___closed__1; x_20 = lean_string_append(x_18, x_19); @@ -13731,19 +13511,19 @@ lean_inc(x_10); x_11 = l_Lean_hasInitAttr(x_5, x_10); if (x_11 == 0) { -uint8_t x_71; -x_71 = 0; -x_12 = x_71; -goto block_70; +uint8_t x_65; +x_65 = 0; +x_12 = x_65; +goto block_64; } else { -uint8_t x_72; -x_72 = 1; -x_12 = x_72; -goto block_70; +uint8_t x_66; +x_66 = 1; +x_12 = x_66; +goto block_64; } -block_70: +block_64: { if (x_12 == 0) { @@ -13783,205 +13563,177 @@ x_24 = lean_nat_dec_eq(x_22, x_23); lean_dec(x_22); if (x_24 == 0) { -uint8_t x_25; -x_25 = l_Lean_IR_EmitC_shouldExport(x_13); -if (x_25 == 0) -{ -lean_object* x_26; lean_object* x_27; -x_26 = lean_box(0); -x_27 = l_Lean_IR_EmitC_emitDeclAux___lambda__3(x_15, x_13, x_14, x_16, x_10, x_20, x_26, x_2, x_21); -lean_dec(x_20); -lean_dec(x_15); -return x_27; -} -else -{ -lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; -x_28 = l_Lean_IR_EmitC_emitFnDeclAux___closed__1; -x_29 = lean_string_append(x_21, x_28); -x_30 = lean_box(0); -x_31 = l_Lean_IR_EmitC_emitDeclAux___lambda__3(x_15, x_13, x_14, x_16, x_10, x_20, x_30, x_2, x_29); +lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_25 = l_Lean_IR_EmitC_emitFnDeclAux___closed__1; +x_26 = lean_string_append(x_21, x_25); +x_27 = lean_box(0); +x_28 = l_Lean_IR_EmitC_emitDeclAux___lambda__3(x_15, x_13, x_14, x_16, x_10, x_20, x_27, x_2, x_26); lean_dec(x_20); lean_dec(x_15); -return x_31; -} +return x_28; } else { -lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; -x_32 = l_Lean_IR_EmitC_emitFnDeclAux___closed__3; -x_33 = lean_string_append(x_21, x_32); -x_34 = lean_box(0); -x_35 = l_Lean_IR_EmitC_emitDeclAux___lambda__3(x_15, x_13, x_14, x_16, x_10, x_20, x_34, x_2, x_33); +lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; +x_29 = l_Lean_IR_EmitC_emitFnDeclAux___closed__3; +x_30 = lean_string_append(x_21, x_29); +x_31 = lean_box(0); +x_32 = l_Lean_IR_EmitC_emitDeclAux___lambda__3(x_15, x_13, x_14, x_16, x_10, x_20, x_31, x_2, x_30); lean_dec(x_20); lean_dec(x_15); -return x_35; +return x_32; } } else { -uint8_t x_36; +uint8_t x_33; lean_dec(x_2); lean_dec(x_16); lean_dec(x_15); lean_dec(x_14); lean_dec(x_13); lean_dec(x_10); -x_36 = !lean_is_exclusive(x_19); -if (x_36 == 0) +x_33 = !lean_is_exclusive(x_19); +if (x_33 == 0) { return x_19; } else { -lean_object* 
x_37; lean_object* x_38; lean_object* x_39; -x_37 = lean_ctor_get(x_19, 0); -x_38 = lean_ctor_get(x_19, 1); -lean_inc(x_38); -lean_inc(x_37); +lean_object* x_34; lean_object* x_35; lean_object* x_36; +x_34 = lean_ctor_get(x_19, 0); +x_35 = lean_ctor_get(x_19, 1); +lean_inc(x_35); +lean_inc(x_34); lean_dec(x_19); -x_39 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_39, 0, x_37); -lean_ctor_set(x_39, 1, x_38); -return x_39; +x_36 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_36, 0, x_34); +lean_ctor_set(x_36, 1, x_35); +return x_36; } } } else { -lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; -x_40 = lean_ctor_get(x_2, 0); -x_41 = lean_ctor_get(x_2, 1); -x_42 = lean_ctor_get(x_2, 3); -x_43 = lean_ctor_get(x_2, 4); -lean_inc(x_43); -lean_inc(x_42); -lean_inc(x_41); +lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; +x_37 = lean_ctor_get(x_2, 0); +x_38 = lean_ctor_get(x_2, 1); +x_39 = lean_ctor_get(x_2, 3); +x_40 = lean_ctor_get(x_2, 4); lean_inc(x_40); +lean_inc(x_39); +lean_inc(x_38); +lean_inc(x_37); lean_dec(x_2); -x_44 = lean_alloc_ctor(0, 5, 0); -lean_ctor_set(x_44, 0, x_40); -lean_ctor_set(x_44, 1, x_41); -lean_ctor_set(x_44, 2, x_9); -lean_ctor_set(x_44, 3, x_42); -lean_ctor_set(x_44, 4, x_43); +x_41 = lean_alloc_ctor(0, 5, 0); +lean_ctor_set(x_41, 0, x_37); +lean_ctor_set(x_41, 1, x_38); +lean_ctor_set(x_41, 2, x_9); +lean_ctor_set(x_41, 3, x_39); +lean_ctor_set(x_41, 4, x_40); lean_inc(x_13); -x_45 = l_Lean_IR_EmitC_toCName(x_13, x_44, x_6); -if (lean_obj_tag(x_45) == 0) +x_42 = l_Lean_IR_EmitC_toCName(x_13, x_41, x_6); +if (lean_obj_tag(x_42) == 0) { -lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; uint8_t x_50; -x_46 = lean_ctor_get(x_45, 0); -lean_inc(x_46); -x_47 = lean_ctor_get(x_45, 1); -lean_inc(x_47); +lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; uint8_t x_47; +x_43 = lean_ctor_get(x_42, 0); +lean_inc(x_43); +x_44 = lean_ctor_get(x_42, 1); +lean_inc(x_44); +lean_dec(x_42); +x_45 = lean_array_get_size(x_14); +x_46 = lean_unsigned_to_nat(0u); +x_47 = lean_nat_dec_eq(x_45, x_46); lean_dec(x_45); -x_48 = lean_array_get_size(x_14); -x_49 = lean_unsigned_to_nat(0u); -x_50 = lean_nat_dec_eq(x_48, x_49); -lean_dec(x_48); -if (x_50 == 0) -{ -uint8_t x_51; -x_51 = l_Lean_IR_EmitC_shouldExport(x_13); -if (x_51 == 0) -{ -lean_object* x_52; lean_object* x_53; -x_52 = lean_box(0); -x_53 = l_Lean_IR_EmitC_emitDeclAux___lambda__3(x_15, x_13, x_14, x_16, x_10, x_46, x_52, x_44, x_47); -lean_dec(x_46); -lean_dec(x_15); -return x_53; -} -else +if (x_47 == 0) { -lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; -x_54 = l_Lean_IR_EmitC_emitFnDeclAux___closed__1; -x_55 = lean_string_append(x_47, x_54); -x_56 = lean_box(0); -x_57 = l_Lean_IR_EmitC_emitDeclAux___lambda__3(x_15, x_13, x_14, x_16, x_10, x_46, x_56, x_44, x_55); -lean_dec(x_46); +lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; +x_48 = l_Lean_IR_EmitC_emitFnDeclAux___closed__1; +x_49 = lean_string_append(x_44, x_48); +x_50 = lean_box(0); +x_51 = l_Lean_IR_EmitC_emitDeclAux___lambda__3(x_15, x_13, x_14, x_16, x_10, x_43, x_50, x_41, x_49); +lean_dec(x_43); lean_dec(x_15); -return x_57; -} +return x_51; } else { -lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; -x_58 = l_Lean_IR_EmitC_emitFnDeclAux___closed__3; -x_59 = lean_string_append(x_47, x_58); -x_60 = lean_box(0); 
-x_61 = l_Lean_IR_EmitC_emitDeclAux___lambda__3(x_15, x_13, x_14, x_16, x_10, x_46, x_60, x_44, x_59); -lean_dec(x_46); +lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; +x_52 = l_Lean_IR_EmitC_emitFnDeclAux___closed__3; +x_53 = lean_string_append(x_44, x_52); +x_54 = lean_box(0); +x_55 = l_Lean_IR_EmitC_emitDeclAux___lambda__3(x_15, x_13, x_14, x_16, x_10, x_43, x_54, x_41, x_53); +lean_dec(x_43); lean_dec(x_15); -return x_61; +return x_55; } } else { -lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; -lean_dec(x_44); +lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; +lean_dec(x_41); lean_dec(x_16); lean_dec(x_15); lean_dec(x_14); lean_dec(x_13); lean_dec(x_10); -x_62 = lean_ctor_get(x_45, 0); -lean_inc(x_62); -x_63 = lean_ctor_get(x_45, 1); -lean_inc(x_63); -if (lean_is_exclusive(x_45)) { - lean_ctor_release(x_45, 0); - lean_ctor_release(x_45, 1); - x_64 = x_45; +x_56 = lean_ctor_get(x_42, 0); +lean_inc(x_56); +x_57 = lean_ctor_get(x_42, 1); +lean_inc(x_57); +if (lean_is_exclusive(x_42)) { + lean_ctor_release(x_42, 0); + lean_ctor_release(x_42, 1); + x_58 = x_42; } else { - lean_dec_ref(x_45); - x_64 = lean_box(0); + lean_dec_ref(x_42); + x_58 = lean_box(0); } -if (lean_is_scalar(x_64)) { - x_65 = lean_alloc_ctor(1, 2, 0); +if (lean_is_scalar(x_58)) { + x_59 = lean_alloc_ctor(1, 2, 0); } else { - x_65 = x_64; + x_59 = x_58; } -lean_ctor_set(x_65, 0, x_62); -lean_ctor_set(x_65, 1, x_63); -return x_65; +lean_ctor_set(x_59, 0, x_56); +lean_ctor_set(x_59, 1, x_57); +return x_59; } } } else { -lean_object* x_66; lean_object* x_67; +lean_object* x_60; lean_object* x_61; lean_dec(x_10); lean_dec(x_9); lean_dec(x_2); lean_dec(x_1); -x_66 = lean_box(0); +x_60 = lean_box(0); if (lean_is_scalar(x_7)) { - x_67 = lean_alloc_ctor(0, 2, 0); + x_61 = lean_alloc_ctor(0, 2, 0); } else { - x_67 = x_7; + x_61 = x_7; } -lean_ctor_set(x_67, 0, x_66); -lean_ctor_set(x_67, 1, x_6); -return x_67; +lean_ctor_set(x_61, 0, x_60); +lean_ctor_set(x_61, 1, x_6); +return x_61; } } else { -lean_object* x_68; lean_object* x_69; +lean_object* x_62; lean_object* x_63; lean_dec(x_10); lean_dec(x_9); lean_dec(x_2); lean_dec(x_1); -x_68 = lean_box(0); +x_62 = lean_box(0); if (lean_is_scalar(x_7)) { - x_69 = lean_alloc_ctor(0, 2, 0); + x_63 = lean_alloc_ctor(0, 2, 0); } else { - x_69 = x_7; + x_63 = x_7; } -lean_ctor_set(x_69, 0, x_68); -lean_ctor_set(x_69, 1, x_6); -return x_69; +lean_ctor_set(x_63, 0, x_62); +lean_ctor_set(x_63, 1, x_6); +return x_63; } } } @@ -15868,32 +15620,6 @@ l_Lean_IR_EmitC_toCName___closed__4 = _init_l_Lean_IR_EmitC_toCName___closed__4( lean_mark_persistent(l_Lean_IR_EmitC_toCName___closed__4); l_Lean_IR_EmitC_toCInitName___closed__1 = _init_l_Lean_IR_EmitC_toCInitName___closed__1(); lean_mark_persistent(l_Lean_IR_EmitC_toCInitName___closed__1); -l_Lean_IR_EmitC_shouldExport___closed__1 = _init_l_Lean_IR_EmitC_shouldExport___closed__1(); -lean_mark_persistent(l_Lean_IR_EmitC_shouldExport___closed__1); -l_Lean_IR_EmitC_shouldExport___closed__2 = _init_l_Lean_IR_EmitC_shouldExport___closed__2(); -lean_mark_persistent(l_Lean_IR_EmitC_shouldExport___closed__2); -l_Lean_IR_EmitC_shouldExport___closed__3 = _init_l_Lean_IR_EmitC_shouldExport___closed__3(); -lean_mark_persistent(l_Lean_IR_EmitC_shouldExport___closed__3); -l_Lean_IR_EmitC_shouldExport___closed__4 = _init_l_Lean_IR_EmitC_shouldExport___closed__4(); -lean_mark_persistent(l_Lean_IR_EmitC_shouldExport___closed__4); -l_Lean_IR_EmitC_shouldExport___closed__5 = 
_init_l_Lean_IR_EmitC_shouldExport___closed__5(); -lean_mark_persistent(l_Lean_IR_EmitC_shouldExport___closed__5); -l_Lean_IR_EmitC_shouldExport___closed__6 = _init_l_Lean_IR_EmitC_shouldExport___closed__6(); -lean_mark_persistent(l_Lean_IR_EmitC_shouldExport___closed__6); -l_Lean_IR_EmitC_shouldExport___closed__7 = _init_l_Lean_IR_EmitC_shouldExport___closed__7(); -lean_mark_persistent(l_Lean_IR_EmitC_shouldExport___closed__7); -l_Lean_IR_EmitC_shouldExport___closed__8 = _init_l_Lean_IR_EmitC_shouldExport___closed__8(); -lean_mark_persistent(l_Lean_IR_EmitC_shouldExport___closed__8); -l_Lean_IR_EmitC_shouldExport___closed__9 = _init_l_Lean_IR_EmitC_shouldExport___closed__9(); -lean_mark_persistent(l_Lean_IR_EmitC_shouldExport___closed__9); -l_Lean_IR_EmitC_shouldExport___closed__10 = _init_l_Lean_IR_EmitC_shouldExport___closed__10(); -lean_mark_persistent(l_Lean_IR_EmitC_shouldExport___closed__10); -l_Lean_IR_EmitC_shouldExport___closed__11 = _init_l_Lean_IR_EmitC_shouldExport___closed__11(); -lean_mark_persistent(l_Lean_IR_EmitC_shouldExport___closed__11); -l_Lean_IR_EmitC_shouldExport___closed__12 = _init_l_Lean_IR_EmitC_shouldExport___closed__12(); -lean_mark_persistent(l_Lean_IR_EmitC_shouldExport___closed__12); -l_Lean_IR_EmitC_shouldExport___closed__13 = _init_l_Lean_IR_EmitC_shouldExport___closed__13(); -lean_mark_persistent(l_Lean_IR_EmitC_shouldExport___closed__13); l_Nat_forM_loop___at_Lean_IR_EmitC_emitFnDeclAux___spec__1___closed__1 = _init_l_Nat_forM_loop___at_Lean_IR_EmitC_emitFnDeclAux___spec__1___closed__1(); lean_mark_persistent(l_Nat_forM_loop___at_Lean_IR_EmitC_emitFnDeclAux___spec__1___closed__1); l_Lean_IR_EmitC_emitFnDeclAux___lambda__1___closed__1 = _init_l_Lean_IR_EmitC_emitFnDeclAux___lambda__1___closed__1(); @@ -16032,6 +15758,8 @@ l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__2 = _init_l_Lean_IR_EmitC_emitM lean_mark_persistent(l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__2); l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__3 = _init_l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__3(); lean_mark_persistent(l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__3); +l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__4 = _init_l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__4(); +lean_mark_persistent(l_Lean_IR_EmitC_emitMainFn___lambda__4___closed__4); l_Lean_IR_EmitC_emitMainFn___closed__1 = _init_l_Lean_IR_EmitC_emitMainFn___closed__1(); lean_mark_persistent(l_Lean_IR_EmitC_emitMainFn___closed__1); l_Lean_IR_EmitC_emitMainFn___closed__2 = _init_l_Lean_IR_EmitC_emitMainFn___closed__2(); diff --git a/tests/bench/speedcenter.yaml b/tests/bench/speedcenter.yaml index ca5ec6cec921..1bfa5292fb8f 100644 --- a/tests/bench/speedcenter.yaml +++ b/tests/bench/speedcenter.yaml @@ -255,13 +255,13 @@ run: max_block_time: '-1' # Maximum number of benchmarking runs - max_runs: 10 + max_runs: 3 # Maximum time the whole benchmarking should take, -1 == no timeout, supports normal time span expressions max_time: '-1' # Minimum number of benchmarking runs - min_runs: 10 + min_runs: 3 # Output file for the benchmarking results out: run_output.yaml diff --git a/tests/compiler/foreign/Makefile b/tests/compiler/foreign/Makefile index 7c836f30047d..9509698cdc80 100644 --- a/tests/compiler/foreign/Makefile +++ b/tests/compiler/foreign/Makefile @@ -27,7 +27,7 @@ $(BIN_OUT)/testcpp.so: $(CPP_OBJS) | $(BIN_OUT) $(CXX) -shared -o $@ $^ `leanc -shared --print-ldflags` $(BIN_OUT)/test: $(LIB_OUT)/libMain.a $(CPP_OBJS) | $(BIN_OUT) - $(CXX) -o $@ $^ `leanc -shared --print-ldflags` -lInit_shared 
-lleanshared $(TEST_SHARED_LINK_FLAGS) + $(CXX) -o $@ $^ `leanc -shared --print-ldflags` -lInit_shared -lleanshared_1 -lleanshared $(TEST_SHARED_LINK_FLAGS) run_test: $(BIN_OUT)/test $^ diff --git a/tests/lean/interactive/incrementalCommand.lean b/tests/lean/interactive/incrementalCommand.lean index b760906119a3..e4cb72df766e 100644 --- a/tests/lean/interactive/incrementalCommand.lean +++ b/tests/lean/interactive/incrementalCommand.lean @@ -67,3 +67,17 @@ import Init.Prelude --^ insert: " " --^ collectDiagnostics #eval "import" + +/-! +`where` should not break incrementality +(used to fail with "(kernel) declaration has metavariables '_example'") +-/ +-- RESET +example : False := by + trivial +where + bar : True := by + trivial + --^ sync + --^ insert: " " + --^ collectDiagnostics diff --git a/tests/lean/interactive/incrementalCommand.lean.expected.out b/tests/lean/interactive/incrementalCommand.lean.expected.out index 4ad2601d9b98..d397a33c3394 100644 --- a/tests/lean/interactive/incrementalCommand.lean.expected.out +++ b/tests/lean/interactive/incrementalCommand.lean.expected.out @@ -23,3 +23,13 @@ w "message": "\"import\"\n", "fullRange": {"start": {"line": 5, "character": 0}, "end": {"line": 5, "character": 5}}}]} +{"version": 2, + "uri": "file:///incrementalCommand.lean", + "diagnostics": + [{"source": "Lean 4", + "severity": 1, + "range": + {"start": {"line": 2, "character": 2}, "end": {"line": 2, "character": 9}}, + "message": "tactic 'assumption' failed\n⊢ False", + "fullRange": + {"start": {"line": 2, "character": 2}, "end": {"line": 2, "character": 9}}}]} diff --git a/tests/lean/run/4573.lean b/tests/lean/run/4573.lean new file mode 100644 index 000000000000..23a851482d9b --- /dev/null +++ b/tests/lean/run/4573.lean @@ -0,0 +1,12 @@ +def test : IO Unit := do + let tmpFile := "4573.tmp" + let baseLines := #["foo", "bar", "foobar"] + let content := baseLines[0] ++ "\r\n" ++ baseLines[1] ++ "\n" ++ baseLines[2] + IO.FS.writeFile tmpFile content + let readLines ← IO.FS.lines tmpFile + IO.println <| baseLines == readLines + IO.FS.removeFile tmpFile + +/-- info: true -/ +#guard_msgs in +#eval test diff --git a/tests/lean/run/4670.lean b/tests/lean/run/4670.lean new file mode 100644 index 000000000000..cf16afd5121d --- /dev/null +++ b/tests/lean/run/4670.lean @@ -0,0 +1,62 @@ +/-! +# Check types when pretty printing dot notation for structure projections + +In type mismatch errors, the 'object of dot notation' might not be valid for dot notation. +-/ + +structure Foo : Type where + out : Nat + +/-! +Was printing `true.out`, but it should have been `Foo.out true`. +-/ +/-- +error: application type mismatch + Foo.out true +argument + true +has type + Bool : Type +but is expected to have type + Foo : Type +--- +info: (sorryAx Foo true).out : Nat +-/ +#guard_msgs in #check Foo.out true + +/-! +Verifying that generalized field notation does not have this bug. +-/ +def Foo.out' (f : Foo) : Nat := f.out +/-- +error: application type mismatch + Foo.out' true +argument + true +has type + Bool : Type +but is expected to have type + Foo : Type +--- +info: (sorryAx Foo true).out' : Nat +-/ +#guard_msgs in #check Foo.out' true +/-! +Verifying that projection notation still pretty prints as normal. +-/ +section +variable (f : Foo) +/-- info: f.out : Nat -/ +#guard_msgs in #check f.out +end + +/-! +Verifying that projection notation still pretty prints through type synonyms.
+-/ +section +def Baz := Foo +variable (f : Baz) +/-- info: f.out : Nat -/ +#guard_msgs in #check f.out +end diff --git a/tests/lean/run/4920.lean b/tests/lean/run/4920.lean new file mode 100644 index 000000000000..cff5316fdada --- /dev/null +++ b/tests/lean/run/4920.lean @@ -0,0 +1,50 @@ +def Vect (n: Nat) (A: Type u) := {l: List A // l.length = n} + +def Vect.cons (a: A) (v: Vect n A): Vect (n + 1) A := + ⟨a::v.val, by simp [v.property]⟩ + +instance: GetElem (Vect n A) Nat A (λ _ i => i < n) where + getElem vec i _ := match vec with | ⟨l, _⟩ => l[i] + +set_option pp.mvars false +/-- +error: type mismatch + xm[i] +has type + Vect m A : outParam (Type _) +but is expected to have type + A : outParam (Type _) +--- +error: type mismatch + h✝ +has type + i < as.length : Prop +but is expected to have type + ?_ : Type _ +--- +error: failed to prove index is valid, possible solutions: + - Use `have`-expressions to prove the index is valid + - Use `a[i]!` notation instead, runtime check is perfomed, and 'Panic' error message is produced if index is not valid + - Use `a[i]?` notation instead, result is an `Option` type + - Use `a[i]'h` notation instead, where `h` is a proof that index is valid +A : Type _ +m i j : Nat +as : List A +xm : List (Vect m A) +h0 : xm.length = as.length +ih : i < (List.zipWith cons as xm).length +jh : j < m +⊢ ?_ (sorryAx (i < as.length → ?_) true ⋯) j +-/ +#guard_msgs in +theorem Vect.aux + {as: List A} {xm: List (Vect m A)} + (h0: xm.length = as.length) + (ih: i < (List.zipWith cons as xm).length) + (jh: j < m) + : ((List.zipWith cons as xm)[i])[j + 1] = + xm[i]'(by simpa[h0] using ih)[j] := by + -- Correct syntax requires an extra pair of parentheses: + -- (xm[i]'(by simpa[h0] using ih))[j] + -- but `lean` should not crash. + sorry diff --git a/tests/lean/run/4947.lean b/tests/lean/run/4947.lean new file mode 100644 index 000000000000..9ac85f4205a7 --- /dev/null +++ b/tests/lean/run/4947.lean @@ -0,0 +1,6 @@ +universe u +class G (A : outParam Nat) where Z : Type u +export G (Z) +abbrev f (a : Nat) : Nat := 2 ^ a +variable [G (f 0)] +example {s : Z} : s = s := by simp only [Nat.reducePow] diff --git a/tests/lean/run/delabApp.lean b/tests/lean/run/delabApp.lean index 4c96e3486558..4d640dedf054 100644 --- a/tests/lean/run/delabApp.lean +++ b/tests/lean/run/delabApp.lean @@ -1,3 +1,4 @@ +import Lean /-! # Testing features of the app delaborator, including overapplication -/ @@ -77,3 +78,36 @@ def g (a : Nat) (b := 1) (c := 2) (d := 3) := a + b + c + d -- Both the `start` and `stop` arguments are omitted. /-- info: fun a => Array.foldl (fun x y => x + y) 0 a : Array Nat → Nat -/ #guard_msgs in #check fun (a : Array Nat) => a.foldl (fun x y => x + y) 0 + + +/-! +Testing overriding the app delaborator with an `@[app_delab]` +-/ + +def myFun (x : Nat) : Nat := x + +/-- info: myFun 2 : Nat -/ +#guard_msgs in #check myFun 2 + +open Lean PrettyPrinter Delaborator in +@[app_delab myFun] def delabMyFun : Delab := withOverApp 0 `(id) + +/-- info: id✝ 2 : Nat -/ +#guard_msgs in #check myFun 2 + +/-! +Testing `@[app_delab]` error conditions. 
+-/ + +/-- error: unknown declaration 'noSuchFunction' -/ +#guard_msgs in +open Lean PrettyPrinter Delaborator in +@[app_delab noSuchFunction] def delabErr1 : Delab := withOverApp 0 `(id) + +def A.f := 0 +def B.f := 0 +open A B +/-- error: ambiguous declaration 'f' -/ +#guard_msgs in +open Lean PrettyPrinter Delaborator in +@[app_delab f] def delabErr2 : Delab := withOverApp 0 `(id) diff --git a/tests/lean/run/libuv.lean b/tests/lean/run/libuv.lean new file mode 100644 index 000000000000..0bad76a4aaf0 --- /dev/null +++ b/tests/lean/run/libuv.lean @@ -0,0 +1,11 @@ +import Lean.Runtime + +-- Non-emscripten build: expect the major version of LibUV +/-- info: 1 -/ +#guard_msgs in +#eval if !System.Platform.isEmscripten then Lean.libUVVersion >>> 16 else 1 + +-- Emscripten build: expect 0 +/-- info: 0 -/ +#guard_msgs in +#eval if System.Platform.isEmscripten then Lean.libUVVersion >>> 16 else 0 diff --git a/tests/lean/run/omega.lean b/tests/lean/run/omega.lean index 45ec32062b3d..c3c97f6990ce 100644 --- a/tests/lean/run/omega.lean +++ b/tests/lean/run/omega.lean @@ -398,6 +398,9 @@ example (n : Nat) : 2 * (n * n) = n * n + n * n := by omega -- example (n : Nat) : 2 * n * n = n * n + n * n := by omega -- example (n : Nat) : n * 2 * n = n * n + n * n := by omega +-- From https://leanprover.zulipchat.com/#narrow/stream/270676-lean4/topic/omega.20regression/near/456539091 +example (a : Nat) : a * 1 = a := by omega + /-! ### Fin -/ -- Test `<` diff --git a/tests/lean/run/structure.lean b/tests/lean/run/structure.lean index 8ee536e3ea1b..5ed24f97198e 100644 --- a/tests/lean/run/structure.lean +++ b/tests/lean/run/structure.lean @@ -40,7 +40,6 @@ do let env ← getEnv check $ !isStructure env `Nat check $ !isStructure env `D IO.println (getStructureFieldsFlattened env `S4) - IO.println (getStructureFields env `D) IO.println (getPathToBaseStructure? env `S1 `S4) IO.println (getParentStructures env `S4) IO.println (getAllParentStructures env `S4) @@ -50,7 +49,6 @@ do let env ← getEnv /-- info: #[const2ModIdx, constants, extensions, extraConstNames, header] #[toS2, toS1, x, y, z, toS3, w, s] -#[] (some [S4.toS2, S2.toS1]) #[S2, S3] #[S2, S1, S3] diff --git a/tests/plugin/SnakeLinter.lean.expected.out b/tests/plugin/SnakeLinter.lean.expected.out index c953031e19fc..413b9d0d6379 100644 --- a/tests/plugin/SnakeLinter.lean.expected.out +++ b/tests/plugin/SnakeLinter.lean.expected.out @@ -1 +1 @@ -SnakeLinter.lean:4:4: error: SNAKES!! +SnakeLinter.lean:4:4: error: linter snakeLinter failed: SNAKES!!
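Background on the LEAN_EXPORT annotations and the libleanshared_1/libleanshared split: keeping each Windows DLL under the linker's exported-symbol limit only works if the set of exported symbols is controlled explicitly, so declarations that must be visible across the new DLL boundary now carry LEAN_EXPORT. The snippet below is a minimal, illustrative sketch of how such a visibility macro is commonly defined; the macro name LEAN_EXPORTING and the header name are assumptions for illustration, and the actual definition used by Lean lives in its runtime headers and may differ in detail.

// visibility_sketch.h -- illustrative sketch only, not Lean's actual header.
// Assumption: LEAN_EXPORTING would be defined while compiling the shared library itself.
#pragma once
#if defined(_WIN32)
#  if defined(LEAN_EXPORTING)
#    define LEAN_EXPORT __declspec(dllexport)   // building the DLL: add the symbol to its export table
#  else
#    define LEAN_EXPORT __declspec(dllimport)   // consuming the DLL: reference the imported symbol
#  endif
#else
#  define LEAN_EXPORT __attribute__((visibility("default")))  // ELF/Mach-O: keep default visibility
#endif

// With exports driven by the annotation, the export table can be restricted to
// the declarations that other libraries actually need:
LEAN_EXPORT void initialize_util_module();   // reachable from the other half of the split library
void internal_helper();                      // unannotated: stays internal

Under this scheme, symbols such as the `name` class and the `operator<<` overloads annotated above are exactly the ones that have to cross from one DLL to the other, which is why they gain LEAN_EXPORT while purely internal helpers do not.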