diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index ad00eeef5d..3655d67034 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -22,6 +22,12 @@ concurrency: jobs: benchmark: runs-on: ubuntu-latest + env: + GH_TOKEN: ${{ github.token }} + strategy: + matrix: + python-version: ['3.9'] + group: [1, 2] steps: # Enable tmate debugging of manually-triggered workflows if the input option was provided @@ -31,41 +37,68 @@ jobs: - uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha }} - - name: Set up Python 3.9 + + - name: Set up Python uses: actions/setup-python@v5 with: - python-version: 3.9 - - name: Install dependencies + python-version: ${{ matrix.python-version }} + + - name: Restore Python environment cache + id: restore-env + uses: actions/cache/restore@v4 + with: + path: .venv-${{ matrix.python-version }} + key: ${{ runner.os }}-venv-${{ matrix.python-version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }} + + - name: Set up virtual environment if not restored from cache + if: steps.restore-env.outputs.cache-hit != 'true' run: | + gh cache list + python -m venv .venv-${{ matrix.python-version }} + source .venv-${{ matrix.python-version }}/bin/activate python -m pip install --upgrade pip pip install -r devtools/dev-requirements.txt - - name: Benchmark with pytest-benchmark + + - name: Benchmark with pytest-benchmark (PR) run: | + source .venv-${{ matrix.python-version }}/bin/activate pwd lscpu cd tests/benchmarks python -m pytest benchmark_cpu_small.py -vv \ --benchmark-save='Latest_Commit' \ --durations=0 \ - --benchmark-save-data + --benchmark-save-data \ + --splits 2 \ + --group ${{ matrix.group }} \ + --splitting-algorithm least_duration + - name: Checkout current master uses: actions/checkout@v4 with: ref: master clean: false + - name: Checkout benchmarks from PR head run: git checkout ${{ github.event.pull_request.head.sha }} -- tests/benchmarks - 
- name: Benchmark with pytest-benchmark + + - name: Benchmark with pytest-benchmark (MASTER) run: | + source .venv-${{ matrix.python-version }}/bin/activate pwd lscpu cd tests/benchmarks python -m pytest benchmark_cpu_small.py -vv \ --benchmark-save='master' \ --durations=0 \ - --benchmark-save-data - - name: put benchmark results in same folder + --benchmark-save-data \ + --splits 2 \ + --group ${{ matrix.group }} \ + --splitting-algorithm least_duration + + - name: Put benchmark results in same folder run: | + source .venv-${{ matrix.python-version }}/bin/activate pwd cd tests/benchmarks find .benchmarks/ -type f -printf "%T@ %p\n" | sort -n | cut -d' ' -f 2- | tail -n 1 > temp1 @@ -75,22 +108,43 @@ jobs: mkdir compare_results cp $t1 compare_results cp $t2 compare_results + + - name: Download artifact + if: always() + uses: actions/download-artifact@v4 + with: + pattern: benchmark_artifact_* + path: tests/benchmarks + + - name: Unzip artifacts if downloaded + run: | + cd tests/benchmarks + ls + if [ -f tests/benchmarks/benchmark_artifact_*.zip ]; then + unzip tests/benchmarks/benchmark_artifact_*.zip -d tests/benchmarks + else + echo "No benchmark artifact file found." 
+ fi + - name: Compare latest commit results to the master branch results run: | - pwd + source .venv-${{ matrix.python-version }}/bin/activate cd tests/benchmarks + pwd python compare_bench_results.py cat commit_msg.txt - - name: comment PR with the results + + - name: Comment PR with the results uses: thollander/actions-comment-pull-request@v2 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: filePath: tests/benchmarks/commit_msg.txt comment_tag: benchmark + - name: Upload benchmark data if: always() uses: actions/upload-artifact@v4 with: - name: benchmark_artifact + name: benchmark_artifact_${{ matrix.group }} path: tests/benchmarks/.benchmarks diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml index 993e56ee4b..ae0be3dbb7 100644 --- a/.github/workflows/black.yml +++ b/.github/workflows/black.yml @@ -6,26 +6,48 @@ jobs: black_format: runs-on: ubuntu-latest + env: + GH_TOKEN: ${{ github.token }} + strategy: + matrix: + python-version: ['3.10'] + steps: - uses: actions/checkout@v4 - - name: Set up Python 3.10 + - name: Set up Python uses: actions/setup-python@v5 with: - python-version: '3.10' - - name: Install dependencies + python-version: ${{ matrix.python-version }} + + - name: Restore Python environment cache + id: restore-env + uses: actions/cache/restore@v4 + with: + path: .venv-${{ matrix.python-version }} + key: ${{ runner.os }}-venv-${{ matrix.python-version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }} + + - name: Set up virtual environment if not restored from cache + if: steps.restore-env.outputs.cache-hit != 'true' run: | + gh cache list + python -m venv .venv-${{ matrix.python-version }} + source .venv-${{ matrix.python-version }}/bin/activate python -m pip install --upgrade pip pip install -r devtools/dev-requirements.txt + - name: Check files using the black formatter run: | + source .venv-${{ matrix.python-version }}/bin/activate black --version black --check desc/ tests/ || black_return_code=$? 
echo "BLACK_RETURN_CODE=$black_return_code" >> $GITHUB_ENV black desc/ tests/ + - name: Annotate diff changes using reviewdog uses: reviewdog/action-suggester@v1 with: tool_name: blackfmt + - name: Fail if not formatted run: | exit ${{ env.BLACK_RETURN_CODE }} diff --git a/.github/workflows/cache_dependencies.yml b/.github/workflows/cache_dependencies.yml new file mode 100644 index 0000000000..d1f63538c5 --- /dev/null +++ b/.github/workflows/cache_dependencies.yml @@ -0,0 +1,55 @@ +name: Cache dependencies +# This workflow is triggered every 2 days and updates the Python +# and pip dependencies cache +on: + schedule: + - cron: '30 4 */2 * *' # This triggers the workflow at 4:30 AM every 2 days + workflow_dispatch: + +jobs: + build: + runs-on: ubuntu-latest + env: + GH_TOKEN: ${{ github.token }} + strategy: + matrix: + python-version: ['3.9', '3.10', '3.11', '3.12'] + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Delete old cached file with same python version + run: | + echo "Current Cached files list" + gh cache list + echo "Deleting cached files with pattern: ${{ runner.os }}-venv-${{ matrix.python-version }}-" + for cache_key in $(gh cache list --json key -q ".[] | select(.key | startswith(\"${{ runner.os }}-venv-${{ matrix.python-version }}-\")) | .key"); do + echo "Deleting cache with key: $cache_key" + gh cache delete "$cache_key" + done + + - name: Set up virtual environment + run: | + python -m venv .venv-${{ matrix.python-version }} + source .venv-${{ matrix.python-version }}/bin/activate + python -m pip install --upgrade pip + pip install -r devtools/dev-requirements.txt + + - name: Cache Python environment + id: cache-env + uses: actions/cache@v4 + with: + path: .venv-${{ matrix.python-version }} + key: ${{ runner.os }}-venv-${{ matrix.python-version }}-${{ hashFiles('devtools/dev-requirements.txt', 
'requirements.txt') }} + + - name: Verify virtual environment activation + run: | + source .venv-${{ matrix.python-version }}/bin/activate + python --version + pip --version + pip list diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index c3ea0f96e9..2eef77dcc0 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -5,17 +5,24 @@ on: [pull_request, workflow_dispatch] jobs: flake8_linting: runs-on: ubuntu-latest + strategy: + matrix: + python-version: ['3.10'] + name: Linting steps: - uses: actions/checkout@v4 - - name: Set up Python 3.10 + - name: Set up Python uses: actions/setup-python@v5 with: - python-version: '3.10' + python-version: ${{ matrix.python-version }} + + # For some reason, loading venv makes this way slower - name: Install dependencies run: | python -m pip install --upgrade pip pip install -r devtools/dev-requirements.txt + - name: flake8 Lint uses: reviewdog/action-flake8@v3 with: diff --git a/.github/workflows/nbtests.yml b/.github/workflows/notebook_tests.yml similarity index 56% rename from .github/workflows/nbtests.yml rename to .github/workflows/notebook_tests.yml index 6d1fc6ca24..4dcbe54e14 100644 --- a/.github/workflows/nbtests.yml +++ b/.github/workflows/notebook_tests.yml @@ -18,28 +18,46 @@ jobs: notebook_tests: runs-on: ubuntu-latest + env: + GH_TOKEN: ${{ github.token }} strategy: matrix: python-version: ['3.10'] - group: [1, 2] + group: [1, 2, 3] steps: - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - name: Install dependencies + + - name: Restore Python environment cache + id: restore-env + uses: actions/cache/restore@v4 + with: + path: .venv-${{ matrix.python-version }} + key: ${{ runner.os }}-venv-${{ matrix.python-version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }} + + - name: Set up virtual environment if not restored from cache + 
if: steps.restore-env.outputs.cache-hit != 'true' run: | + gh cache list + python -m venv .venv-${{ matrix.python-version }} + source .venv-${{ matrix.python-version }}/bin/activate python -m pip install --upgrade pip pip install -r devtools/dev-requirements.txt + - name: Test notebooks with pytest and nbmake run: | + source .venv-${{ matrix.python-version }}/bin/activate pwd lscpu export PYTHONPATH=$(pwd) pytest -v --nbmake "./docs/notebooks" \ --nbmake-timeout=2000 \ --ignore=./docs/notebooks/zernike_eval.ipynb \ - --splits 2 \ + --splits 3 \ --group ${{ matrix.group }} \ + --splitting-algorithm least_duration diff --git a/.github/workflows/regression_test.yml b/.github/workflows/regression_tests.yml similarity index 72% rename from .github/workflows/regression_test.yml rename to .github/workflows/regression_tests.yml index d9ef1072d6..021d25a7a3 100644 --- a/.github/workflows/regression_test.yml +++ b/.github/workflows/regression_tests.yml @@ -18,6 +18,8 @@ jobs: regression_tests: runs-on: ubuntu-latest + env: + GH_TOKEN: ${{ github.token }} strategy: matrix: python-version: ['3.10'] @@ -29,20 +31,36 @@ jobs: uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - name: Install dependencies + + - name: Restore Python environment cache + id: restore-env + uses: actions/cache/restore@v4 + with: + path: .venv-${{ matrix.python-version }} + key: ${{ runner.os }}-venv-${{ matrix.python-version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }} + + - name: Set up virtual environment if not restored from cache + if: steps.restore-env.outputs.cache-hit != 'true' run: | + gh cache list + python -m venv .venv-${{ matrix.python-version }} + source .venv-${{ matrix.python-version }}/bin/activate python -m pip install --upgrade pip pip install -r devtools/dev-requirements.txt pip install matplotlib==3.7.2 + - name: Set Swap Space uses: pierotofy/set-swap-space@master with: swap-size-gb: 10 + - name: Test with pytest run: | + 
source .venv-${{ matrix.python-version }}/bin/activate + pip install matplotlib==3.7.2 pwd lscpu - python -m pytest -v -m regression \ + python -m pytest -v -m regression\ --durations=0 \ --cov-report xml:cov.xml \ --cov-config=setup.cfg \ @@ -54,6 +72,7 @@ jobs: --group ${{ matrix.group }} \ --splitting-algorithm least_duration \ --db ./prof.db + - name: save coverage file and plot comparison results if: always() uses: actions/upload-artifact@v4 @@ -63,6 +82,7 @@ jobs: ./cov.xml ./mpl_results.html ./prof.db + - name: Upload coverage id : codecov uses: Wandalen/wretry.action@v1.3.0 diff --git a/.github/workflows/unittest.yml b/.github/workflows/unit_tests.yml similarity index 67% rename from .github/workflows/unittest.yml rename to .github/workflows/unit_tests.yml index 57e5881a05..bf95d25b28 100644 --- a/.github/workflows/unittest.yml +++ b/.github/workflows/unit_tests.yml @@ -18,12 +18,18 @@ jobs: unit_tests: runs-on: ubuntu-latest + env: + GH_TOKEN: ${{ github.token }} strategy: matrix: combos: [{group: 1, python_version: '3.9'}, {group: 2, python_version: '3.10'}, {group: 3, python_version: '3.11'}, - {group: 4, python_version: '3.12'}] + {group: 4, python_version: '3.12'}, + {group: 5, python_version: '3.12'}, + {group: 6, python_version: '3.12'}, + {group: 7, python_version: '3.12'}, + {group: 8, python_version: '3.12'}] steps: - uses: actions/checkout@v4 @@ -31,17 +37,33 @@ jobs: uses: actions/setup-python@v5 with: python-version: ${{ matrix.combos.python_version }} - - name: Install dependencies + + - name: Restore Python environment cache + id: restore-env + uses: actions/cache/restore@v4 + with: + path: .venv-${{ matrix.combos.python_version }} + key: ${{ runner.os }}-venv-${{ matrix.combos.python_version }}-${{ hashFiles('devtools/dev-requirements.txt', 'requirements.txt') }} + + - name: Set up virtual environment if not restored from cache + if: steps.restore-env.outputs.cache-hit != 'true' run: | + gh cache list + python -m venv .venv-${{ 
matrix.combos.python_version }} + source .venv-${{ matrix.combos.python_version }}/bin/activate python -m pip install --upgrade pip pip install -r devtools/dev-requirements.txt pip install matplotlib==3.7.2 + - name: Set Swap Space uses: pierotofy/set-swap-space@master with: swap-size-gb: 10 + - name: Test with pytest run: | + source .venv-${{ matrix.combos.python_version }}/bin/activate + pip install matplotlib==3.7.2 pwd lscpu python -m pytest -v -m unit \ @@ -52,10 +74,11 @@ jobs: --mpl \ --mpl-results-path=mpl_results.html \ --mpl-generate-summary=html \ - --splits 4 \ + --splits 8 \ --group ${{ matrix.combos.group }} \ --splitting-algorithm least_duration \ --db ./prof.db + - name: save coverage file and plot comparison results if: always() uses: actions/upload-artifact@v4 @@ -65,6 +88,7 @@ jobs: ./cov.xml ./mpl_results.html ./prof.db + - name: Upload coverage id : codecov uses: Wandalen/wretry.action@v1.3.0 diff --git a/.github/workflows/scheduled.yml b/.github/workflows/weekly_tests.yml similarity index 75% rename from .github/workflows/scheduled.yml rename to .github/workflows/weekly_tests.yml index a584db5cb8..2fb309bd8a 100644 --- a/.github/workflows/scheduled.yml +++ b/.github/workflows/weekly_tests.yml @@ -11,11 +11,10 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - combos: [{group: 1, python_version: '3.8'}, - {group: 2, python_version: '3.9'}, - {group: 3, python_version: '3.10'}, - {group: 4, python_version: '3.11'}, - {group: 5, python_version: '3.12'}] + combos: [{group: 1, python_version: '3.9'}, + {group: 2, python_version: '3.10'}, + {group: 3, python_version: '3.11'}, + {group: 4, python_version: '3.12'}] steps: - uses: actions/checkout@v4 @@ -37,6 +36,6 @@ jobs: lscpu python -m pytest -v -m unit \ --durations=0 \ - --splits 5 \ + --splits 4 \ --group ${{ matrix.combos.group }} \ --splitting-algorithm least_duration diff --git a/desc/compute/_core.py b/desc/compute/_core.py index 7bc6e8acb3..c61b6974e6 100644 --- a/desc/compute/_core.py +++ 
b/desc/compute/_core.py @@ -2764,6 +2764,25 @@ def _phi_rr(params, transforms, profiles, data, **kwargs): return data +@register_compute_fun( + name="phi_rrz", + label="\\partial_{\\rho \\rho \\zeta} \\phi", + units="rad", + units_long="radians", + description="Toroidal angle in lab frame, second derivative wrt radial coordinate " + "and first wrt DESC toroidal coordinate", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["omega_rrz"], +) +def _phi_rrz(params, transforms, profiles, data, **kwargs): + data["phi_rrz"] = data["omega_rrz"] + return data + + @register_compute_fun( name="phi_rt", label="\\partial_{\\rho \\theta} \\phi", @@ -2783,6 +2802,25 @@ def _phi_rt(params, transforms, profiles, data, **kwargs): return data +@register_compute_fun( + name="phi_rtz", + label="\\partial_{\\rho \\theta \\zeta} \\phi", + units="rad", + units_long="radians", + description="Toroidal angle in lab frame, third derivative wrt radial, " + "poloidal, and toroidal coordinates", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["omega_rtz"], +) +def _phi_rtz(params, transforms, profiles, data, **kwargs): + data["phi_rtz"] = data["omega_rtz"] + return data + + @register_compute_fun( name="phi_rz", label="\\partial_{\\rho \\zeta} \\phi", @@ -2802,6 +2840,25 @@ def _phi_rz(params, transforms, profiles, data, **kwargs): return data +@register_compute_fun( + name="phi_rzz", + label="\\partial_{\\rho \\zeta \\zeta} \\phi", + units="rad", + units_long="radians", + description="Toroidal angle in lab frame, first derivative wrt radial and " + "second derivative wrt DESC toroidal coordinate", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["omega_rzz"], +) +def _phi_rzz(params, transforms, profiles, data, **kwargs): + data["phi_rzz"] = data["omega_rzz"] + return data + + @register_compute_fun( name="phi_t", label="\\partial_{\\theta} \\phi", @@ -2843,12 +2900,31 @@ def _phi_tt(params, 
transforms, profiles, data, **kwargs): return data +@register_compute_fun( + name="phi_ttz", + label="\\partial_{\\theta \\theta \\zeta} \\phi", + units="rad", + units_long="radians", + description="Toroidal angle in lab frame, second derivative wrt poloidal " + "coordinate and first derivative wrt toroidal coordinate", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["omega_ttz"], +) +def _phi_ttz(params, transforms, profiles, data, **kwargs): + data["phi_ttz"] = data["omega_ttz"] + return data + + @register_compute_fun( name="phi_tz", label="\\partial_{\\theta \\zeta} \\phi", units="rad", units_long="radians", - description="Toroidal angle in lab frame, second derivative wrt poloidal and " + description="Toroidal angle in lab frame, derivative wrt poloidal and " "toroidal coordinate", dim=1, params=[], @@ -2862,6 +2938,25 @@ def _phi_tz(params, transforms, profiles, data, **kwargs): return data +@register_compute_fun( + name="phi_tzz", + label="\\partial_{\\theta \\zeta \\zeta} \\phi", + units="rad", + units_long="radians", + description="Toroidal angle in lab frame, derivative wrt poloidal coordinate and " + "second derivative wrt toroidal coordinate", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["omega_tzz"], +) +def _phi_tzz(params, transforms, profiles, data, **kwargs): + data["phi_tzz"] = data["omega_tzz"] + return data + + @register_compute_fun( name="phi_z", label="\\partial_{\\zeta} \\phi", @@ -2903,6 +2998,25 @@ def _phi_zz(params, transforms, profiles, data, **kwargs): return data +@register_compute_fun( + name="phi_zzz", + label="\\partial_{\\zeta \\zeta \\zeta} \\phi", + units="rad", + units_long="radians", + description="Toroidal angle in lab frame, third derivative wrt toroidal " + "coordinate", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["omega_zzz"], +) +def _phi_zzz(params, transforms, profiles, data, **kwargs): + data["phi_zzz"] = 
data["omega_zzz"] + return data + + @register_compute_fun( name="rho", label="\\rho", @@ -2986,6 +3100,83 @@ def _theta_PEST_r(params, transforms, profiles, data, **kwargs): return data +@register_compute_fun( + name="theta_PEST_rt", + label="\\partial_{\\rho \\theta} \\vartheta", + units="rad", + units_long="radians", + description="PEST straight field line poloidal angular coordinate, derivative wrt " + "radial and DESC poloidal coordinate", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["lambda_rt"], +) +def _theta_PEST_rt(params, transforms, profiles, data, **kwargs): + data["theta_PEST_rt"] = data["lambda_rt"] + return data + + +@register_compute_fun( + name="theta_PEST_rrt", + label="\\partial_{\\rho \\rho \\theta} \\vartheta", + units="rad", + units_long="radians", + description="PEST straight field line poloidal angular coordinate, second " + "derivative wrt radial coordinate and first derivative wrt DESC poloidal " + "coordinate", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["lambda_rrt"], +) +def _theta_PEST_rrt(params, transforms, profiles, data, **kwargs): + data["theta_PEST_rrt"] = data["lambda_rrt"] + return data + + +@register_compute_fun( + name="theta_PEST_rtz", + label="\\partial_{\\rho \\theta \\zeta} \\vartheta", + units="rad", + units_long="radians", + description="PEST straight field line poloidal angular coordinate, derivative wrt " + "radial and DESC poloidal and toroidal coordinates", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["lambda_rtz"], +) +def _theta_PEST_rtz(params, transforms, profiles, data, **kwargs): + data["theta_PEST_rtz"] = data["lambda_rtz"] + return data + + +@register_compute_fun( + name="theta_PEST_rtt", + label="\\partial_{\\rho \\theta \\theta} \\vartheta", + units="rad", + units_long="radians", + description="PEST straight field line poloidal angular coordinate, derivative wrt " + "radial coordinate once and 
DESC poloidal coordinate twice", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["lambda_rtt"], +) +def _theta_PEST_rtt(params, transforms, profiles, data, **kwargs): + data["theta_PEST_rtt"] = data["lambda_rtt"] + return data + + @register_compute_fun( name="theta_PEST_t", label="\\partial_{\\theta} \\vartheta", @@ -3024,6 +3215,25 @@ def _theta_PEST_tt(params, transforms, profiles, data, **kwargs): return data +@register_compute_fun( + name="theta_PEST_ttt", + label="\\partial_{\\theta \\theta \\theta} \\vartheta", + units="rad", + units_long="radians", + description="PEST straight field line poloidal angular coordinate, third " + "derivative wrt poloidal coordinate", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["lambda_ttt"], +) +def _theta_PEST_ttt(params, transforms, profiles, data, **kwargs): + data["theta_PEST_ttt"] = data["lambda_ttt"] + return data + + @register_compute_fun( name="theta_PEST_tz", label="\\partial_{\\theta \\zeta} \\vartheta", @@ -3043,6 +3253,25 @@ def _theta_PEST_tz(params, transforms, profiles, data, **kwargs): return data +@register_compute_fun( + name="theta_PEST_tzz", + label="\\partial_{\\theta \\zeta \\zeta} \\vartheta", + units="rad", + units_long="radians", + description="PEST straight field line poloidal angular coordinate, derivative wrt " + "poloidal coordinate once and toroidal coordinate twice", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["lambda_tzz"], +) +def _theta_PEST_tzz(params, transforms, profiles, data, **kwargs): + data["theta_PEST_tzz"] = data["lambda_tzz"] + return data + + @register_compute_fun( name="theta_PEST_z", label="\\partial_{\\zeta} \\vartheta", @@ -3081,6 +3310,25 @@ def _theta_PEST_zz(params, transforms, profiles, data, **kwargs): return data +@register_compute_fun( + name="theta_PEST_ttz", + label="\\partial_{\\theta \\theta \\zeta} \\vartheta", + units="rad", + units_long="radians", + 
description="PEST straight field line poloidal angular coordinate, second " + "derivative wrt poloidal coordinate and derivative wrt toroidal coordinate", + dim=1, + params=[], + transforms={}, + profiles=[], + coordinates="rtz", + data=["lambda_ttz"], +) +def _theta_PEST_ttz(params, transforms, profiles, data, **kwargs): + data["theta_PEST_ttz"] = data["lambda_ttz"] + return data + + @register_compute_fun( name="zeta", label="\\zeta", diff --git a/desc/compute/_equil.py b/desc/compute/_equil.py index 7cd01491ed..93b2c5232b 100644 --- a/desc/compute/_equil.py +++ b/desc/compute/_equil.py @@ -10,7 +10,7 @@ """ from interpax import interp1d -from scipy.constants import mu_0 +from scipy.constants import elementary_charge, mu_0 from desc.backend import jnp @@ -625,7 +625,7 @@ def _e_sup_helical_times_sqrt_g_mag(params, transforms, profiles, data, **kwargs @register_compute_fun( name="F_anisotropic", - label="F_{anisotropic}", + label="F_{\\mathrm{anisotropic}}", units="N \\cdot m^{-3}", units_long="Newtons / cubic meter", description="Anisotropic force balance error", @@ -843,3 +843,33 @@ def _P_ISS04(params, transforms, profiles, data, **kwargs): ) ) ** (1 / 0.39) return data + + +@register_compute_fun( + name="P_fusion", + label="P_{fusion}", + units="W", + units_long="Watts", + description="Fusion power", + dim=0, + params=[], + transforms={"grid": []}, + profiles=[], + coordinates="", + data=["ni", "", "sqrt(g)"], + resolution_requirement="rtz", + fuel="str: Fusion fuel, assuming a 50/50 mix. One of {'DT'}. 
Default is 'DT'.", +) +def _P_fusion(params, transforms, profiles, data, **kwargs): + energies = {"DT": 3.52e6 + 14.06e6} # eV + fuel = kwargs.get("fuel", "DT") + energy = energies.get(fuel) + + reaction_rate = jnp.sum( + data["ni"] ** 2 + * data[""] + * data["sqrt(g)"] + * transforms["grid"].weights + ) # reactions/s + data["P_fusion"] = reaction_rate * energy * elementary_charge # J/s + return data diff --git a/desc/compute/_field.py b/desc/compute/_field.py index 31a9d58a18..37732b1cdf 100644 --- a/desc/compute/_field.py +++ b/desc/compute/_field.py @@ -74,11 +74,12 @@ def _B_sup_rho(params, transforms, profiles, data, **kwargs): transforms={}, profiles=[], coordinates="rtz", - data=["psi_r/sqrt(g)", "iota", "lambda_z", "omega_z"], + data=["psi_r/sqrt(g)", "iota", "phi_z", "lambda_z"], ) def _B_sup_theta(params, transforms, profiles, data, **kwargs): + # Assumes θ = ϑ − λ. data["B^theta"] = data["psi_r/sqrt(g)"] * ( - data["iota"] * data["omega_z"] + data["iota"] - data["lambda_z"] + data["iota"] * data["phi_z"] - data["lambda_z"] ) return data @@ -94,11 +95,12 @@ def _B_sup_theta(params, transforms, profiles, data, **kwargs): transforms={}, profiles=[], coordinates="rtz", - data=["psi_r/sqrt(g)", "iota", "lambda_t", "omega_t"], + data=["psi_r/sqrt(g)", "iota", "theta_PEST_t", "omega_t"], ) def _B_sup_zeta(params, transforms, profiles, data, **kwargs): + # Assumes ζ = ϕ − ω. 
data["B^zeta"] = data["psi_r/sqrt(g)"] * ( - -data["iota"] * data["omega_t"] + data["lambda_t"] + 1 + -data["iota"] * data["omega_t"] + data["theta_PEST_t"] ) return data @@ -224,21 +226,18 @@ def _psi_r_over_sqrtg_r(params, transforms, profiles, data, **kwargs): "(psi_r/sqrt(g))_r", "iota", "iota_r", + "phi_rz", + "phi_z", "lambda_rz", "lambda_z", - "omega_rz", - "omega_z", ], ) def _B_sup_theta_r(params, transforms, profiles, data, **kwargs): data["B^theta_r"] = data["psi_r/sqrt(g)"] * ( - data["iota"] * data["omega_rz"] - + data["iota_r"] * data["omega_z"] - + data["iota_r"] + data["iota"] * data["phi_rz"] + + data["iota_r"] * data["phi_z"] - data["lambda_rz"] - ) + data["(psi_r/sqrt(g))_r"] * ( - data["iota"] * data["omega_z"] + data["iota"] - data["lambda_z"] - ) + ) + data["(psi_r/sqrt(g))_r"] * (data["iota"] * data["phi_z"] - data["lambda_z"]) return data @@ -261,8 +260,8 @@ def _B_sup_theta_r(params, transforms, profiles, data, **kwargs): "(psi_r/sqrt(g))_r", "iota", "iota_r", - "lambda_rt", - "lambda_t", + "theta_PEST_rt", + "theta_PEST_t", "omega_rt", "omega_t", ], @@ -271,9 +270,9 @@ def _B_sup_zeta_r(params, transforms, profiles, data, **kwargs): data["B^zeta_r"] = data["psi_r/sqrt(g)"] * ( -data["iota"] * data["omega_rt"] - data["iota_r"] * data["omega_t"] - + data["lambda_rt"] + + data["theta_PEST_rt"] ) + data["(psi_r/sqrt(g))_r"] * ( - -data["iota"] * data["omega_t"] + data["lambda_t"] + 1 + -data["iota"] * data["omega_t"] + data["theta_PEST_t"] ) return data @@ -352,16 +351,14 @@ def _psi_r_over_sqrtg_t(params, transforms, profiles, data, **kwargs): "iota", "lambda_tz", "lambda_z", - "omega_tz", - "omega_z", + "phi_tz", + "phi_z", ], ) def _B_sup_theta_t(params, transforms, profiles, data, **kwargs): data["B^theta_t"] = data["psi_r/sqrt(g)"] * ( - data["iota"] * data["omega_tz"] - data["lambda_tz"] - ) + data["(psi_r/sqrt(g))_t"] * ( - data["iota"] * data["omega_z"] + data["iota"] - data["lambda_z"] - ) + data["iota"] * data["phi_tz"] - 
data["lambda_tz"] + ) + data["(psi_r/sqrt(g))_t"] * (data["iota"] * data["phi_z"] - data["lambda_z"]) return data @@ -383,17 +380,17 @@ def _B_sup_theta_t(params, transforms, profiles, data, **kwargs): "psi_r/sqrt(g)", "(psi_r/sqrt(g))_t", "iota", - "lambda_t", - "lambda_tt", + "theta_PEST_t", + "theta_PEST_tt", "omega_t", "omega_tt", ], ) def _B_sup_zeta_t(params, transforms, profiles, data, **kwargs): data["B^zeta_t"] = data["psi_r/sqrt(g)"] * ( - -data["iota"] * data["omega_tt"] + data["lambda_tt"] + -data["iota"] * data["omega_tt"] + data["theta_PEST_tt"] ) + data["(psi_r/sqrt(g))_t"] * ( - -data["iota"] * data["omega_t"] + data["lambda_t"] + 1 + -data["iota"] * data["omega_t"] + data["theta_PEST_t"] ) return data @@ -472,16 +469,14 @@ def _psi_r_over_sqrtg_z(params, transforms, profiles, data, **kwargs): "iota", "lambda_z", "lambda_zz", - "omega_z", - "omega_zz", + "phi_z", + "phi_zz", ], ) def _B_sup_theta_z(params, transforms, profiles, data, **kwargs): data["B^theta_z"] = data["psi_r/sqrt(g)"] * ( - data["iota"] * data["omega_zz"] - data["lambda_zz"] - ) + data["(psi_r/sqrt(g))_z"] * ( - data["iota"] * data["omega_z"] + data["iota"] - data["lambda_z"] - ) + data["iota"] * data["phi_zz"] - data["lambda_zz"] + ) + data["(psi_r/sqrt(g))_z"] * (data["iota"] * data["phi_z"] - data["lambda_z"]) return data @@ -503,17 +498,17 @@ def _B_sup_theta_z(params, transforms, profiles, data, **kwargs): "psi_r/sqrt(g)", "(psi_r/sqrt(g))_z", "iota", - "lambda_t", - "lambda_tz", + "theta_PEST_t", + "theta_PEST_tz", "omega_t", "omega_tz", ], ) def _B_sup_zeta_z(params, transforms, profiles, data, **kwargs): data["B^zeta_z"] = data["psi_r/sqrt(g)"] * ( - -data["iota"] * data["omega_tz"] + data["lambda_tz"] + -data["iota"] * data["omega_tz"] + data["theta_PEST_tz"] ) + data["(psi_r/sqrt(g))_z"] * ( - -data["iota"] * data["omega_t"] + data["lambda_t"] + 1 + -data["iota"] * data["omega_t"] + data["theta_PEST_t"] ) return data @@ -650,31 +645,28 @@ def _psi_r_over_sqrtg_rr(params, 
transforms, profiles, data, **kwargs): "lambda_rrz", "lambda_rz", "lambda_z", - "omega_rrz", - "omega_rz", - "omega_z", + "phi_rrz", + "phi_rz", + "phi_z", ], ) def _B_sup_theta_rr(params, transforms, profiles, data, **kwargs): data["B^theta_rr"] = ( data["psi_r/sqrt(g)"] * ( - data["iota"] * data["omega_rrz"] - + 2 * data["iota_r"] * data["omega_rz"] - + data["iota_rr"] * data["omega_z"] - + data["iota_rr"] + data["iota"] * data["phi_rrz"] + + 2 * data["iota_r"] * data["phi_rz"] + + data["iota_rr"] * data["phi_z"] - data["lambda_rrz"] ) + 2 * data["(psi_r/sqrt(g))_r"] * ( - data["iota"] * data["omega_rz"] - + data["iota_r"] * data["omega_z"] - + data["iota_r"] + data["iota"] * data["phi_rz"] + + data["iota_r"] * data["phi_z"] - data["lambda_rz"] ) - + data["(psi_r/sqrt(g))_rr"] - * (data["iota"] * data["omega_z"] + data["iota"] - data["lambda_z"]) + + data["(psi_r/sqrt(g))_rr"] * (data["iota"] * data["phi_z"] - data["lambda_z"]) ) return data @@ -700,9 +692,9 @@ def _B_sup_theta_rr(params, transforms, profiles, data, **kwargs): "iota", "iota_r", "iota_rr", - "lambda_rrt", - "lambda_rt", - "lambda_t", + "theta_PEST_rrt", + "theta_PEST_rt", + "theta_PEST_t", "omega_rrt", "omega_rt", "omega_t", @@ -715,17 +707,17 @@ def _B_sup_zeta_rr(params, transforms, profiles, data, **kwargs): data["iota"] * data["omega_rrt"] + 2 * data["iota_r"] * data["omega_rt"] + data["iota_rr"] * data["omega_t"] - - data["lambda_rrt"] + - data["theta_PEST_rrt"] ) - 2 * data["(psi_r/sqrt(g))_r"] * ( data["iota"] * data["omega_rt"] + data["iota_r"] * data["omega_t"] - - data["lambda_rt"] + - data["theta_PEST_rt"] ) + data["(psi_r/sqrt(g))_rr"] - * (-data["iota"] * data["omega_t"] + data["lambda_t"] + 1) + * (-data["iota"] * data["omega_t"] + data["theta_PEST_t"]) ) return data @@ -820,19 +812,18 @@ def _psi_r_over_sqrtg_tt(params, transforms, profiles, data, **kwargs): "lambda_ttz", "lambda_tz", "lambda_z", - "omega_ttz", - "omega_tz", - "omega_z", + "phi_ttz", + "phi_tz", + "phi_z", ], ) def 
_B_sup_theta_tt(params, transforms, profiles, data, **kwargs): data["B^theta_tt"] = ( - data["psi_r/sqrt(g)"] * (data["iota"] * data["omega_ttz"] - data["lambda_ttz"]) + data["psi_r/sqrt(g)"] * (data["iota"] * data["phi_ttz"] - data["lambda_ttz"]) + 2 * data["(psi_r/sqrt(g))_t"] - * (data["iota"] * data["omega_tz"] - data["lambda_tz"]) - + data["(psi_r/sqrt(g))_tt"] - * (data["iota"] * data["omega_z"] + data["iota"] - data["lambda_z"]) + * (data["iota"] * data["phi_tz"] - data["lambda_tz"]) + + data["(psi_r/sqrt(g))_tt"] * (data["iota"] * data["phi_z"] - data["lambda_z"]) ) return data @@ -856,9 +847,9 @@ def _B_sup_theta_tt(params, transforms, profiles, data, **kwargs): "(psi_r/sqrt(g))_t", "(psi_r/sqrt(g))_tt", "iota", - "lambda_t", - "lambda_tt", - "lambda_ttt", + "theta_PEST_t", + "theta_PEST_tt", + "theta_PEST_ttt", "omega_t", "omega_tt", "omega_ttt", @@ -866,12 +857,13 @@ def _B_sup_theta_tt(params, transforms, profiles, data, **kwargs): ) def _B_sup_zeta_tt(params, transforms, profiles, data, **kwargs): data["B^zeta_tt"] = ( - -data["psi_r/sqrt(g)"] * (data["iota"] * data["omega_ttt"] - data["lambda_ttt"]) + -data["psi_r/sqrt(g)"] + * (data["iota"] * data["omega_ttt"] - data["theta_PEST_ttt"]) - 2 * data["(psi_r/sqrt(g))_t"] - * (data["iota"] * data["omega_tt"] - data["lambda_tt"]) + * (data["iota"] * data["omega_tt"] - data["theta_PEST_tt"]) + data["(psi_r/sqrt(g))_tt"] - * (-data["iota"] * data["omega_t"] + data["lambda_t"] + 1) + * (-data["iota"] * data["omega_t"] + data["theta_PEST_t"]) ) return data @@ -966,19 +958,18 @@ def _psi_r_over_sqrtg_zz(params, transforms, profiles, data, **kwargs): "lambda_z", "lambda_zz", "lambda_zzz", - "omega_z", - "omega_zz", - "omega_zzz", + "phi_z", + "phi_zz", + "phi_zzz", ], ) def _B_sup_theta_zz(params, transforms, profiles, data, **kwargs): data["B^theta_zz"] = ( - data["psi_r/sqrt(g)"] * (data["iota"] * data["omega_zzz"] - data["lambda_zzz"]) + data["psi_r/sqrt(g)"] * (data["iota"] * data["phi_zzz"] - 
data["lambda_zzz"]) + 2 * data["(psi_r/sqrt(g))_z"] - * (data["iota"] * data["omega_zz"] - data["lambda_zz"]) - + data["(psi_r/sqrt(g))_zz"] - * (data["iota"] * data["omega_z"] + data["iota"] - data["lambda_z"]) + * (data["iota"] * data["phi_zz"] - data["lambda_zz"]) + + data["(psi_r/sqrt(g))_zz"] * (data["iota"] * data["phi_z"] - data["lambda_z"]) ) return data @@ -1002,9 +993,9 @@ def _B_sup_theta_zz(params, transforms, profiles, data, **kwargs): "(psi_r/sqrt(g))_z", "(psi_r/sqrt(g))_zz", "iota", - "lambda_t", - "lambda_tz", - "lambda_tzz", + "theta_PEST_t", + "theta_PEST_tz", + "theta_PEST_tzz", "omega_t", "omega_tz", "omega_tzz", @@ -1012,12 +1003,13 @@ def _B_sup_theta_zz(params, transforms, profiles, data, **kwargs): ) def _B_sup_zeta_zz(params, transforms, profiles, data, **kwargs): data["B^zeta_zz"] = ( - -data["psi_r/sqrt(g)"] * (data["iota"] * data["omega_tzz"] - data["lambda_tzz"]) + -data["psi_r/sqrt(g)"] + * (data["iota"] * data["omega_tzz"] - data["theta_PEST_tzz"]) - 2 * data["(psi_r/sqrt(g))_z"] - * (data["iota"] * data["omega_tz"] - data["lambda_tz"]) + * (data["iota"] * data["omega_tz"] - data["theta_PEST_tz"]) + data["(psi_r/sqrt(g))_zz"] - * (-data["iota"] * data["omega_t"] + data["lambda_t"] + 1) + * (-data["iota"] * data["omega_t"] + data["theta_PEST_t"]) ) return data @@ -1120,31 +1112,29 @@ def _psi_r_over_sqrtg_rt(params, transforms, profiles, data, **kwargs): "lambda_rz", "lambda_tz", "lambda_z", - "omega_rtz", - "omega_rz", - "omega_tz", - "omega_z", + "phi_rtz", + "phi_rz", + "phi_tz", + "phi_z", ], ) def _B_sup_theta_rt(params, transforms, profiles, data, **kwargs): data["B^theta_rt"] = ( data["psi_r/sqrt(g)"] * ( - data["iota"] * data["omega_rtz"] - + data["iota_r"] * data["omega_tz"] + data["iota"] * data["phi_rtz"] + + data["iota_r"] * data["phi_tz"] - data["lambda_rtz"] ) + data["(psi_r/sqrt(g))_r"] - * (data["iota"] * data["omega_tz"] - data["lambda_tz"]) + * (data["iota"] * data["phi_tz"] - data["lambda_tz"]) + 
data["(psi_r/sqrt(g))_t"] * ( - data["iota"] * data["omega_rz"] - + data["iota_r"] * data["omega_z"] - + data["iota_r"] + data["iota"] * data["phi_rz"] + + data["iota_r"] * data["phi_z"] - data["lambda_rz"] ) - + data["(psi_r/sqrt(g))_rt"] - * (data["iota"] * data["omega_z"] + data["iota"] - data["lambda_z"]) + + data["(psi_r/sqrt(g))_rt"] * (data["iota"] * data["phi_z"] - data["lambda_z"]) ) return data @@ -1170,10 +1160,10 @@ def _B_sup_theta_rt(params, transforms, profiles, data, **kwargs): "(psi_r/sqrt(g))_t", "iota", "iota_r", - "lambda_rt", - "lambda_rtt", - "lambda_t", - "lambda_tt", + "theta_PEST_rt", + "theta_PEST_rtt", + "theta_PEST_t", + "theta_PEST_tt", "omega_rt", "omega_rtt", "omega_t", @@ -1186,18 +1176,18 @@ def _B_sup_zeta_rt(params, transforms, profiles, data, **kwargs): * ( data["iota"] * data["omega_rtt"] + data["iota_r"] * data["omega_tt"] - - data["lambda_rtt"] + - data["theta_PEST_rtt"] ) - data["(psi_r/sqrt(g))_r"] - * (data["iota"] * data["omega_tt"] - data["lambda_tt"]) + * (data["iota"] * data["omega_tt"] - data["theta_PEST_tt"]) - data["(psi_r/sqrt(g))_t"] * ( data["iota"] * data["omega_rt"] + data["iota_r"] * data["omega_t"] - - data["lambda_rt"] + - data["theta_PEST_rt"] ) + data["(psi_r/sqrt(g))_rt"] - * (-data["iota"] * data["omega_t"] + data["lambda_t"] + 1) + * (-data["iota"] * data["omega_t"] + data["theta_PEST_t"]) ) return data @@ -1307,21 +1297,20 @@ def _psi_r_over_sqrtg_tz(params, transforms, profiles, data, **kwargs): "lambda_tzz", "lambda_z", "lambda_zz", - "omega_tz", - "omega_tzz", - "omega_z", - "omega_zz", + "phi_tz", + "phi_tzz", + "phi_z", + "phi_zz", ], ) def _B_sup_theta_tz(params, transforms, profiles, data, **kwargs): data["B^theta_tz"] = ( - data["psi_r/sqrt(g)"] * (data["iota"] * data["omega_tzz"] - data["lambda_tzz"]) + data["psi_r/sqrt(g)"] * (data["iota"] * data["phi_tzz"] - data["lambda_tzz"]) + data["(psi_r/sqrt(g))_t"] - * (data["iota"] * data["omega_zz"] - data["lambda_zz"]) + * (data["iota"] * 
data["phi_zz"] - data["lambda_zz"]) + data["(psi_r/sqrt(g))_z"] - * (data["iota"] * data["omega_tz"] - data["lambda_tz"]) - + data["(psi_r/sqrt(g))_tz"] - * (data["iota"] * data["omega_z"] + data["iota"] - data["lambda_z"]) + * (data["iota"] * data["phi_tz"] - data["lambda_tz"]) + + data["(psi_r/sqrt(g))_tz"] * (data["iota"] * data["phi_z"] - data["lambda_z"]) ) return data @@ -1346,10 +1335,10 @@ def _B_sup_theta_tz(params, transforms, profiles, data, **kwargs): "(psi_r/sqrt(g))_tz", "(psi_r/sqrt(g))_z", "iota", - "lambda_t", - "lambda_tt", - "lambda_ttz", - "lambda_tz", + "theta_PEST_t", + "theta_PEST_tt", + "theta_PEST_ttz", + "theta_PEST_tz", "omega_t", "omega_tt", "omega_ttz", @@ -1358,13 +1347,14 @@ def _B_sup_theta_tz(params, transforms, profiles, data, **kwargs): ) def _B_sup_zeta_tz(params, transforms, profiles, data, **kwargs): data["B^zeta_tz"] = ( - -data["psi_r/sqrt(g)"] * (data["iota"] * data["omega_ttz"] - data["lambda_ttz"]) + -data["psi_r/sqrt(g)"] + * (data["iota"] * data["omega_ttz"] - data["theta_PEST_ttz"]) - data["(psi_r/sqrt(g))_t"] - * (data["iota"] * data["omega_tz"] - data["lambda_tz"]) + * (data["iota"] * data["omega_tz"] - data["theta_PEST_tz"]) - data["(psi_r/sqrt(g))_z"] - * (data["iota"] * data["omega_tt"] - data["lambda_tt"]) + * (data["iota"] * data["omega_tt"] - data["theta_PEST_tt"]) + data["(psi_r/sqrt(g))_tz"] - * (-data["iota"] * data["omega_t"] + data["lambda_t"] + 1) + * (-data["iota"] * data["omega_t"] + data["theta_PEST_t"]) ) return data @@ -1473,31 +1463,29 @@ def _psi_r_over_sqrtg_rz(params, transforms, profiles, data, **kwargs): "lambda_rzz", "lambda_z", "lambda_zz", - "omega_rz", - "omega_rzz", - "omega_z", - "omega_zz", + "phi_rz", + "phi_rzz", + "phi_z", + "phi_zz", ], ) def _B_sup_theta_rz(params, transforms, profiles, data, **kwargs): data["B^theta_rz"] = ( data["psi_r/sqrt(g)"] * ( - data["iota"] * data["omega_rzz"] - + data["iota_r"] * data["omega_zz"] + data["iota"] * data["phi_rzz"] + + data["iota_r"] * 
data["phi_zz"] - data["lambda_rzz"] ) + data["(psi_r/sqrt(g))_r"] - * (data["iota"] * data["omega_zz"] - data["lambda_zz"]) + * (data["iota"] * data["phi_zz"] - data["lambda_zz"]) + data["(psi_r/sqrt(g))_z"] * ( - data["iota"] * data["omega_rz"] - + data["iota_r"] * data["omega_z"] - + data["iota_r"] + data["iota"] * data["phi_rz"] + + data["iota_r"] * data["phi_z"] - data["lambda_rz"] ) - + data["(psi_r/sqrt(g))_rz"] - * (data["iota"] * data["omega_z"] + data["iota"] - data["lambda_z"]) + + data["(psi_r/sqrt(g))_rz"] * (data["iota"] * data["phi_z"] - data["lambda_z"]) ) return data @@ -1523,10 +1511,10 @@ def _B_sup_theta_rz(params, transforms, profiles, data, **kwargs): "(psi_r/sqrt(g))_z", "iota", "iota_r", - "lambda_rt", - "lambda_rtz", - "lambda_t", - "lambda_tz", + "theta_PEST_rt", + "theta_PEST_rtz", + "theta_PEST_t", + "theta_PEST_tz", "omega_rt", "omega_rtz", "omega_t", @@ -1539,18 +1527,18 @@ def _B_sup_zeta_rz(params, transforms, profiles, data, **kwargs): * ( data["iota"] * data["omega_rtz"] + data["iota_r"] * data["omega_tz"] - - data["lambda_rtz"] + - data["theta_PEST_rtz"] ) - data["(psi_r/sqrt(g))_r"] - * (data["iota"] * data["omega_tz"] - data["lambda_tz"]) + * (data["iota"] * data["omega_tz"] - data["theta_PEST_tz"]) - data["(psi_r/sqrt(g))_z"] * ( data["iota"] * data["omega_rt"] + data["iota_r"] * data["omega_t"] - - data["lambda_rt"] + - data["theta_PEST_rt"] ) + data["(psi_r/sqrt(g))_rz"] - * (-data["iota"] * data["omega_t"] + data["lambda_t"] + 1) + * (-data["iota"] * data["omega_t"] + data["theta_PEST_t"]) ) return data @@ -1654,6 +1642,25 @@ def _B_sub_zeta(params, transforms, profiles, data, **kwargs): return data +@register_compute_fun( + name="B_phi|r,t", + label="B_{\\phi} = B \\cdot \\mathbf{e}_{\\phi} |_{\\rho, \\theta}", + units="T \\cdot m", + units_long="Tesla * meters", + description="Covariant toroidal component of magnetic field in (ρ,θ,ϕ) " + "coordinates.", + dim=1, + params=[], + transforms={}, + profiles=[], + 
coordinates="rtz", + data=["B", "e_phi|r,t"], +) +def _B_sub_phi_rt(params, transforms, profiles, data, **kwargs): + data["B_phi|r,t"] = dot(data["B"], data["e_phi|r,t"]) + return data + + @register_compute_fun( name="B_rho_r", label="\\partial_{\\rho} B_{\\rho}", @@ -2262,7 +2269,7 @@ def _B_sub_zeta_rz(params, transforms, profiles, data, **kwargs): @register_compute_fun( name="<|B|>_axis", - label="\\lange |\\mathbf{B}| \\rangle_{axis}", + label="\\langle |\\mathbf{B}| \\rangle_{axis}", units="T", units_long="Tesla", description="Average magnitude of magnetic field on the magnetic axis", diff --git a/desc/compute/_omnigenity.py b/desc/compute/_omnigenity.py index 9a2bff6a01..88b653a084 100644 --- a/desc/compute/_omnigenity.py +++ b/desc/compute/_omnigenity.py @@ -20,10 +20,10 @@ @register_compute_fun( name="B_theta_mn", label="B_{\\theta, m, n}", - units="T \\cdot m}", + units="T \\cdot m", units_long="Tesla * meters", description="Fourier coefficients for covariant poloidal component of " - + "magnetic field", + "magnetic field.", dim=1, params=[], transforms={"B": [[0, 0, 0]], "grid": []}, @@ -47,33 +47,35 @@ def fitfun(x): return data +# TODO: do math to change definition of nu so that we can just use B_zeta_mn here @register_compute_fun( - name="B_zeta_mn", - label="B_{\\zeta, m, n}", - units="T \\cdot m}", + name="B_phi_mn", + label="B_{\\phi, m, n}", + units="T \\cdot m", units_long="Tesla * meters", description="Fourier coefficients for covariant toroidal component of " - + "magnetic field", + "magnetic field in (ρ,θ,ϕ) coordinates.", dim=1, params=[], transforms={"B": [[0, 0, 0]]}, profiles=[], coordinates="rtz", - data=["B_zeta"], + data=["B_phi|r,t"], resolution_requirement="tz", grid_requirement={"is_meshgrid": True}, + aliases="B_zeta_mn", # TODO: remove when phi != zeta M_booz="int: Maximum poloidal mode number for Boozer harmonics. Default 2*eq.M", N_booz="int: Maximum toroidal mode number for Boozer harmonics. 
Default 2*eq.N", ) -def _B_zeta_mn(params, transforms, profiles, data, **kwargs): - B_zeta = transforms["grid"].meshgrid_reshape(data["B_zeta"], "rtz") +def _B_phi_mn(params, transforms, profiles, data, **kwargs): + B_phi = transforms["grid"].meshgrid_reshape(data["B_phi|r,t"], "rtz") def fitfun(x): return transforms["B"].fit(x.flatten(order="F")) - B_zeta_mn = vmap(fitfun)(B_zeta) + B_zeta_mn = vmap(fitfun)(B_phi) # modes stored as shape(rho, mn) flattened - data["B_zeta_mn"] = B_zeta_mn.flatten() + data["B_phi_mn"] = B_zeta_mn.flatten() return data @@ -89,7 +91,7 @@ def fitfun(x): transforms={"w": [[0, 0, 0]], "B": [[0, 0, 0]], "grid": []}, profiles=[], coordinates="rtz", - data=["B_theta_mn", "B_zeta_mn"], + data=["B_theta_mn", "B_phi_mn"], grid_requirement={"is_meshgrid": True}, M_booz="int: Maximum poloidal mode number for Boozer harmonics. Default 2*eq.M", N_booz="int: Maximum toroidal mode number for Boozer harmonics. Default 2*eq.N", @@ -108,7 +110,7 @@ def _w_mn(params, transforms, profiles, data, **kwargs): (transforms["grid"].num_rho, -1) ) den_t = mask_t @ jnp.abs(wm) - num_z = (mask_z @ sign(wm)) * data["B_zeta_mn"].reshape( + num_z = (mask_z @ sign(wm)) * data["B_phi_mn"].reshape( (transforms["grid"].num_rho, -1) ) den_z = mask_z @ jnp.abs(NFP * wn) @@ -276,10 +278,10 @@ def _nu_z(params, transforms, profiles, data, **kwargs): transforms={}, profiles=[], coordinates="rtz", - data=["theta", "lambda", "iota", "nu"], + data=["theta_PEST", "iota", "nu"], ) def _theta_B(params, transforms, profiles, data, **kwargs): - data["theta_B"] = data["theta"] + data["lambda"] + data["iota"] * data["nu"] + data["theta_B"] = data["theta_PEST"] + data["iota"] * data["nu"] return data @@ -294,10 +296,10 @@ def _theta_B(params, transforms, profiles, data, **kwargs): transforms={}, profiles=[], coordinates="rtz", - data=["zeta", "nu"], + data=["phi", "nu"], ) def _zeta_B(params, transforms, profiles, data, **kwargs): - data["zeta_B"] = data["zeta"] + data["nu"] + 
data["zeta_B"] = data["phi"] + data["nu"] return data @@ -306,18 +308,20 @@ def _zeta_B(params, transforms, profiles, data, **kwargs): label="\\sqrt{g}_{B}", units="~", units_long="None", - description="Jacobian determinant of Boozer coordinates", + description="Jacobian determinant from Boozer to DESC coordinates", dim=1, params=[], transforms={}, profiles=[], coordinates="rtz", - data=["lambda_t", "lambda_z", "nu_t", "nu_z", "iota"], + data=["theta_PEST_t", "theta_PEST_z", "phi_t", "phi_z", "nu_t", "nu_z", "iota"], ) def _sqrtg_B(params, transforms, profiles, data, **kwargs): - data["sqrt(g)_B"] = (1 + data["lambda_t"]) * (1 + data["nu_z"]) + ( - data["iota"] - data["lambda_z"] - ) * data["nu_t"] + data["sqrt(g)_B"] = ( + data["theta_PEST_t"] * (data["phi_z"] + data["nu_z"]) + - data["theta_PEST_z"] * (data["phi_t"] + data["nu_t"]) + + data["iota"] * (data["nu_t"] * data["phi_z"] - data["nu_z"] * data["phi_t"]) + ) return data diff --git a/desc/compute/_profiles.py b/desc/compute/_profiles.py index 84de48e576..940a463951 100644 --- a/desc/compute/_profiles.py +++ b/desc/compute/_profiles.py @@ -764,6 +764,7 @@ def _iota(params, transforms, profiles, data, **kwargs): data["iota"] = profiles["iota"].compute(transforms["grid"], params["i_l"], dr=0) elif profiles["current"] is not None: # See the document attached to GitHub pull request #556 for the math. + # Assumes ζ = ϕ − ω and θ = ϑ − λ. data["iota"] = transforms["grid"].replace_at_axis( safediv(data["iota_num"], data["iota_den"]), lambda: safediv(data["iota_num_r"], data["iota_den_r"]), @@ -792,6 +793,7 @@ def _iota_r(params, transforms, profiles, data, **kwargs): ) elif profiles["current"] is not None: # See the document attached to GitHub pull request #556 for the math. + # Assumes ζ = ϕ − ω and θ = ϑ − λ. 
data["iota_r"] = transforms["grid"].replace_at_axis( safediv( data["iota_num_r"] * data["iota_den"] @@ -835,6 +837,7 @@ def _iota_rr(params, transforms, profiles, data, **kwargs): ) elif profiles["current"] is not None: # See the document attached to GitHub pull request #556 for the math. + # Assumes ζ = ϕ − ω and θ = ϑ − λ. data["iota_rr"] = transforms["grid"].replace_at_axis( safediv( data["iota_num_rr"] * data["iota_den"] ** 2 @@ -922,7 +925,7 @@ def _iota_num_current(params, transforms, profiles, data, **kwargs): iota = profiles["iota"].compute(transforms["grid"], params["i_l"], dr=0) data["iota_num current"] = iota * data["iota_den"] - data["iota_num vacuum"] elif profiles["current"] is not None: - # 4π^2 I = 4π^2 (mu_0 current / 2π) = 2π mu_0 current + # 4π² I = 4π² (μ₀ current / 2π) = 2π μ₀ current current = profiles["current"].compute(transforms["grid"], params["c_l"], dr=0) current_r = profiles["current"].compute(transforms["grid"], params["c_l"], dr=1) data["iota_num current"] = ( @@ -954,6 +957,7 @@ def _iota_num_current(params, transforms, profiles, data, **kwargs): ) def _iota_num_vacuum(params, transforms, profiles, data, **kwargs): """Vacuum contribution to the numerator of rotational transform formula.""" + # Assumes ζ = ϕ − ω and θ = ϑ − λ. iota_num_vacuum = transforms["grid"].replace_at_axis( safediv( data["lambda_z"] * data["g_tt"] - (1 + data["lambda_t"]) * data["g_tz"], @@ -1035,6 +1039,7 @@ def _iota_num_r_current(params, transforms, profiles, data, **kwargs): resolution_requirement="tz", ) def _iota_num_r_vacuum(params, transforms, profiles, data, **kwargs): + # Assumes ζ = ϕ − ω and θ = ϑ − λ. iota_num_vacuum = safediv( data["lambda_z"] * data["g_tt"] - (1 + data["lambda_t"]) * data["g_tz"], data["sqrt(g)"], @@ -1154,11 +1159,11 @@ def _iota_num_rr(params, transforms, profiles, data, **kwargs): Computes d2(𝛼+𝛽)/d𝜌2 as defined in the document attached to the description of GitHub pull request #556. 
𝛼 supplements the rotational transform with an additional term to account for the enclosed net toroidal current. + Assumes ζ = ϕ − ω and θ = ϑ − λ. """ if profiles["iota"] is not None: data["iota_num_rr"] = jnp.nan * data["0"] elif profiles["current"] is not None: - # 4π^2 I = 4π^2 (mu_0 current / 2π) = 2π mu_0 current current = profiles["current"].compute(transforms["grid"], params["c_l"], dr=0) current_r = profiles["current"].compute(transforms["grid"], params["c_l"], dr=1) current_rr = profiles["current"].compute( @@ -1167,6 +1172,7 @@ def _iota_num_rr(params, transforms, profiles, data, **kwargs): current_rrr = profiles["current"].compute( transforms["grid"], params["c_l"], dr=3 ) + # 4π² I = 4π² (μ₀ current / 2π) = 2π μ₀ current alpha_rr = ( jnp.pi * mu_0 @@ -1283,6 +1289,7 @@ def _iota_num_rrr(params, transforms, profiles, data, **kwargs): Computes d3(𝛼+𝛽)/d𝜌3 as defined in the document attached to the description of GitHub pull request #556. 𝛼 supplements the rotational transform with an additional term to account for the enclosed net toroidal current. + Assumes ζ = ϕ − ω and θ = ϑ − λ. """ if profiles["iota"] is not None: data["iota_num_rrr"] = jnp.nan * data["0"] @@ -1298,7 +1305,7 @@ def _iota_num_rrr(params, transforms, profiles, data, **kwargs): current_rrrr = profiles["current"].compute( transforms["grid"], params["c_l"], dr=4 ) - # 4π^2 I = 4π^2 (mu_0 current / 2π) = 2π mu_0 current + # 4π² I = 4π² (μ₀ current / 2π) = 2π μ₀ current alpha_rrr = ( jnp.pi * mu_0 @@ -1402,14 +1409,14 @@ def _iota_den(params, transforms, profiles, data, **kwargs): """Denominator of rotational transform formula. Computes 𝛾 as defined in the document attached to the description - of GitHub pull request #556. + of GitHub pull request #556. Assumes ζ = ϕ − ω and θ = ϑ − λ. 
""" gamma = safediv( (1 + data["omega_z"]) * data["g_tt"] - data["omega_t"] * data["g_tz"], data["sqrt(g)"], ) # Assumes toroidal stream function behaves such that the magnetic axis limit - # of gamma is zero (as it would if omega = 0 identically). + # of γ is zero (as it would if ω = 0 identically). gamma = transforms["grid"].replace_at_axis( surface_integrals(transforms["grid"], gamma), 0 ) @@ -1447,7 +1454,7 @@ def _iota_den_r(params, transforms, profiles, data, **kwargs): """Denominator of rotational transform formula, first radial derivative. Computes d𝛾/d𝜌 as defined in the document attached to the description - of GitHub pull request #556. + of GitHub pull request #556. Assumes ζ = ϕ − ω and θ = ϑ − λ. """ gamma = safediv( (1 + data["omega_z"]) * data["g_tt"] - data["omega_t"] * data["g_tz"], @@ -1514,7 +1521,7 @@ def _iota_den_rr(params, transforms, profiles, data, **kwargs): """Denominator of rotational transform formula, second radial derivative. Computes d2𝛾/d𝜌2 as defined in the document attached to the description - of GitHub pull request #556. + of GitHub pull request #556. Assumes ζ = ϕ − ω and θ = ϑ − λ. """ gamma = safediv( (1 + data["omega_z"]) * data["g_tt"] - data["omega_t"] * data["g_tz"], @@ -1609,7 +1616,7 @@ def _iota_den_rrr(params, transforms, profiles, data, **kwargs): """Denominator of rotational transform formula, third radial derivative. Computes d3𝛾/d𝜌3 as defined in the document attached to the description - of GitHub pull request #556. + of GitHub pull request #556. Assumes ζ = ϕ − ω and θ = ϑ − λ. """ gamma = safediv( (1 + data["omega_z"]) * data["g_tt"] - data["omega_t"] * data["g_tz"], @@ -1675,9 +1682,12 @@ def _iota_den_rrr(params, transforms, profiles, data, **kwargs): axis_limit_data=["iota_rr", "psi_rr"], ) def _iota_psi(params, transforms, profiles, data, **kwargs): - # Existence of limit at magnetic axis requires ∂ᵨ iota = 0 at axis. 
- # Assume iota may be expanded as an even power series of ρ so that this - # condition is satisfied. + """∂ι/∂ψ. + + Existence of limit at magnetic axis requires ∂ι/∂ρ = 0 at axis. + Assume ι may be expanded as an even power series of ρ so that this + condition is satisfied. + """ data["iota_psi"] = transforms["grid"].replace_at_axis( safediv(data["iota_r"], data["psi_r"]), lambda: safediv(data["iota_rr"], data["psi_rr"]), @@ -1909,3 +1919,51 @@ def _shear(params, transforms, profiles, data, **kwargs): None, ) return data + + +@register_compute_fun( + name="<sigma*nu>", + label="\\langle\\sigma\\nu\\rangle", + units="m^3 \\cdot s^{-1}", + units_long="cubic meters / second", + description="Thermal reactivity from Bosch-Hale parameterization", + dim=1, + params=[], + transforms={"grid": []}, + profiles=[], + coordinates="r", + data=["Ti"], + fuel="str: Fusion fuel, assuming a 50/50 mix. One of {'DT'}. Default is 'DT'.", +) +def _reactivity(params, transforms, profiles, data, **kwargs): + # Bosch and Hale. “Improved Formulas for Fusion Cross-Sections and Thermal + # Reactivities.” Nuclear Fusion 32 (April 1992): 611-631. + # https://doi.org/10.1088/0029-5515/32/4/I07. 
+ coefficients = { + "DT": { + "B_G": 34.382, + "mc2": 1124656, + "C1": 1.17302e-9, + "C2": 1.51361e-2, + "C3": 7.51886e-2, + "C4": 4.60643e-3, + "C5": 1.35000e-2, + "C6": -1.06750e-4, + "C7": 1.36600e-5, + } + } + fuel = kwargs.get("fuel", "DT") + coeffs = coefficients.get(fuel) + + T = data["Ti"] / 1e3 # keV + theta = T / ( + 1 + - (T * (coeffs["C2"] + T * (coeffs["C4"] + T * coeffs["C6"]))) + / (1 + T * (coeffs["C3"] + T * (coeffs["C5"] + T * coeffs["C7"]))) + ) + xi = (coeffs["B_G"] ** 2 / (4 * theta)) ** (1 / 3) + sigma_nu = ( + coeffs["C1"] * theta * jnp.sqrt(xi / (coeffs["mc2"] * T**3)) * jnp.exp(-3 * xi) + ) # cm^3/s + data["<sigma*nu>"] = sigma_nu / 1e6 # m^3/s + return data diff --git a/desc/equilibrium/coords.py b/desc/equilibrium/coords.py index a89742b40f..bb9b5b8be9 100644 --- a/desc/equilibrium/coords.py +++ b/desc/equilibrium/coords.py @@ -91,13 +91,28 @@ def map_coordinates( # noqa: C901 inbasis = tuple(inbasis) outbasis = tuple(outbasis) + basis_derivs = tuple(f"{X}_{d}" for X in inbasis for d in ("r", "t", "z")) + for key in basis_derivs: + errorif( + key not in data_index["desc.equilibrium.equilibrium.Equilibrium"], + NotImplementedError, + f"don't have recipe to compute partial derivative {key}", + ) + + profiles = get_profiles(inbasis + basis_derivs, eq) + # TODO: make this work for permutations of in/out basis if outbasis == ("rho", "theta", "zeta"): - # TODO: get iota if not supplied using below logic - if inbasis == ("rho", "alpha", "zeta") and "iota" in kwargs: + if inbasis == ("rho", "alpha", "zeta"): + if "iota" in kwargs: + iota = kwargs.pop("iota") + else: + if profiles["iota"] is None: + profiles["iota"] = eq.get_profile(["iota", "iota_r"], params=params) + iota = profiles["iota"].compute(Grid(coords, sort=False, jitable=True)) return _map_clebsch_coordinates( coords, - kwargs.pop("iota"), + iota, params["L_lmn"], eq.L_basis, guess[:, 1] if guess is not None else None, @@ -118,29 +133,20 @@ **kwargs, ) - 
basis_derivs = tuple(f"{X}_{d}" for X in inbasis for d in ("r", "t", "z")) - for key in basis_derivs: - errorif( - key not in data_index["desc.equilibrium.equilibrium.Equilibrium"], - NotImplementedError, - f"don't have recipe to compute partial derivative {key}", - ) + # do surface average to get iota once + if "iota" in profiles and profiles["iota"] is None: + profiles["iota"] = eq.get_profile(["iota", "iota_r"], params=params) + params["i_l"] = profiles["iota"].params rhomin = kwargs.pop("rhomin", tol / 10) warnif(period is None, msg="Assuming no periodicity.") period = np.asarray(setdefault(period, (np.inf, np.inf, np.inf))) coords = _periodic(coords, period) - profiles = get_profiles(inbasis + basis_derivs, eq) p = "desc.equilibrium.equilibrium.Equilibrium" names = inbasis + basis_derivs + outbasis deps = list(set(get_data_deps(names, obj=p) + list(names))) - # do surface average to get iota once - if "iota" in profiles and profiles["iota"] is None: - profiles["iota"] = eq.get_profile(["iota", "iota_r"], params=params) - params["i_l"] = profiles["iota"].params - @functools.partial(jit, static_argnums=1) def compute(y, basis): grid = Grid(y, sort=False, jitable=True) diff --git a/desc/geometry/curve.py b/desc/geometry/curve.py index e2c583ea6f..2e141710fb 100644 --- a/desc/geometry/curve.py +++ b/desc/geometry/curve.py @@ -203,13 +203,16 @@ def Z_n(self, new): ) @classmethod - def from_input_file(cls, path): + def from_input_file(cls, path, **kwargs): """Create a axis curve from Fourier coefficients in a DESC or VMEC input file. Parameters ---------- path : Path-like or str Path to DESC or VMEC input file. + **kwargs : dict, optional + keyword arguments to pass to the constructor of the + FourierRZCurve being created. 
Returns ------- @@ -227,6 +230,7 @@ def from_input_file(cls, path): inputs["axis"][:, 0].astype(int), inputs["NFP"], inputs["sym"], + **kwargs, ) return curve diff --git a/desc/geometry/surface.py b/desc/geometry/surface.py index 79f1b871a9..2f74200aaa 100644 --- a/desc/geometry/surface.py +++ b/desc/geometry/surface.py @@ -298,13 +298,16 @@ def set_coeffs(self, m, n=0, R=None, Z=None): self.Z_lmn = put(self.Z_lmn, idxZ, ZZ) @classmethod - def from_input_file(cls, path): + def from_input_file(cls, path, **kwargs): """Create a surface from Fourier coefficients in a DESC or VMEC input file. Parameters ---------- path : Path-like or str Path to DESC or VMEC input file. + **kwargs : dict, optional + keyword arguments to pass to the constructor of the + FourierRZToroidalSurface being created. Returns ------- @@ -328,6 +331,7 @@ def from_input_file(cls, path): inputs["surface"][:, 1:3].astype(int), inputs["NFP"], inputs["sym"], + **kwargs, ) return surf diff --git a/desc/grid.py b/desc/grid.py index ee471e5d1b..4f318afcaf 100644 --- a/desc/grid.py +++ b/desc/grid.py @@ -1295,7 +1295,10 @@ def _create_nodes(self, L=1, M=1, N=1, NFP=1): self._N = check_nonnegint(N, "N", False) self._NFP = check_posint(NFP, "NFP", False) self._period = (np.inf, 2 * np.pi, 2 * np.pi / self._NFP) - L = L + 1 + # floor divide (L+2) by 2 bc only need (L+1)/2 points to + # integrate L-th order jacobi polynomial exactly, so this + # ensures we have enough pts for both odd and even L + L = (L + 2) // 2 M = 2 * M + 1 N = 2 * N + 1 diff --git a/desc/objectives/__init__.py b/desc/objectives/__init__.py index 0443a8a504..0fb9ce1329 100644 --- a/desc/objectives/__init__.py +++ b/desc/objectives/__init__.py @@ -11,7 +11,6 @@ QuadraticFlux, ToroidalFlux, ) -from ._confinement import HeatingPowerISS04 from ._equilibrium import ( CurrentDensity, Energy, @@ -39,6 +38,7 @@ QuasisymmetryTripleProduct, QuasisymmetryTwoTerm, ) +from ._power_balance import FusionPower, HeatingPowerISS04 from ._profiles import 
Pressure, RotationalTransform, Shear, ToroidalCurrent from ._stability import MagneticWell, MercierStability from .getters import ( diff --git a/desc/objectives/_coils.py b/desc/objectives/_coils.py index 5e961523e5..58049ccc9f 100644 --- a/desc/objectives/_coils.py +++ b/desc/objectives/_coils.py @@ -124,6 +124,12 @@ def _prune_coilset_tree(coilset): # get individual coils from coilset coils, structure = tree_flatten(coil, is_leaf=_is_single_coil) + for c in coils: + errorif( + not isinstance(c, _Coil), + TypeError, + f"Expected object of type Coil, got {type(c)}", + ) self._num_coils = len(coils) # map grid to list of length coils diff --git a/desc/objectives/_equilibrium.py b/desc/objectives/_equilibrium.py index 624fd99023..dc2f4bbb22 100644 --- a/desc/objectives/_equilibrium.py +++ b/desc/objectives/_equilibrium.py @@ -557,7 +557,7 @@ class HelicalForceBalance(_Objective): _equilibrium = True _coordinates = "rtz" _units = "(N)" - _print_value_fmt = "Helical force error: {:10.3e}, " + _print_value_fmt = "Helical force error: " def __init__( self, diff --git a/desc/objectives/_confinement.py b/desc/objectives/_power_balance.py similarity index 52% rename from desc/objectives/_confinement.py rename to desc/objectives/_power_balance.py index b768fa5e35..299b248358 100644 --- a/desc/objectives/_confinement.py +++ b/desc/objectives/_power_balance.py @@ -9,6 +9,188 @@ from .objective_funs import _Objective +class FusionPower(_Objective): + """Fusion power. + + P = e E ∫ n^2 ⟨σν⟩ dV (W) + + References + ---------- + https://doi.org/10.1088/0029-5515/32/4/I07. + Improved Formulas for Fusion Cross-Sections and Thermal Reactivities. + H.-S. Bosch and G.M. Hale. Nucl. Fusion April 1992; 32 (4): 611-631. + + Parameters + ---------- + eq : Equilibrium + Equilibrium that will be optimized to satisfy the Objective. + target : {float, ndarray}, optional + Target value(s) of the objective. Only used if bounds is None. + Must be broadcastable to Objective.dim_f. 
Defaults to ``target=1e9``. + bounds : tuple of {float, ndarray}, optional + Lower and upper bounds on the objective. Overrides target. + Both bounds must be broadcastable to Objective.dim_f. + Defaults to ``target=1e9``. + weight : {float, ndarray}, optional + Weighting to apply to the Objective, relative to other Objectives. + Must be broadcastable to Objective.dim_f + normalize : bool, optional + Whether to compute the error in physical units or non-dimensionalize. + normalize_target : bool, optional + Whether target and bounds should be normalized before comparing to computed + values. If `normalize` is `True` and the target is in physical units, + this should also be set to True. + loss_function : {None, 'mean', 'min', 'max'}, optional + Loss function to apply to the objective values once computed. This loss function + is called on the raw compute value, before any shifting, scaling, or + normalization. Note: Has no effect for this objective. + deriv_mode : {"auto", "fwd", "rev"} + Specify how to compute jacobian matrix, either forward mode or reverse mode AD. + "auto" selects forward or reverse mode based on the size of the input and output + of the objective. Has no effect on self.grad or self.hess which always use + reverse mode and forward over reverse mode respectively. + fuel : str, optional + Fusion fuel, assuming a 50/50 mix. One of {'DT'}. Default = 'DT'. + grid : Grid, optional + Collocation grid used to compute the intermediate quantities. + Defaults to ``QuadratureGrid(eq.L_grid, eq.M_grid, eq.N_grid, eq.NFP)``. + name : str, optional + Name of the objective function. 
+ + """ + + _scalar = True + _units = "(W)" + _print_value_fmt = "Fusion power: " + + def __init__( + self, + eq, + target=None, + bounds=None, + weight=1, + normalize=True, + normalize_target=True, + loss_function=None, + deriv_mode="auto", + fuel="DT", + grid=None, + name="fusion power", + ): + errorif( + fuel not in ["DT"], ValueError, f"fuel must be one of ['DT'], got {fuel}." + ) + if target is None and bounds is None: + target = 1e9 + self._fuel = fuel + self._grid = grid + super().__init__( + things=eq, + target=target, + bounds=bounds, + weight=weight, + normalize=normalize, + normalize_target=normalize_target, + loss_function=loss_function, + deriv_mode=deriv_mode, + name=name, + ) + + def build(self, use_jit=True, verbose=1): + """Build constant arrays. + + Parameters + ---------- + use_jit : bool, optional + Whether to just-in-time compile the objective and derivatives. + verbose : int, optional + Level of output. + + """ + eq = self.things[0] + errorif( + eq.electron_density is None, + ValueError, + "Equilibrium must have an electron density profile.", + ) + errorif( + eq.ion_temperature is None, + ValueError, + "Equilibrium must have an ion temperature profile.", + ) + if self._grid is None: + self._grid = QuadratureGrid( + L=eq.L_grid, + M=eq.M_grid, + N=eq.N_grid, + NFP=eq.NFP, + ) + self._dim_f = 1 + self._data_keys = ["P_fusion"] + + timer = Timer() + if verbose > 0: + print("Precomputing transforms") + timer.start("Precomputing transforms") + + self._constants = { + "profiles": get_profiles(self._data_keys, obj=eq, grid=self._grid), + "transforms": get_transforms(self._data_keys, obj=eq, grid=self._grid), + } + + timer.stop("Precomputing transforms") + if verbose > 1: + timer.disp("Precomputing transforms") + + if self._normalize: + scales = compute_scaling_factors(eq) + self._normalization = scales["W_p"] + + super().build(use_jit=use_jit, verbose=verbose) + + def compute(self, params, constants=None): + """Compute fusion power. 
+ + Parameters + ---------- + params : dict + Dictionary of equilibrium or surface degrees of freedom, eg + Equilibrium.params_dict + constants : dict + Dictionary of constant data, eg transforms, profiles etc. Defaults to + self.constants + + Returns + ------- + P : float + Fusion power (W). + + """ + if constants is None: + constants = self.constants + data = compute_fun( + "desc.equilibrium.equilibrium.Equilibrium", + self._data_keys, + params=params, + transforms=constants["transforms"], + profiles=constants["profiles"], + fuel=self.fuel, + ) + return data["P_fusion"] + + @property + def fuel(self): + """str: Fusion fuel, assuming a 50/50 mix. One of {'DT'}. Default = 'DT'.""" + return self._fuel + + @fuel.setter + def fuel(self, new): + errorif( + new not in ["DT"], ValueError, f"fuel must be one of ['DT'], got {new}." + ) + self._fuel = new + + class HeatingPowerISS04(_Objective): """Heating power required by the ISS04 energy confinement time scaling. @@ -64,7 +246,7 @@ class HeatingPowerISS04(_Objective): _scalar = True _units = "(W)" - _print_value_fmt = "Heating power: {:10.3e} " + _print_value_fmt = "Heating power: " def __init__( self, diff --git a/desc/plotting.py b/desc/plotting.py index ea641dbb41..6e875bd2b9 100644 --- a/desc/plotting.py +++ b/desc/plotting.py @@ -1352,10 +1352,9 @@ def plot_section( phi = np.atleast_1d(phi) nphi = len(phi) if grid is None: - nfp = eq.NFP grid_kwargs = { "L": 25, - "NFP": nfp, + "NFP": 1, "axis": False, "theta": np.linspace(0, 2 * np.pi, 91, endpoint=True), "zeta": phi, @@ -1610,9 +1609,14 @@ def plot_surfaces(eq, rho=8, theta=8, phi=None, ax=None, return_data=False, **kw phi = np.atleast_1d(phi) nphi = len(phi) + # do not need NFP supplied to these grids as + # the above logic takes care of the correct phi range + # if defaults are requested. Setting NFP here instead + # can create reshaping issues when phi is supplied and gets + # truncated by 2pi/NFP. 
See PR #1204 grid_kwargs = { "rho": rho, - "NFP": nfp, + "NFP": 1, "theta": np.linspace(0, 2 * np.pi, NT, endpoint=True), "zeta": phi, } @@ -1631,7 +1635,7 @@ def plot_surfaces(eq, rho=8, theta=8, phi=None, ax=None, return_data=False, **kw ) grid_kwargs = { "rho": np.linspace(0, 1, NR), - "NFP": nfp, + "NFP": 1, "theta": theta, "zeta": phi, } @@ -1960,7 +1964,7 @@ def plot_boundary(eq, phi=None, plot_axis=True, ax=None, return_data=False, **kw plot_axis = plot_axis and eq.L > 0 rho = np.array([0.0, 1.0]) if plot_axis else np.array([1.0]) - grid_kwargs = {"NFP": eq.NFP, "rho": rho, "theta": 100, "zeta": phi} + grid_kwargs = {"NFP": 1, "rho": rho, "theta": 100, "zeta": phi} grid = _get_grid(**grid_kwargs) nr, nt, nz = grid.num_rho, grid.num_theta, grid.num_zeta grid = Grid( @@ -2030,6 +2034,9 @@ def plot_boundaries( ): """Plot stellarator boundaries at multiple toroidal coordinates. + NOTE: If attempting to plot objects with differing NFP, `phi` must + be given explicitly. + Parameters ---------- eqs : array-like of Equilibrium, Surface or EquilibriaFamily @@ -2085,7 +2092,21 @@ def plot_boundaries( fig, ax = plot_boundaries((eq1, eq2, eq3)) """ + # if NFPs are not all equal, means there are + # objects with differing NFPs, which it is not clear + # how to choose the phis for by default, so we will throw an error + # unless phi was given. phi = parse_argname_change(phi, kwargs, "zeta", "phi") + errorif( + not np.allclose([thing.NFP for thing in eqs], eqs[0].NFP) and phi is None, + ValueError, + "supplied objects must have the same number of field periods, " + "or if there are differing field periods, `phi` must be given explicitly." + f" Instead, supplied objects have NFPs {[t.NFP for t in eqs]}." 
+ " If attempting to plot an axisymmetric object with non-axisymmetric objects," + " you must use the `change_resolution` method to make the axisymmetric " + "object have the same NFP as the non-axisymmetric objects.", + ) figsize = kwargs.pop("figsize", None) cmap = kwargs.pop("cmap", "rainbow") @@ -2129,7 +2150,7 @@ def plot_boundaries( plot_axis_i = plot_axis and eqs[i].L > 0 rho = np.array([0.0, 1.0]) if plot_axis_i else np.array([1.0]) - grid_kwargs = {"NFP": eqs[i].NFP, "theta": 100, "zeta": phi, "rho": rho} + grid_kwargs = {"NFP": 1, "theta": 100, "zeta": phi, "rho": rho} grid = _get_grid(**grid_kwargs) nr, nt, nz = grid.num_rho, grid.num_theta, grid.num_zeta grid = Grid( @@ -2198,6 +2219,9 @@ def plot_comparison( ): """Plot comparison between flux surfaces of multiple equilibria. + NOTE: If attempting to plot objects with differing NFP, `phi` must + be given explicitly. + Parameters ---------- eqs : array-like of Equilibrium or EquilibriaFamily @@ -2266,7 +2290,21 @@ def plot_comparison( ) """ + # if NFPs are not all equal, means there are + # objects with differing NFPs, which it is not clear + # how to choose the phis for by default, so we will throw an error + # unless phi was given. phi = parse_argname_change(phi, kwargs, "zeta", "phi") + errorif( + not np.allclose([thing.NFP for thing in eqs], eqs[0].NFP) and phi is None, + ValueError, + "supplied objects must have the same number of field periods, " + "or if there are differing field periods, `phi` must be given explicitly." + f" Instead, supplied objects have NFPs {[t.NFP for t in eqs]}." 
+ " If attempting to plot an axisymmetric object with non-axisymmetric objects," + " you must use the `change_resolution` method to make the axisymmetric " + "object have the same NFP as the non-axisymmetric objects.", + ) color = parse_argname_change(color, kwargs, "colors", "color") ls = parse_argname_change(ls, kwargs, "linestyles", "ls") lw = parse_argname_change(lw, kwargs, "lws", "lw") diff --git a/tests/baseline/test_plot_b_mag.png b/tests/baseline/test_plot_b_mag.png index 9f59728c33..f86a3b1830 100644 Binary files a/tests/baseline/test_plot_b_mag.png and b/tests/baseline/test_plot_b_mag.png differ diff --git a/tests/baseline/test_plot_comparison_different_NFPs.png b/tests/baseline/test_plot_comparison_different_NFPs.png new file mode 100644 index 0000000000..96a140648f Binary files /dev/null and b/tests/baseline/test_plot_comparison_different_NFPs.png differ diff --git a/tests/baseline/test_plot_grid_quad.png b/tests/baseline/test_plot_grid_quad.png index dd9185a7ce..a20bc963a0 100644 Binary files a/tests/baseline/test_plot_grid_quad.png and b/tests/baseline/test_plot_grid_quad.png differ diff --git a/tests/benchmarks/compare_bench_results.py b/tests/benchmarks/compare_bench_results.py index 09fc580e22..ab56816153 100644 --- a/tests/benchmarks/compare_bench_results.py +++ b/tests/benchmarks/compare_bench_results.py @@ -8,60 +8,87 @@ cwd = os.getcwd() data = {} -master_idx = 0 -latest_idx = 0 +master_idx = [] +latest_idx = [] commit_ind = 0 -for diret in os.walk(cwd + "/compare_results"): - files = diret[2] - timing_file_exists = False - - for filename in files: - if filename.find("json") != -1: # check if json output file is present - try: - filepath = os.path.join(diret[0], filename) - with open(filepath) as f: - print(filepath) - curr_data = json.load(f) - commit_id = curr_data["commit_info"]["id"][0:7] - data[commit_id] = curr_data - if filepath.find("master") != -1: - master_idx = commit_ind - elif filepath.find("Latest_Commit") != -1: - latest_idx = 
commit_ind - commit_ind += 1 - except Exception as e: - print(e) - continue - +folder_names = [] + +for root1, dirs1, files1 in os.walk(cwd): + for dir_name in dirs1: + if dir_name == "compare_results" or dir_name.startswith("benchmark_artifact"): + print("Including folder: " + dir_name) + # "compare_results" is the folder containing the benchmark results from this + # job "benchmark_artifact" is the folder containing the benchmark results + # from other jobs if in future we change the Python version of the + # benchmarks, we will need to update this + # "/Linux-CPython--64bit" + files2walk = ( + os.walk(cwd + "/" + dir_name) + if dir_name == "compare_results" + else os.walk(cwd + "/" + dir_name + "/Linux-CPython-3.9-64bit") + ) + for root, dirs, files in files2walk: + for filename in files: + if ( + filename.find("json") != -1 + ): # check if json output file is present + try: + filepath = os.path.join(root, filename) + with open(filepath) as f: + curr_data = json.load(f) + commit_id = curr_data["commit_info"]["id"][0:7] + data[commit_ind] = curr_data["benchmarks"] + if filepath.find("master") != -1: + master_idx.append(commit_ind) + elif filepath.find("Latest_Commit") != -1: + latest_idx.append(commit_ind) + commit_ind += 1 + except Exception as e: + print(e) + continue # need arrays of size [ num benchmarks x num commits ] # one for mean one for stddev # number of benchmark cases -num_benchmarks = len(data[list(data.keys())[0]]["benchmarks"]) -num_commits = len(list(data.keys())) +num_benchmarks = 0 +# sum number of benchmarks splitted into different jobs +for split in master_idx: + num_benchmarks += len(data[split]) +num_commits = 2 + times = np.zeros([num_benchmarks, num_commits]) stddevs = np.zeros([num_benchmarks, num_commits]) commit_ids = [] test_names = [None] * num_benchmarks -for id_num, commit_id in enumerate(data.keys()): - commit_ids.append(commit_id) - for i, test in enumerate(data[commit_id]["benchmarks"]): +id_num = 0 +for i in master_idx: + for 
test in data[i]: t_mean = test["stats"]["median"] t_stddev = test["stats"]["iqr"] - times[i, id_num] = t_mean - stddevs[i, id_num] = t_stddev - test_names[i] = test["name"] - + times[id_num, 0] = t_mean + stddevs[id_num, 0] = t_stddev + test_names[id_num] = test["name"] + id_num = id_num + 1 + +id_num = 0 +for i in latest_idx: + for test in data[i]: + t_mean = test["stats"]["median"] + t_stddev = test["stats"]["iqr"] + times[id_num, 1] = t_mean + stddevs[id_num, 1] = t_stddev + test_names[id_num] = test["name"] + id_num = id_num + 1 # we say a slowdown/speedup has occurred if the mean time difference is greater than # n_sigma * (stdev of time delta) significance = 3 # n_sigmas of normal distribution, ie z score of 3 colors = [" "] * num_benchmarks # g if faster, w if similar, r if slower -delta_times_ms = times[:, latest_idx] - times[:, master_idx] -delta_stds_ms = np.sqrt(stddevs[:, latest_idx] ** 2 + stddevs[:, master_idx] ** 2) -delta_times_pct = delta_times_ms / times[:, master_idx] * 100 -delta_stds_pct = delta_stds_ms / times[:, master_idx] * 100 +delta_times_ms = times[:, 1] - times[:, 0] +delta_stds_ms = np.sqrt(stddevs[:, 1] ** 2 + stddevs[:, 0] ** 2) +delta_times_pct = delta_times_ms / times[:, 0] * 100 +delta_stds_pct = delta_stds_ms / times[:, 0] * 100 for i, (pct, spct) in enumerate(zip(delta_times_pct, delta_stds_pct)): if pct > 0 and pct > significance * spct: colors[i] = "-" # this will make the line red @@ -72,8 +99,6 @@ # now make the commit message, save as a txt file # benchmark_name dt(%) dt(s) t_new(s) t_old(s) -print(latest_idx) -print(master_idx) commit_msg_lines = [ "```diff", f"| {'benchmark_name':^38} | {'dt(%)':^22} | {'dt(s)':^22} |" @@ -88,8 +113,8 @@ line = f"{colors[i]:>1}{test_names[i]:<39} |" line += f" {f'{dpct:+6.2f} +/- {sdpct:4.2f}':^22} |" line += f" {f'{dt:+.2e} +/- {sdt:.2e}':^22} |" - line += f" {f'{times[i, latest_idx]:.2e} +/- {stddevs[i, latest_idx]:.1e}':^22} |" - line += f" {f'{times[i, master_idx]:.2e} +/- {stddevs[i, 
master_idx]:.1e}':^22} |" + line += f" {f'{times[i, 1]:.2e} +/- {stddevs[i, 1]:.1e}':^22} |" + line += f" {f'{times[i, 0]:.2e} +/- {stddevs[i, 0]:.1e}':^22} |" commit_msg_lines.append(line) diff --git a/tests/inputs/master_compute_data_rpz.pkl b/tests/inputs/master_compute_data_rpz.pkl index c9a882c95f..d72778328e 100644 Binary files a/tests/inputs/master_compute_data_rpz.pkl and b/tests/inputs/master_compute_data_rpz.pkl differ diff --git a/tests/test_axis_limits.py b/tests/test_axis_limits.py index 5c38730326..8c847ef3a0 100644 --- a/tests/test_axis_limits.py +++ b/tests/test_axis_limits.py @@ -24,10 +24,12 @@ # functions tend toward zero as the magnetic axis is approached and that # d²ψ/(dρ)² and 𝜕√𝑔/𝜕𝜌 are both finite nonzero at the magnetic axis. # Also, dⁿψ/(dρ)ⁿ for n > 3 is assumed zero everywhere. -zero_limits = {"rho", "psi", "psi_r", "e_theta", "sqrt(g)", "B_t"} -# "current Redl" and "P_ISS04" need special treatment because they are not defined for -# all configurations (giving NaN values) -not_continuous_limits = {"current Redl", "P_ISS04"} +zero_limits = {"rho", "psi", "psi_r", "psi_rrr", "e_theta", "sqrt(g)", "B_t"} + +# These compute quantities require kinetic profiles, which are not defined for all +# configurations (giving NaN values) +not_continuous_limits = {"current Redl", "P_ISS04", "P_fusion", ""} + not_finite_limits = { "D_Mercier", "D_geodesic", diff --git a/tests/test_bootstrap.py b/tests/test_bootstrap.py index 811480bbaf..d4e49016a1 100644 --- a/tests/test_bootstrap.py +++ b/tests/test_bootstrap.py @@ -1196,7 +1196,7 @@ def test_BootstrapRedlConsistency_normalization(self): ) # Results are not perfectly identical because ln(Lambda) is not quite invariant. 
- np.testing.assert_allclose(results, expected, rtol=2e-3) + np.testing.assert_allclose(results, expected, rtol=3e-3) @pytest.mark.regression def test_bootstrap_consistency_iota(self, TmpDir): diff --git a/tests/test_compute_everything.py b/tests/test_compute_everything.py index aff0345e8f..308138b26e 100644 --- a/tests/test_compute_everything.py +++ b/tests/test_compute_everything.py @@ -80,8 +80,21 @@ def _compare_against_rpz(p, data, data_rpz, coordinate_conversion_func): def test_compute_everything(): """Test that the computations on this branch agree with those on master. - Also make sure we can compute everything without errors. Computed quantities - are both in "rpz" and "xyz" basis. + Also make sure we can compute everything without errors. + + Notes + ----- + This test will fail if the benchmark file has been updated on both + the local and upstream branches and git cannot resolve the merge + conflict. In that case, please regenerate the benchmark file. + Here are instructions for convenience. + + 1. Prepend true to the line near the end of this test. + ``if True or (not error_rpz and update_master_data_rpz):`` + 2. Run pytest -k test_compute_everything + 3. Revert 1. + 4. 
git add tests/inputs/master_compute_data_rpz.pkl + """ elliptic_cross_section_with_torsion = { "R_lmn": [10, 1, 0.2], diff --git a/tests/test_equilibrium.py b/tests/test_equilibrium.py index 52958a6a9b..471bfa61ae 100644 --- a/tests/test_equilibrium.py +++ b/tests/test_equilibrium.py @@ -15,7 +15,6 @@ from desc.io import InputReader from desc.objectives import ForceBalance, ObjectiveFunction, get_equilibrium_objective from desc.profiles import PowerSeriesProfile -from desc.utils import errorif from .utils import area_difference, compute_coords @@ -50,33 +49,16 @@ def test_map_coordinates(): """Test root finding for (rho,theta,zeta) for common use cases.""" # finding coordinates along a single field line eq = get("NCSX") + with pytest.warns(UserWarning, match="Reducing radial"): + eq.change_resolution(3, 3, 3, 6, 6, 6) n = 100 coords = np.array([np.ones(n), np.zeros(n), np.linspace(0, 10 * np.pi, n)]).T - grid = LinearGrid(rho=1, M=eq.M_grid, N=eq.N_grid, sym=eq.sym, NFP=eq.NFP) - iota = grid.compress(eq.compute("iota", grid=grid)["iota"]) - iota = np.broadcast_to(iota, shape=(n,)) - - tol = 1e-5 - out_1 = eq.map_coordinates( - coords, inbasis=["rho", "alpha", "zeta"], iota=iota, tol=tol - ) - assert np.isfinite(out_1).all() - out_2 = eq.map_coordinates( + out = eq.map_coordinates( coords, inbasis=["rho", "alpha", "zeta"], period=(np.inf, 2 * np.pi, np.inf), - tol=tol, - ) - assert np.isfinite(out_2).all() - diff = out_1 - out_2 - errorif( - not np.all( - np.isclose(diff, 0, atol=2 * tol) - | np.isclose(np.abs(diff), 2 * np.pi, atol=2 * tol) - ), - AssertionError, - f"diff: {diff}", ) + assert np.isfinite(out).all() eq = get("DSHAPE") @@ -89,9 +71,9 @@ def test_map_coordinates(): grid = Grid(np.vstack([rho, theta, zeta]).T, sort=False) in_data = eq.compute(inbasis, grid=grid) - in_coords = np.stack([in_data[k] for k in inbasis], axis=-1) + in_coords = np.column_stack([in_data[k] for k in inbasis]) out_data = eq.compute(outbasis, grid=grid) - out_coords = 
np.stack([out_data[k] for k in outbasis], axis=-1) + out_coords = np.column_stack([out_data[k] for k in outbasis]) out = eq.map_coordinates( in_coords, diff --git a/tests/test_grid.py b/tests/test_grid.py index 160c6aac9c..051ba1b89f 100644 --- a/tests/test_grid.py +++ b/tests/test_grid.py @@ -482,14 +482,16 @@ def test_quadrature_grid(self): N = 0 NFP = 1 grid_quad = QuadratureGrid(L, M, N, NFP) - roots, weights = special.js_roots(3, 2, 2) + roots, weights = special.js_roots(2, 2, 2) quadrature_nodes = np.stack( [ - np.array([roots[0]] * 5 + [roots[1]] * 5 + [roots[2]] * 5), np.array( - [0, 2 * np.pi / 5, 4 * np.pi / 5, 6 * np.pi / 5, 8 * np.pi / 5] * 3 + [roots[0]] * grid_quad.num_theta + [roots[1]] * grid_quad.num_theta ), - np.zeros(15), + np.array( + [0, 2 * np.pi / 5, 4 * np.pi / 5, 6 * np.pi / 5, 8 * np.pi / 5] * 2 + ), + np.zeros(10), ] ).T np.testing.assert_allclose(grid_quad.spacing.prod(axis=1), grid_quad.weights) diff --git a/tests/test_objective_funs.py b/tests/test_objective_funs.py index e7fce10920..1619e5c846 100644 --- a/tests/test_objective_funs.py +++ b/tests/test_objective_funs.py @@ -24,7 +24,7 @@ from desc.compute import get_transforms from desc.equilibrium import Equilibrium from desc.examples import get -from desc.geometry import FourierRZToroidalSurface, FourierXYZCurve +from desc.geometry import FourierPlanarCurve, FourierRZToroidalSurface, FourierXYZCurve from desc.grid import ConcentricGrid, LinearGrid, QuadratureGrid from desc.io import load from desc.magnetic_fields import ( @@ -48,6 +48,7 @@ Energy, ForceBalance, ForceBalanceAnisotropic, + FusionPower, GenericObjective, HeatingPowerISS04, Isodynamicity, @@ -208,8 +209,8 @@ def test(eq): obj.build() f = obj.compute_unscaled(*obj.xs(eq)) f_scaled = obj.compute_scaled_error(*obj.xs(eq)) - np.testing.assert_allclose(f, 1.3 / 0.7, rtol=5e-3) - np.testing.assert_allclose(f_scaled, 2 * (1.3 / 0.7), rtol=5e-3) + np.testing.assert_allclose(f, 1.3 / 0.7, rtol=8e-3) + 
np.testing.assert_allclose(f_scaled, 2 * (1.3 / 0.7), rtol=8e-3) test(get("HELIOTRON")) test(get("HELIOTRON").surface) @@ -908,6 +909,13 @@ def test(coil, grid=None): test(mixed_coils) test(nested_coils, grid=grid) + def test_coil_type_error(self): + """Tests error when objective is not passed a coil.""" + curve = FourierPlanarCurve(r_n=2, basis="rpz") + obj = CoilLength(curve) + with pytest.raises(TypeError): + obj.build() + @pytest.mark.unit def test_coil_min_distance(self): """Tests minimum distance between coils in a coilset.""" @@ -2158,6 +2166,7 @@ class TestComputeScalarResolution: CoilLength, CoilSetMinDistance, CoilTorsion, + FusionPower, GenericObjective, HeatingPowerISS04, Omnigenity, @@ -2219,6 +2228,28 @@ def test_compute_scalar_resolution_bootstrap(self): f[i] = obj.compute_scalar(obj.x()) np.testing.assert_allclose(f, f[-1], rtol=5e-2) + @pytest.mark.regression + def test_compute_scalar_resolution_fusion_power(self): + """FusionPower.""" + eq = self.eq.copy() + eq.electron_density = PowerSeriesProfile([1e19, 0, -1e19]) + eq.electron_temperature = PowerSeriesProfile([1e3, 0, -1e3]) + eq.ion_temperature = PowerSeriesProfile([1e3, 0, -1e3]) + eq.atomic_number = 1.0 + + f = np.zeros_like(self.res_array, dtype=float) + for i, res in enumerate(self.res_array): + grid = QuadratureGrid( + L=int(self.eq.L * res), + M=int(self.eq.M * res), + N=int(self.eq.N * res), + NFP=self.eq.NFP, + ) + obj = ObjectiveFunction(FusionPower(eq=eq, grid=grid)) + obj.build(verbose=0) + f[i] = obj.compute_scalar(obj.x()) + np.testing.assert_allclose(f, f[-1], rtol=5e-2) + @pytest.mark.regression def test_compute_scalar_resolution_heating_power(self): """HeatingPowerISS04.""" @@ -2510,6 +2541,7 @@ class TestObjectiveNaNGrad: CoilSetMinDistance, CoilTorsion, ForceBalanceAnisotropic, + FusionPower, HeatingPowerISS04, Omnigenity, PlasmaCoilSetMinDistance, @@ -2559,6 +2591,22 @@ def test_objective_no_nangrad_bootstrap(self): g = obj.grad(obj.x(eq)) assert not np.any(np.isnan(g)), 
"redl bootstrap" + @pytest.mark.unit + def test_objective_no_nangrad_fusion_power(self): + """FusionPower.""" + eq = Equilibrium( + L=2, + M=2, + N=2, + electron_density=PowerSeriesProfile([1e19, 0, -1e19]), + electron_temperature=PowerSeriesProfile([1e3, 0, -1e3]), + current=PowerSeriesProfile([1, 0, -1]), + ) + obj = ObjectiveFunction(FusionPower(eq)) + obj.build() + g = obj.grad(obj.x(eq)) + assert not np.any(np.isnan(g)), "fusion power" + @pytest.mark.unit def test_objective_no_nangrad_heating_power(self): """HeatingPowerISS04.""" diff --git a/tests/test_plotting.py b/tests/test_plotting.py index c1bd782ac5..6048a7d601 100644 --- a/tests/test_plotting.py +++ b/tests/test_plotting.py @@ -5,7 +5,6 @@ import matplotlib.pyplot as plt import numpy as np import pytest -from scipy.interpolate import interp1d from desc.basis import ( DoubleFourierSeries, @@ -514,7 +513,14 @@ def test_plot_boundaries(self): eq1 = get("SOLOVEV") eq2 = get("DSHAPE") eq3 = get("W7-X") - fig, ax, data = plot_boundaries((eq1, eq2, eq3), return_data=True) + eq4 = get("ESTELL") + with pytest.raises(ValueError, match="differing field periods"): + fig, ax = plot_boundaries([eq3, eq4], theta=0) + fig, ax, data = plot_boundaries( + (eq1, eq2, eq3), + phi=np.linspace(0, 2 * np.pi / eq3.NFP, 4, endpoint=False), + return_data=True, + ) assert "R" in data.keys() assert "Z" in data.keys() assert len(data["R"]) == 3 @@ -551,6 +557,22 @@ def test_plot_comparison_no_theta(self): fig, ax = plot_comparison(eqf, theta=0) return fig + @pytest.mark.unit + @pytest.mark.mpl_image_compare(remove_text=True, tolerance=tol_2d) + def test_plot_comparison_different_NFPs(self): + """Test plotting comparison of flux surfaces with differing NFPs.""" + eq = get("SOLOVEV") + eq_nonax = get("HELIOTRON") + eq_nonax2 = get("ESTELL") + with pytest.raises(ValueError, match="differing field periods"): + fig, ax = plot_comparison([eq_nonax, eq_nonax2], theta=0) + fig, ax = plot_comparison( + [eq, eq_nonax], + phi=np.linspace(0, 2 
* np.pi / eq_nonax.NFP, 6, endpoint=False), + theta=0, + ) + return fig + class TestPlotGrid: """Tests for the plot_grid function.""" @@ -823,17 +845,15 @@ def flatten_coils(coilset): def test_plot_b_mag(): """Test plot of |B| on longer field lines for gyrokinetic simulations.""" psi = 0.5 + rho = np.sqrt(psi) npol = 2 nzgrid = 128 alpha = 0 - # compute and fit iota profile + # compute iota eq = get("W7-X") - data = eq.compute("iota") - fi = interp1d(data["rho"], data["iota"]) + iota = eq.compute("iota", grid=LinearGrid(rho=rho, NFP=eq.NFP))["iota"][0] # get flux tube coordinate system - rho = np.sqrt(psi) - iota = fi(rho) zeta = np.linspace( -np.pi * npol / np.abs(iota), np.pi * npol / np.abs(iota), 2 * nzgrid + 1 )