Merge branch 'development' into radial_weight_power
dpgrote committed Jul 19, 2024
2 parents be4c3d1 + f160cfc commit 824198a
Showing 68 changed files with 913 additions and 1,267 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/cuda.yml
@@ -115,7 +115,7 @@ jobs:
which nvcc || echo "nvcc not in PATH!"
git clone https://github.com/AMReX-Codes/amrex.git ../amrex
cd ../amrex && git checkout --detach 259db7cfb99e7d1d2ab4bec9b1587fdf624a138a && cd -
cd ../amrex && git checkout --detach dcb9cc0383dcc71e38dee9070574e325a812f8bf && cd -
make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4
ccache -s
25 changes: 20 additions & 5 deletions CMakeLists.txt
@@ -1,7 +1,7 @@
# Preamble ####################################################################
#
cmake_minimum_required(VERSION 3.20.0)
project(WarpX VERSION 24.06)
project(WarpX VERSION 24.07)

include(${WarpX_SOURCE_DIR}/cmake/WarpXFunctions.cmake)

@@ -146,10 +146,19 @@ set_default_build_type("Release")

# Option to enable interprocedural optimization
# (also know as "link-time optimization" or "whole program optimization")
option(WarpX_IPO "Compile WarpX with interprocedural optimization (will take more time)" OFF)
set(_WarpX_IPO_DEFAULT OFF)
set(_WarpX_PYTHON_IPO_DEFAULT ON)
if(DEFINED CMAKE_INTERPROCEDURAL_OPTIMIZATION)
set(_WarpX_IPO_DEFAULT ${CMAKE_INTERPROCEDURAL_OPTIMIZATION})
set(_WarpX_PYTHON_IPO_DEFAULT ${CMAKE_INTERPROCEDURAL_OPTIMIZATION})
endif()
option(WarpX_IPO
"Compile WarpX with interprocedural optimization (will take more time)"
${_WarpX_IPO_DEFAULT}
)
option(WarpX_PYTHON_IPO
"Compile Python bindings with interprocedural optimization (IPO) / link-time optimization (LTO)"
ON
${_WarpX_PYTHON_IPO_DEFAULT}
)

set(pyWarpX_VERSION_INFO "" CACHE STRING
@@ -455,7 +464,7 @@ endif()

# Interprocedural optimization (IPO) / Link-Time Optimization (LTO)
if(WarpX_IPO)
enable_IPO("${_ALL_TARGETS}")
warpx_enable_IPO("${_ALL_TARGETS}")
endif()

# link dependencies
@@ -488,7 +497,13 @@ foreach(D IN LISTS WarpX_DIMS)
if(WarpX_PYTHON)
target_link_libraries(pyWarpX_${SD} PRIVATE pybind11::module pybind11::windows_extras)
if(WarpX_PYTHON_IPO)
target_link_libraries(pyWarpX_${SD} PRIVATE pybind11::lto)
if(DEFINED CMAKE_INTERPROCEDURAL_OPTIMIZATION)
warpx_enable_IPO(pyWarpX_${SD})
else()
# conditionally defined target in pybind11
# https://github.com/pybind/pybind11/blob/v2.12.0/tools/pybind11Common.cmake#L397-L403
target_link_libraries(pyWarpX_${SD} PRIVATE pybind11::lto)
endif()
endif()
endif()

2 changes: 1 addition & 1 deletion Docs/requirements.txt
@@ -13,7 +13,7 @@ openpmd-viewer # for checksumAPI

# PICMI API docs
# note: keep in sync with version in ../requirements.txt
picmistandard==0.28.0
picmistandard==0.29.0
# for development against an unreleased PICMI version, use:
# picmistandard @ git+https://github.com/picmi-standard/picmi.git#subdirectory=PICMI_Python

4 changes: 2 additions & 2 deletions Docs/source/conf.py
@@ -103,9 +103,9 @@ def __init__(self, *args, **kwargs):
# built documents.
#
# The short X.Y version.
version = u'24.06'
version = u'24.07'
# The full version, including alpha/beta/rc tags.
release = u'24.06'
release = u'24.07'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
31 changes: 23 additions & 8 deletions Docs/source/usage/parameters.rst
@@ -86,7 +86,7 @@ Overall simulation parameters

* ``explicit``: Use an explicit solver, such as the standard FDTD or PSATD

* ``theta_implicit_em``: Use a fully implicit electromagnetic solver with a time-biasing parameter theta bound between 0.5 and 1.0. Exact energy conservation is achieved using theta = 0.5. Maximal damping of high-k modes is obtained using theta = 1.0. Choices for the nonlinear solver include a Picard iteration scheme and particle-suppressed (PS) JNFK.
* ``theta_implicit_em``: Use a fully implicit electromagnetic solver with a time-biasing parameter theta bound between 0.5 and 1.0. Exact energy conservation is achieved using theta = 0.5. Maximal damping of high-k modes is obtained using theta = 1.0. Choices for the nonlinear solver include a Picard iteration scheme and particle-suppressed (PS) JFNK.
The algorithm itself is numerically stable for large time steps. That is, it does not require time steps that resolve the plasma period or the CFL condition for light waves. However, the practicality of using a large time step depends on the nonlinear solver. Note that the Picard solver is for demonstration only. It is inefficient and will most likely not converge when
:math:`\omega_{pe} \Delta t` is close to or greater than one or when the CFL condition for light waves is violated. The PS-JFNK method must be used in order to use large time steps. However, the current implementation of PS-JFNK is still inefficient because the JFNK solver is not preconditioned and there is no use of the mass matrices to minimize the cost of a linear iteration. The time step is limited by how many cells a particle can cross in a time step (MPI-related) and by the need to resolve the relevant physics.
The Picard method is described in `Angus et al., On numerical energy conservation for an implicit particle-in-cell method coupled with a binary Monte-Carlo algorithm for Coulomb collisions <https://doi.org/10.1016/j.jcp.2022.111030>`__.
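
As a quick illustration of the :math:`\omega_{pe} \Delta t` criterion above, the following standalone Python sketch estimates whether a candidate time step falls in the regime where the Picard solver is expected to struggle. The plasma density and time step values are illustrative assumptions, not taken from any WarpX input:

import math

# Physical constants (SI); used only for this estimate.
q_e  = 1.602176634e-19     # elementary charge [C]
m_e  = 9.1093837015e-31    # electron mass [kg]
eps0 = 8.8541878128e-12    # vacuum permittivity [F/m]

def omega_pe(n_e):
    """Electron plasma frequency [rad/s] for electron density n_e [1/m^3]."""
    return math.sqrt(n_e * q_e**2 / (eps0 * m_e))

# Illustrative values: a 1e20 m^-3 plasma and a candidate time step of 5 ps.
n_e = 1.0e20
dt  = 5.0e-12

print(f"omega_pe * dt = {omega_pe(n_e) * dt:.2f}")
# If omega_pe * dt is close to or above 1, the Picard iteration is expected
# to converge poorly; PS-JFNK is the intended solver for such large time steps.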
@@ -500,7 +500,7 @@ Domain Boundary Conditions
* ``open``: For the electrostatic Poisson solver based on an Integrated Green Function method.

* ``boundary.potential_lo_x/y/z`` and ``boundary.potential_hi_x/y/z`` (default `0`)
Gives the value of the electric potential at the boundaries, for ``pec`` boundaries. With electrostatic solvers
Gives the value of the electric potential, in Volts, at the boundaries, for ``pec`` boundaries. With electrostatic solvers
(i.e., with ``warpx.do_electrostatic = ...``), this is used in order to compute the potential
in the simulation volume at each timestep. When using other solvers (e.g. Maxwell solver),
setting these variables will trigger an electrostatic solve at ``t=0``, to compute the initial
@@ -603,7 +603,7 @@ Whether the embedded boundary is defined with an analytical function or an STL f
additionally define the electric potential at the embedded boundary with an analytical function:

* ``warpx.eb_potential(x,y,z,t)`` (`string`)
Gives the value of the electric potential at the surface of the embedded boundary,
Gives the value of the electric potential, in Volts, at the surface of the embedded boundary,
as a function of `x`, `y`, `z` and `t`. With electrostatic solvers (i.e., with
``warpx.do_electrostatic = ...``), this is used in order to compute the potential
in the simulation volume at each timestep. When using other solvers (e.g. Maxwell solver),
@@ -1163,8 +1163,8 @@ Particle initialization
* ``gaussian_parse_momentum_function``: Gaussian momentum distribution where the mean and the standard deviation are given by functions of position in the input file.
Both are assumed to be non-relativistic.
The mean is the normalized momentum, :math:`u_m = \gamma v_m/c`.
The standard deviation is normalized, :math:`u_th = v_th/c`.
For example, this might be `u_th = sqrt(T*q_e/mass)/clight` given the temperature (in eV) and mass.
The standard deviation is normalized, :math:`u_{th} = v_{th}/c`.
For example, this might be ``u_th = sqrt(T*q_e/mass)/clight`` given the temperature (in eV) and mass.
It requires the following arguments:

* ``<species_name>.momentum_function_ux_m(x,y,z)``: mean :math:`u_{x}`
@@ -1754,8 +1754,8 @@ are applied to the grid directly. In particular, these fields can be seen in the
One can refer to input files in ``Examples/Tests/LoadExternalField`` for more information.
Regarding how to prepare the openPMD data file, one can refer to
the `openPMD-example-datasets <https://github.com/openPMD/openPMD-example-datasets>`__.
Note that if both `B_ext_grid_init_style` and `E_ext_grid_init_style` are set to
`read_from_file`, the openPMD file specified by `warpx.read_fields_from_path`
Note that if both ``B_ext_grid_init_style`` and ``E_ext_grid_init_style`` are set to
``read_from_file``, the openPMD file specified by ``warpx.read_fields_from_path``
should contain both B and E external fields data.
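
For reference, a minimal PICMI sketch of loading such grid fields from an openPMD file is given below. It assumes a ``picmi.LoadInitialField`` helper analogous to the ``picmi.LoadAppliedField`` used later in this commit; the class name, arguments, and dataset path should be treated as assumptions rather than a definitive API:

from pywarpx import picmi

# Sketch (assumed API, mirroring the LoadExternalField examples): read external
# grid fields from an openPMD file; here only B is loaded.
external_fields = picmi.LoadInitialField(
    read_fields_from_path="path/to/external_fields.h5",  # illustrative path
    load_E=False,
)

# The object would then be registered with an existing picmi.Simulation, e.g.:
# sim.add_applied_field(external_fields)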

* ``warpx.E_external_grid`` & ``warpx.B_external_grid`` (list of `3 floats`)
@@ -1811,6 +1811,21 @@ are applied to the particles directly, at each timestep. As a results, these fie

Note that the position is defined in Cartesian coordinates, as a function of (x,y,z), even for RZ.

* ``read_from_file``: load the external field from an openPMD file.
An additional parameter, ``particles.read_fields_from_path``, indicating the path of an openPMD data file,
must be specified, from which the external field data can be loaded into WarpX.
One can refer to input files in ``Examples/Tests/LoadExternalField`` for more information.
Regarding how to prepare the openPMD data file, one can refer to
the `openPMD-example-datasets <https://github.com/openPMD/openPMD-example-datasets>`__.
Note that if both ``B_ext_particle_init_style`` and ``E_ext_particle_init_style`` are set to
``read_from_file``, the openPMD file specified by ``particles.read_fields_from_path``
should contain both B and E external fields data.

.. note::

When using ``read_from_file``, the fields loaded from the file will be interpolated
to the resolution of the grid used for the simulation.
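
A minimal PICMI sketch of this particle-field variant, following the pattern of the ``PICMI_inputs_3d_particle_fields.py`` example added in this commit (only the dataset path below is illustrative):

from pywarpx import picmi

# Read external B fields from an openPMD file and apply them to the particles
# at each step, as in the new PICMI_inputs_3d_particle_fields.py example.
applied_field = picmi.LoadAppliedField(
    read_fields_from_path="path/to/example-femm-3d.h5",  # illustrative path
    load_E=False,          # only B is read from the file here
)

# Registered with an existing picmi.Simulation instance:
# sim.add_applied_field(applied_field)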

* ``repeated_plasma_lens``: apply a series of plasma lenses.
The properties of the lenses are defined in the lab frame by the input parameters:

@@ -2453,7 +2468,7 @@ Grid types (collocated, staggered, hybrid)
For example, :math:`E_z` is gathered using ``algo.particle_shape`` along :math:`(x,y)` and ``algo.particle_shape - 1`` along :math:`z`.
See equations (21)-(23) of :cite:t:`param-Godfrey2013` and associated references for details.

Default: ``interpolation.galerkin_scheme = 0`` with collocated grids and/or momentum-conserving field gathering, ``interpolation.galerkin_scheme = 1`` otherwise.
Default: ``interpolation.galerkin_scheme = 0`` with collocated grids, or momentum-conserving field gathering, or when ``algo.current_deposition = direct``; ``interpolation.galerkin_scheme = 1`` otherwise.

.. warning::

@@ -93,15 +93,15 @@
species=[ions],
data_list = ['ux', 'uy', 'uz', 'x', 'y', 'z', 'weighting'],
write_dir='.',
warpx_file_prefix='Python_LoadExternalField3D_plt'
warpx_file_prefix='Python_LoadExternalGridField3D_plt'
)
field_diag = picmi.FieldDiagnostic(
name='diag1',
grid=grid,
period=300,
data_list = ['Bx', 'By', 'Bz', 'Ex', 'Ey', 'Ez', 'Jx', 'Jy', 'Jz'],
write_dir='.',
warpx_file_prefix='Python_LoadExternalField3D_plt'
warpx_file_prefix='Python_LoadExternalGridField3D_plt'
)

#################################
140 changes: 140 additions & 0 deletions Examples/Tests/LoadExternalField/PICMI_inputs_3d_particle_fields.py
@@ -0,0 +1,140 @@
#!/usr/bin/env python3
#
# --- Input file for loading initial field from openPMD file.

from pywarpx import picmi

constants = picmi.constants

#################################
####### GENERAL PARAMETERS ######
#################################

max_steps = 300

max_grid_size = 40
nx = max_grid_size
ny = max_grid_size
nz = max_grid_size

xmin = -1
xmax = 1
ymin = xmin
ymax = xmax
zmin = 0
zmax = 5

number_per_cell = 200

#################################
############ NUMERICS ###########
#################################

verbose = 1
dt = 4.4e-7
use_filter = 0

# Order of particle shape factors
particle_shape = 1

#################################
############ PLASMA #############
#################################

ion_dist = picmi.ParticleListDistribution(
x=0.0,
y=0.2,
z=2.5,
ux=9.5e-05*constants.c,
uy=0.0*constants.c,
uz=1.34e-4*constants.c,
weight=1.0
)

ions = picmi.Species(
particle_type='H',
name='proton', charge='q_e',mass="m_p",
warpx_do_not_deposit=1,
initial_distribution=ion_dist
)

#################################
######## INITIAL FIELD ##########
#################################

applied_field = picmi.LoadAppliedField(
read_fields_from_path="../../../../openPMD-example-datasets/example-femm-3d.h5",
load_E=False
)

#################################
###### GRID AND SOLVER ##########
#################################

grid = picmi.Cartesian3DGrid(
number_of_cells=[nx, ny, nz],
warpx_max_grid_size=max_grid_size,
lower_bound=[xmin, ymin, zmin],
upper_bound=[xmax, ymax, zmax],
lower_boundary_conditions=['dirichlet', 'dirichlet', 'dirichlet'],
upper_boundary_conditions=['dirichlet', 'dirichlet', 'dirichlet'],
lower_boundary_conditions_particles=['absorbing', 'absorbing', 'absorbing'],
upper_boundary_conditions_particles=['absorbing', 'absorbing', 'absorbing']
)
solver = picmi.ElectrostaticSolver(grid=grid)

#################################
######### DIAGNOSTICS ###########
#################################

particle_diag = picmi.ParticleDiagnostic(
name='diag1',
period=300,
species=[ions],
data_list = ['ux', 'uy', 'uz', 'x', 'y', 'z', 'weighting'],
write_dir='.',
warpx_file_prefix='Python_LoadExternalParticleField3D_plt'
)
field_diag = picmi.FieldDiagnostic(
name='diag1',
grid=grid,
period=300,
data_list = ['Bx', 'By', 'Bz', 'Ex', 'Ey', 'Ez', 'Jx', 'Jy', 'Jz'],
write_dir='.',
warpx_file_prefix='Python_LoadExternalParticleField3D_plt'
)

#################################
####### SIMULATION SETUP ########
#################################

sim = picmi.Simulation(
solver=solver,
max_steps=max_steps,
verbose=verbose,
warpx_serialize_initial_conditions=False,
warpx_grid_type='collocated',
warpx_do_dynamic_scheduling=False,
warpx_use_filter=use_filter,
time_step_size=dt,
particle_shape=particle_shape
)

sim.add_applied_field(applied_field)

sim.add_species(
ions,
layout=picmi.PseudoRandomLayout(
n_macroparticles_per_cell=number_per_cell, grid=grid
)
)

sim.add_diagnostic(field_diag)
sim.add_diagnostic(particle_diag)

#################################
##### SIMULATION EXECUTION ######
#################################

#sim.write_input_file('PICMI_inputs_3d')
sim.step(max_steps)
File renamed without changes.
65 changes: 65 additions & 0 deletions Examples/Tests/LoadExternalField/inputs_rz_particle_fields
@@ -0,0 +1,65 @@
warpx.serialize_initial_conditions = 0
warpx.do_dynamic_scheduling = 0
particles.do_tiling = 0

particles.B_ext_particle_init_style = "read_from_file"
particles.read_fields_from_path = "../../../../openPMD-example-datasets/example-femm-thetaMode.h5"

warpx.grid_type = collocated
warpx.do_electrostatic = labframe

#################################
####### GENERAL PARAMETERS ######
#################################
max_step = 300
amr.n_cell = 40 40
warpx.numprocs = 1 1
amr.max_level = 0
geometry.dims = RZ

geometry.prob_lo = 0.0 0.0
geometry.prob_hi = 1.0 5.0

#################################
###### Boundary Condition #######
#################################
boundary.field_lo = none pec
boundary.field_hi = pec pec
boundary.potential_lo_x = 0
boundary.potential_hi_x = 0
boundary.potential_lo_y = 0
boundary.potential_hi_y = 0
boundary.potential_lo_z = 0
boundary.potential_hi_z = 0

#################################
############ NUMERICS ###########
#################################
warpx.serialize_initial_conditions = 1
warpx.verbose = 1
warpx.const_dt = 4.40917904849092e-7
warpx.use_filter = 0

# Order of particle shape factors
algo.particle_shape = 1

#################################
############ PLASMA #############
#################################
particles.species_names = proton
proton.injection_style = "SingleParticle"
proton.single_particle_pos = 0.0 0.2 2.5
proton.single_particle_u = 9.506735958279367e-05 0.0 0.00013435537232359165
proton.single_particle_weight = 1.0
proton.do_not_deposit = 1
proton.mass = m_p
proton.charge = q_e

# Diagnostics
diagnostics.diags_names = diag1 chk
diag1.intervals = 300
diag1.diag_type = Full

chk.intervals = 150
chk.diag_type = Full
chk.format = checkpoint
2 changes: 1 addition & 1 deletion Examples/Tests/collision/analysis_collision_3d.py
@@ -111,4 +111,4 @@
dim, species_name)

test_name = os.path.split(os.getcwd())[1]
checksumAPI.evaluate_checksum(test_name, fn, do_particles=False)
checksumAPI.evaluate_checksum(test_name, fn)