Skip to content

Commit

Permalink
Fixed problems in measuring the performance of Laplace operators
Browse files Browse the repository at this point in the history
  • Loading branch information
david-zwicker committed Nov 29, 2022
1 parent 0bfa397 commit fe66ba4
Showing 1 changed file with 11 additions and 5 deletions.
16 changes: 11 additions & 5 deletions tests/performance_laplace.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
from pde import CylindricalSymGrid, ScalarField, SphericalSymGrid, UnitGrid, config
from pde.grids.boundaries import Boundaries
from pde.tools.misc import estimate_computation_speed
from pde.tools.numba import jit, jit_allocate_out
from pde.tools.numba import jit

config["numba.multithreading"] = False

Expand All @@ -27,9 +27,12 @@ def custom_laplace_2d_periodic(shape, dx=1):
dim_x, dim_y = shape
parallel = dim_x * dim_y >= config["numba.multithreading_threshold"]

@jit_allocate_out(parallel=parallel)
def laplace(arr, out=None, args=None):
@jit(parallel=parallel)
def laplace(arr, out=None):
"""apply laplace operator to array `arr`"""
if out is None:
out = np.empty((dim_x, dim_y))

for i in nb.prange(dim_x):
im = dim_x - 1 if i == 0 else i - 1
ip = 0 if i == dim_x - 1 else i + 1
Expand Down Expand Up @@ -62,9 +65,12 @@ def custom_laplace_2d_neumann(shape, dx=1):
dim_x, dim_y = shape
parallel = dim_x * dim_y >= config["numba.multithreading_threshold"]

@jit_allocate_out(parallel=parallel)
def laplace(arr, out=None, args=None):
@jit(parallel=parallel)
def laplace(arr, out=None):
"""apply laplace operator to array `arr`"""
if out is None:
out = np.empty((dim_x, dim_y))

for i in nb.prange(dim_x):
im = 0 if i == 0 else i - 1
ip = dim_x - 1 if i == dim_x - 1 else i + 1
Expand Down

0 comments on commit fe66ba4

Please sign in to comment.