Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Changed
- Improved performance of antenna metrics calculation by utilizing cached wave amplitude calculations instead of recomputing wave amplitudes for each port excitation in the `TerminalComponentModelerData`.

### Fixed
- More robust `Sellmeier` and `Debye` material models, and prevention of very large pole parameters in the `PoleResidue` material model.


## [v2.10.0rc2] - 2025-10-01

### Added
Expand Down
31 changes: 23 additions & 8 deletions tests/test_components/test_custom.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,12 +72,14 @@ def make_custom_current_source():
return td.CustomCurrentSource(size=SIZE, source_time=ST, current_dataset=current_dataset)


def make_spatial_data(value=0, dx=0, unstructured=False, seed=None, uniform=False):
def make_spatial_data(
value=0, dx=0, unstructured=False, seed=None, uniform=False, random_magnitude=1
):
"""Makes a spatial data array."""
if uniform:
data = value * np.ones((Nx, Ny, Nz))
else:
data = np.random.random((Nx, Ny, Nz)) + value
data = np.random.random((Nx, Ny, Nz)) * random_magnitude + value
arr = td.SpatialDataArray(data, coords={"x": X + dx, "y": Y, "z": Z})
if unstructured:
method = "direct" if uniform else "linear"
Expand Down Expand Up @@ -777,11 +779,11 @@ def test_custom_pole_residue(unstructured):
def test_custom_sellmeier(unstructured):
"""Custom Sellmeier medium."""
seed = 897245
b1 = make_spatial_data(value=0, unstructured=unstructured, seed=seed)
c1 = make_spatial_data(value=0, unstructured=unstructured, seed=seed)
b1 = make_spatial_data(value=0.1, unstructured=unstructured, seed=seed)
c1 = make_spatial_data(value=0.1, unstructured=unstructured, seed=seed)

b2 = make_spatial_data(value=0, unstructured=unstructured, seed=seed)
c2 = make_spatial_data(value=0, unstructured=unstructured, seed=seed)
b2 = make_spatial_data(value=0.1, unstructured=unstructured, seed=seed)
c2 = make_spatial_data(value=0.1, unstructured=unstructured, seed=seed)

# complex b
with pytest.raises(pydantic.ValidationError):
Expand Down Expand Up @@ -815,6 +817,13 @@ def test_custom_sellmeier(unstructured):
btmp = make_spatial_data(value=0, dx=1, unstructured=(not unstructured), seed=seed)
mat = CustomSellmeier(coeffs=((b1, c2), (btmp, c2)))

# some of C is close to 0
with pytest.raises(pydantic.ValidationError):
ctmp = make_spatial_data(
value=0, unstructured=unstructured, seed=seed, random_magnitude=1e-7
)
mat = CustomSellmeier(coeffs=((b1, c1), (b2, ctmp)))

mat = CustomSellmeier(coeffs=((b1, c1), (b2, c2)))
verify_custom_dispersive_medium_methods(mat, ["coeffs"])
assert mat.n_cfl == 1
Expand Down Expand Up @@ -931,10 +940,10 @@ def test_custom_debye(unstructured):
eps_inf = make_spatial_data(value=1, unstructured=unstructured, seed=seed)

eps1 = make_spatial_data(value=0, unstructured=unstructured, seed=seed)
tau1 = make_spatial_data(value=0, unstructured=unstructured, seed=seed)
tau1 = make_spatial_data(value=0.1, unstructured=unstructured, seed=seed)

eps2 = make_spatial_data(value=0, unstructured=unstructured, seed=seed)
tau2 = make_spatial_data(value=0, unstructured=unstructured, seed=seed)
tau2 = make_spatial_data(value=0.1, unstructured=unstructured, seed=seed)

# complex eps
with pytest.raises(pydantic.ValidationError):
Expand All @@ -951,6 +960,12 @@ def test_custom_debye(unstructured):
tautmp = make_spatial_data(value=-0.5, unstructured=unstructured, seed=seed)
mat = CustomDebye(eps_inf=eps_inf, coeffs=((eps1, tau1), (eps2, tautmp)))

# some of tau is close to 0
with pytest.raises(pydantic.ValidationError):
tautmp = make_spatial_data(
value=0, unstructured=unstructured, seed=seed, random_magnitude=1e-38
)
mat = CustomDebye(eps_inf=eps_inf, coeffs=((eps1, tau1), (eps2, tautmp)))
# inconsistent coords
with pytest.raises(pydantic.ValidationError):
epstmp = make_spatial_data(value=0, dx=1, unstructured=unstructured, seed=seed)
Expand Down
13 changes: 12 additions & 1 deletion tests/test_components/test_medium.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,15 @@ def test_medium():
_ = td.Medium(conductivity=-1.0)


def test_validate_largest_pole_parameters():
    """Overly large pole parameters must be rejected at validation time."""
    bad_pole_sets = [
        [((-1e50 + 2j), (1 + 3j))],  # 'a' magnitude exceeds the allowed maximum
        [((-1 + 2j), (1e50 + 3j))],  # 'c' magnitude exceeds the allowed maximum
    ]
    for poles in bad_pole_sets:
        with pytest.raises(pydantic.ValidationError):
            _ = td.PoleResidue(poles=poles)

def test_medium_conversions():
n = 4.0
k = 1.0
Expand Down Expand Up @@ -255,13 +264,15 @@ def test_medium_dispersion():
def test_medium_dispersion_conversion():
m_PR = td.PoleResidue(eps_inf=1.0, poles=[((-1 + 2j), (1 + 3j)), ((-2 + 4j), (1 + 5j))])
m_SM = td.Sellmeier(coeffs=[(2, 3), (2, 4)])
m_SM_small_C = td.Sellmeier(coeffs=[(2, 3), (2, 1e-20)])
m_LZ = td.Lorentz(eps_inf=1.0, coeffs=[(1, 3, 2), (2, 4, 1)])
m_LZ2 = td.Lorentz(eps_inf=1.0, coeffs=[(1, 2, 3), (2, 1, 4)])
m_DR = td.Drude(eps_inf=1.0, coeffs=[(1, 3), (2, 4)])
m_DB = td.Debye(eps_inf=1.0, coeffs=[(1, 3), (2, 4)])
m_DB_small_tau = td.Debye(eps_inf=1.0, coeffs=[(1, 3), (2, 1e-50)])

freqs = np.linspace(0.01, 1, 1001)
for medium in [m_PR, m_SM, m_DB, m_LZ, m_DR, m_LZ2]: # , m_DB]:
for medium in [m_PR, m_SM, m_SM_small_C, m_DB, m_DB_small_tau, m_LZ, m_DR, m_LZ2]: # , m_DB]:
eps_model = medium.eps_model(freqs)
eps_pr = medium.pole_residue.eps_model(freqs)
np.testing.assert_allclose(eps_model, eps_pr)
Expand Down
65 changes: 55 additions & 10 deletions tidy3d/components/medium.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
ETA_0,
HBAR,
HERTZ,
LARGEST_FP_NUMBER,
MICROMETER,
MU_0,
PERMITTIVITY,
Expand Down Expand Up @@ -3367,6 +3368,18 @@ def _causality_validation(cls, val):
raise SetupError("For stable medium, 'Re(a_i)' must be non-positive.")
return val

@pd.validator("poles", always=True)
def _poles_largest_value(cls, val):
    """Reject poles whose parameters exceed the largest single-precision magnitude."""
    for pole_a, pole_c in val:
        a_magnitude = abs(_get_numpy_array(pole_a))
        if np.any(a_magnitude > LARGEST_FP_NUMBER):
            raise ValidationError(
                "The value of some 'a_i' is too large. They are unlikely to contribute to material dispersion."
            )
        c_magnitude = abs(_get_numpy_array(pole_c))
        if np.any(c_magnitude > LARGEST_FP_NUMBER):
            raise ValidationError("The value of some 'c_i' is too large.")
    return val

_validate_permittivity_modulation = DispersiveMedium._permittivity_modulation_validation()
_validate_conductivity_modulation = DispersiveMedium._conductivity_modulation_validation()

Expand Down Expand Up @@ -4261,14 +4274,19 @@ def eps_model(self, frequency: float) -> complex:
def _pole_residue_dict(self) -> dict:
"""Dict representation of Medium as a pole-residue model"""
poles = []
eps_inf = _ones_like(self.coeffs[0][0])
for B, C in self.coeffs:
beta = 2 * np.pi * C_0 / np.sqrt(C)
alpha = -0.5 * beta * B
a = 1j * beta
c = 1j * alpha
poles.append((a, c))
# for small C, it's equivalent to modifying eps_inf
if np.any(np.isclose(_get_numpy_array(C), 0)):
eps_inf += B
else:
beta = 2 * np.pi * C_0 / np.sqrt(C)
alpha = -0.5 * beta * B
a = 1j * beta
c = 1j * alpha
poles.append((a, c))
return {
"eps_inf": 1,
"eps_inf": eps_inf,
"poles": poles,
"frequency_range": self.frequency_range,
"name": self.name,
Expand Down Expand Up @@ -4439,6 +4457,18 @@ def _passivity_validation(cls, val, values):
)
return val

@pd.validator("coeffs", always=True)
def _coeffs_C_all_near_zero_or_much_greater(cls, val):
    """Require each 'C_i' array to be entirely near zero or entirely away from zero.

    The pole-residue conversion folds near-zero 'C' terms into 'eps_inf', so a
    single coefficient array mixing near-zero and non-zero entries is rejected.
    """
    for _, C_coeff in val:
        near_zero = np.isclose(_get_numpy_array(C_coeff), 0)
        is_mixed = np.any(near_zero) and not np.all(near_zero)
        if is_mixed:
            raise SetupError(
                "Coefficients 'C_i' are restricted to be "
                "either all near zero or much greater than 0."
            )
    return val

@cached_property
def is_spatially_uniform(self) -> bool:
"""Whether the medium is spatially uniform."""
Expand Down Expand Up @@ -5546,14 +5576,19 @@ def _pole_residue_dict(self):
"""Dict representation of Medium as a pole-residue model."""

poles = []
eps_inf = self.eps_inf
for de, tau in self.coeffs:
a = -2 * np.pi / tau + 0j
c = -0.5 * de * a
# for |tau| close to 0, it's equivalent to modifying eps_inf
if np.any(abs(_get_numpy_array(tau)) < 1 / 2 / np.pi / LARGEST_FP_NUMBER):
eps_inf = eps_inf + de
else:
a = -2 * np.pi / tau + 0j
c = -0.5 * de * a

poles.append((a, c))
poles.append((a, c))

return {
"eps_inf": self.eps_inf,
"eps_inf": eps_inf,
"poles": poles,
"frequency_range": self.frequency_range,
"name": self.name,
Expand Down Expand Up @@ -5689,6 +5724,16 @@ def _coeffs_correct_shape(cls, val, values):
raise SetupError("All terms in 'coeffs' must be real.")
return val

@pd.validator("coeffs", always=True)
def _coeffs_tau_all_sufficient_positive(cls, val):
    """Require every 'tau_i' to be sufficiently greater than zero."""
    # Below this bound the corresponding pole parameter (a = -2*pi/tau) would
    # exceed the largest single-precision magnitude — presumably why this exact
    # threshold was chosen; confirm against the pole-residue conversion.
    tau_lower_bound = 1 / 2 / np.pi / LARGEST_FP_NUMBER
    for _, tau_coeff in val:
        if np.any(_get_numpy_array(tau_coeff) < tau_lower_bound):
            raise SetupError(
                "Coefficients 'tau_i' are restricted to be sufficiently greater than 0."
            )
    return val

def _compute_derivatives(self, derivative_info: DerivativeInfo) -> AutogradFieldMap:
"""Adjoint derivatives for CustomDebye via analytic chain rule."""

Expand Down
5 changes: 5 additions & 0 deletions tidy3d/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -261,6 +261,11 @@
Large number used for comparing infinity.
"""

LARGEST_FP_NUMBER = 1e38
"""
Magnitude threshold on the order of the largest representable single-precision floating-point number.
"""

inf = np.inf
"""
Representation of infinity used within tidy3d.
Expand Down