Compare commits
3 Commits
ccfd4fbf04 ... 9ffe57b4d0

Author | SHA1 | Date
---|---|---
 | 9ffe57b4d0 |
 | 18d766f35a |
 | 9763c67657 |
@@ -157,7 +157,8 @@ def main():
         e[1][tuple(grid.shape//2)] += field_source(t)
         update_H(e, h)

-        print('iteration {}: average {} iterations per sec'.format(t, (t+1)/(time.perf_counter()-start)))
+        avg_rate = (t + 1) / (time.perf_counter() - start)
+        print(f'iteration {t}: average {avg_rate} iterations per sec')
         sys.stdout.flush()

         if t % 20 == 0:
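As an aside (editor's note, not part of this diff), the change above just splits the rate calculation out of the print call. A minimal standalone sketch of the same timing pattern, with a placeholder in place of the FDTD field updates:

import time

start = time.perf_counter()
for t in range(100):
    pass  # placeholder for the update_E()/update_H() work
    avg_rate = (t + 1) / (time.perf_counter() - start)
    print(f'iteration {t}: average {avg_rate} iterations per sec')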
@@ -684,11 +684,11 @@ def eigsolve(
         Qi = Qi_func(theta)
         c2 = numpy.cos(2 * theta)
         s2 = numpy.sin(2 * theta)
-        F = -0.5*s2 * (ZtAZ - DtAD) + c2 * symZtAD
+        F = -0.5 * s2 * (ZtAZ - DtAD) + c2 * symZtAD
         trace_deriv = _rtrace_AtB(Qi, F)

         G = Qi @ F.conj().T @ Qi.conj().T
-        H = -0.5*s2 * (ZtZ - DtD) + c2 * symZtD
+        H = -0.5 * s2 * (ZtZ - DtD) + c2 * symZtD
         trace_deriv -= _rtrace_AtB(G, H)

         trace_deriv *= 2
@@ -696,12 +696,12 @@ def eigsolve(

         U_sZtD = U @ symZtD

-        dE = 2.0 * (_rtrace_AtB(U, symZtAD) -
-                    _rtrace_AtB(ZtAZU, U_sZtD))
+        dE = 2.0 * (_rtrace_AtB(U, symZtAD)
+                    - _rtrace_AtB(ZtAZU, U_sZtD))

-        d2E = 2 * (_rtrace_AtB(U, DtAD) -
-                   _rtrace_AtB(ZtAZU, U @ (DtD - 4 * symZtD @ U_sZtD)) -
-                   4 * _rtrace_AtB(U, symZtAD @ U_sZtD))
+        d2E = 2 * (_rtrace_AtB(U, DtAD)
+                   - _rtrace_AtB(ZtAZU, U @ (DtD - 4 * symZtD @ U_sZtD))
+                   - 4 * _rtrace_AtB(U, symZtAD @ U_sZtD))

         # Newton-Raphson to find a root of the first derivative:
         theta = -dE / d2E
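For context (editor's note, not from the diff): `theta = -dE / d2E` is a single Newton-Raphson update applied to the first derivative, i.e. one step toward a stationary point. A minimal sketch of that idea, with a quadratic chosen purely for illustration:

def newton_step_on_derivative(dE: float, d2E: float) -> float:
    # One Newton-Raphson update for a root of the first derivative:
    # theta_new = theta_old - dE/d2E, with theta_old taken as 0 here.
    return -dE / d2E

# Illustration: minimize f(theta) = (theta - 1.3)**2, so dE = 2*(theta - 1.3), d2E = 2.
theta = 0.0
dE = 2 * (theta - 1.3)
d2E = 2.0
theta += newton_step_on_derivative(dE, d2E)
print(theta)  # reaches 1.3 in one step because f is quadratic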
@@ -781,7 +781,7 @@ def linmin(x_guess, f0, df0, x_max, f_tol=0.1, df_tol=min(tolerance, 1e-6), x_to
                                      x_min, x_max, isave, dsave)
     for i in range(int(1e6)):
         if task != 'F':
-            logging.info('search converged in {} iterations'.format(i))
+            logging.info(f'search converged in {i} iterations')
             break
         fx = f(x, dfx)
         x, fx, dfx, task = minpack2.dsrch(x, fx, dfx, f_tol, df_tol, x_tol, task,
@@ -43,7 +43,8 @@ def _scipy_qmr(
         nonlocal ii
         ii += 1
         if ii % 100 == 0:
-            logger.info('Solver residual at iteration {} : {}'.format(ii, norm(A @ xk - b)))
+            cur_norm = norm(A @ xk - b)
+            logger.info(f'Solver residual at iteration {ii} : {cur_norm}')

     if 'callback' in kwargs:
         def augmented_callback(xk: ArrayLike) -> None:
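Unrelated to the commit itself, a self-contained sketch of the same residual-logging-via-callback pattern with `scipy.sparse.linalg.qmr`; the test matrix and the logging interval here are arbitrary choices for illustration:

import logging

import numpy
from numpy.linalg import norm
from scipy import sparse
from scipy.sparse.linalg import qmr

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

A = sparse.diags(numpy.linspace(1.0, 2.0, 50)).tocsr()
b = numpy.ones(50)

ii = 0
def log_residual(xk):
    # qmr calls this with the current solution estimate once per iteration
    global ii
    ii += 1
    if ii % 10 == 0:
        logger.info(f'Solver residual at iteration {ii} : {norm(A @ xk - b)}')

x, info = qmr(A, b, callback=log_residual)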
@@ -253,7 +253,8 @@ def operator_e(
    mu_yx = sparse.diags(numpy.hstack((mu_parts[1], mu_parts[0])))
    mu_z_inv = sparse.diags(1 / mu_parts[2])

-    op = (omega * omega * mu_yx @ eps_xy
+    op = (
+        omega * omega * mu_yx @ eps_xy
         + mu_yx @ sparse.vstack((-Dby, Dbx)) @ mu_z_inv @ sparse.hstack((-Dfy, Dfx))
         + sparse.vstack((Dfx, Dfy)) @ eps_z_inv @ sparse.hstack((Dbx, Dby)) @ eps_xy
         )
@@ -321,7 +322,8 @@ def operator_h(
    mu_xy = sparse.diags(numpy.hstack((mu_parts[0], mu_parts[1])))
    mu_z_inv = sparse.diags(1 / mu_parts[2])

-    op = (omega * omega * eps_yx @ mu_xy
+    op = (
+        omega * omega * eps_yx @ mu_xy
         + eps_yx @ sparse.vstack((-Dfy, Dfx)) @ eps_z_inv @ sparse.hstack((-Dby, Dbx))
         + sparse.vstack((Dbx, Dby)) @ mu_z_inv @ sparse.hstack((Dfx, Dfy)) @ mu_xy
         )
@@ -420,7 +422,7 @@ def _normalized_fields(
    Sz_a = E[0] * numpy.conj(H[1] * phase) * dxes_real[0][1] * dxes_real[1][0]
    Sz_b = E[1] * numpy.conj(H[0] * phase) * dxes_real[0][0] * dxes_real[1][1]
    Sz_tavg = numpy.real(Sz_a.sum() - Sz_b.sum()) * 0.5    # 0.5 since E, H are assumed to be peak (not RMS) amplitudes
-    assert Sz_tavg > 0, 'Found a mode propagating in the wrong direction! Sz_tavg={}'.format(Sz_tavg)
+    assert Sz_tavg > 0, f'Found a mode propagating in the wrong direction! {Sz_tavg=}'

    energy = epsilon * e.conj() * e

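Side note (editor's, not from the diff): the `f'{Sz_tavg=}'` form in the new assertion message is the self-documenting f-string syntax added in Python 3.8; it expands to the expression text followed by its value:

Sz_tavg = -0.25
print(f'{Sz_tavg=}')  # prints: Sz_tavg=-0.25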
@@ -718,6 +720,109 @@ def e_err(
     return float(norm(op) / norm(e))


+def sensitivity(
+        e_norm: vcfdfield_t,
+        h_norm: vcfdfield_t,
+        wavenumber: complex,
+        omega: complex,
+        dxes: dx_lists_t,
+        epsilon: vfdfield_t,
+        mu: vfdfield_t | None = None,
+        ) -> vcfdfield_t:
+    r"""
+    Given a waveguide structure (`dxes`, `epsilon`, `mu`) and mode fields
+    (`e_norm`, `h_norm`, `wavenumber`, `omega`), calculates the sensitivity of the wavenumber
+    $\beta$ to changes in the dielectric structure $\epsilon$.
+
+    The output is a vector of the same size as `vec(epsilon)`, with each element specifying the
+    sensitivity of `wavenumber` to changes in the corresponding element in `vec(epsilon)`, i.e.
+
+    $$sens_{i} = \frac{\partial\beta}{\partial\epsilon_i}$$
+
+    An adjoint approach is used to calculate the sensitivity; the derivation is provided here:
+
+    Starting with the eigenvalue equation
+
+    $$\beta^2 E_{xy} = A_E E_{xy}$$
+
+    where $A_E$ is the waveguide operator from `operator_e()`, and $E_{xy} = \begin{bmatrix} E_x \\
+    E_y \end{bmatrix}$,
+    we can differentiate with respect to one of the $\epsilon$ elements (i.e. at one Yee grid point), $\epsilon_i$:
+
+    $$
+    (2 \beta) \partial_{\epsilon_i}(\beta) E_{xy} + \beta^2 \partial_{\epsilon_i} E_{xy}
+    = \partial_{\epsilon_i}(A_E) E_{xy} + A_E \partial_{\epsilon_i} E_{xy}
+    $$
+
+    We then multiply by $H_{yx}^\star = \begin{bmatrix}H_y^\star \\ -H_x^\star \end{bmatrix}$ from the left:
+
+    $$
+    (2 \beta) \partial_{\epsilon_i}(\beta) H_{yx}^\star E_{xy} + \beta^2 H_{yx}^\star \partial_{\epsilon_i} E_{xy}
+    = H_{yx}^\star \partial_{\epsilon_i}(A_E) E_{xy} + H_{yx}^\star A_E \partial_{\epsilon_i} E_{xy}
+    $$
+
+    However, $H_{yx}^\star$ is actually a left-eigenvector of $A_E$. This can be verified by inspecting
+    the form of `operator_h` ($A_H$) and comparing its conjugate transpose to `operator_e` ($A_E$). Also, note that
+    $H_{yx}^\star \cdot E_{xy} = H^\star \times E$ recalls the mode orthogonality relation. See doi:10.5194/ars-9-85-201
+    for a similar approach. Therefore,
+
+    $$
+    H_{yx}^\star A_E \partial_{\epsilon_i} E_{xy} = \beta^2 H_{yx}^\star \partial_{\epsilon_i} E_{xy}
+    $$
+
+    and we can simplify to
+
+    $$
+    \partial_{\epsilon_i}(\beta)
+    = \frac{1}{2 \beta} \frac{H_{yx}^\star \partial_{\epsilon_i}(A_E) E_{xy}}{H_{yx}^\star E_{xy}}
+    $$
+
+    This expression can be quickly calculated for all $i$ by writing out the various terms of
+    $\partial_{\epsilon_i} A_E$ and recognizing that the vector-matrix-vector products (i.e. scalars)
+    $sens_i = \vec{v}_{left} \partial_{\epsilon_i} (\epsilon_{xyz}) \vec{v}_{right}$, indexed by $i$, can be expressed as
+    elementwise multiplications $\vec{sens} = \vec{v}_{left} \star \vec{v}_{right}$
+
+
+    Args:
+        e_norm: Normalized, vectorized E_xyz field for the mode. E.g. as returned by `normalized_fields_e`.
+        h_norm: Normalized, vectorized H_xyz field for the mode. E.g. as returned by `normalized_fields_e`.
+        wavenumber: Propagation constant for the mode. The z-axis is assumed to be continuous (i.e. without numerical dispersion).
+        omega: The angular frequency of the system.
+        dxes: Grid parameters `[dx_e, dx_h]` as described in `meanas.fdmath.types` (2D)
+        epsilon: Vectorized dielectric constant grid
+        mu: Vectorized magnetic permeability grid (default 1 everywhere)
+
+    Returns:
+        Sensitivity vector $\partial\beta / \partial\epsilon_i$, with one entry per element of `vec(epsilon)`.
+    """
+    if mu is None:
+        mu = numpy.ones_like(epsilon)
+
+    Dfx, Dfy = deriv_forward(dxes[0])
+    Dbx, Dby = deriv_back(dxes[1])
+
+    eps_x, eps_y, eps_z = numpy.split(epsilon, 3)
+    eps_xy = sparse.diags(numpy.hstack((eps_x, eps_y)))
+    eps_z_inv = sparse.diags(1 / eps_z)
+
+    mu_x, mu_y, _mu_z = numpy.split(mu, 3)
+    mu_yx = sparse.diags(numpy.hstack((mu_y, mu_x)))
+
+    dv_e = dxes[0][0][:, None, None] * dxes[0][1][None, :, None] * dxes[0][2][None, None, :]
+    dv_h = dxes[1][0][:, None, None] * dxes[1][1][None, :, None] * dxes[1][2][None, None, :]
+    ev_xy = numpy.concatenate(numpy.split(e_norm, 3)[:2]) * dv_e
+    hx, hy, hz = numpy.split(h_norm, 3)
+    hv_yx_conj = numpy.conj(numpy.concatenate([hy, -hx])) * dv_h
+
+    sens_xy1 = (hv_yx_conj @ (omega * omega * mu_yx)) * ev_xy
+    sens_xy2 = (hv_yx_conj @ sparse.vstack((Dfx, Dfy)) @ eps_z_inv @ sparse.hstack((Dbx, Dby))) * ev_xy
+    sens_z = (hv_yx_conj @ sparse.vstack((Dfx, Dfy)) @ (-eps_z_inv * eps_z_inv)) * (sparse.hstack((Dbx, Dby)) @ eps_xy @ ev_xy)
+    norm = hv_yx_conj @ ev_xy
+
+    sens_tot = numpy.concatenate([sens_xy1 + sens_xy2, sens_z]) / (2 * wavenumber * norm)
+    return sens_tot
+
+
 def solve_modes(
         mode_numbers: list[int],
         omega: complex,
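Hypothetical usage sketch (editor's, not part of this changeset): the sensitivity vector returned by the new `sensitivity()` could be split back into per-component grids to see where a permittivity change moves $\beta$ the most. The variables `e_norm`, `h_norm`, `wavenumber`, `omega`, `dxes`, and `epsilon` are assumed to come from an earlier waveguide mode solve (not shown), and the 3-way split mirrors the `vec(epsilon)` ordering used inside the function:

import numpy

# Assumes e_norm, h_norm, wavenumber, omega, dxes, epsilon were obtained
# from a prior mode solve (hypothetical setup, not shown here).
sens = sensitivity(e_norm, h_norm, wavenumber, omega, dxes, epsilon)

# vec(epsilon) concatenates the x, y, z components, so split the same way:
sens_x, sens_y, sens_z = numpy.split(sens, 3)

# Largest-magnitude entry marks the cell where beta is most sensitive to epsilon.
i_max = numpy.argmax(numpy.abs(sens))
print(f'max |dbeta/deps| = {numpy.abs(sens[i_max])} at flattened index {i_max}')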
@@ -29,9 +29,9 @@ def shift_circ(
        Sparse matrix for performing the circular shift.
    """
    if len(shape) not in (2, 3):
-        raise Exception('Invalid shape: {}'.format(shape))
+        raise Exception(f'Invalid shape: {shape}')
    if axis not in range(len(shape)):
-        raise Exception('Invalid direction: {}, shape is {}'.format(axis, shape))
+        raise Exception(f'Invalid direction: {axis}, shape is {shape}')

    shifts = [abs(shift_distance) if a == axis else 0 for a in range(3)]
    shifted_diags = [(numpy.arange(n) + s) % n for n, s in zip(shape, shifts)]
@@ -69,12 +69,11 @@ def shift_with_mirror(
        Sparse matrix for performing the shift-with-mirror.
    """
    if len(shape) not in (2, 3):
-        raise Exception('Invalid shape: {}'.format(shape))
+        raise Exception(f'Invalid shape: {shape}')
    if axis not in range(len(shape)):
-        raise Exception('Invalid direction: {}, shape is {}'.format(axis, shape))
+        raise Exception(f'Invalid direction: {axis}, shape is {shape}')
    if shift_distance >= shape[axis]:
-        raise Exception('Shift ({}) is too large for axis {} of size {}'.format(
-            shift_distance, axis, shape[axis]))
+        raise Exception(f'Shift ({shift_distance}) is too large for axis {axis} of size {shape[axis]}')

    def mirrored_range(n: int, s: int) -> NDArray[numpy.int_]:
        v = numpy.arange(n) + s
@@ -198,7 +197,7 @@ def avg_forward(axis: int, shape: Sequence[int]) -> sparse.spmatrix:
        Sparse matrix for forward average operation.
    """
    if len(shape) not in (2, 3):
-        raise Exception('Invalid shape: {}'.format(shape))
+        raise Exception(f'Invalid shape: {shape}')

    n = numpy.prod(shape)
    return 0.5 * (sparse.eye(n) + shift_circ(axis, shape))
@@ -15,13 +15,17 @@ def conducting_boundary(
        ) -> tuple[fdfield_updater_t, fdfield_updater_t]:
    dirs = [0, 1, 2]
    if direction not in dirs:
-        raise Exception('Invalid direction: {}'.format(direction))
+        raise Exception(f'Invalid direction: {direction}')
    dirs.remove(direction)
    u, v = dirs

+    boundary_slice: list[Any]
+    shifted1_slice: list[Any]
+    shifted2_slice: list[Any]
+
    if polarity < 0:
-        boundary_slice = [slice(None)] * 3       # type: list[Any]
-        shifted1_slice = [slice(None)] * 3       # type: list[Any]
+        boundary_slice = [slice(None)] * 3
+        shifted1_slice = [slice(None)] * 3
        boundary_slice[direction] = 0
        shifted1_slice[direction] = 1

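Editor's note, not from the diff: the hunk above replaces `# type:` comments with PEP 526 variable annotations declared once before the branches that assign them. A minimal self-contained illustration of the same pattern, using a made-up helper for demonstration only:

from typing import Any

def pick_slices(polarity: int) -> list[Any]:
    boundary_slice: list[Any]  # annotate once, assign in either branch
    if polarity < 0:
        boundary_slice = [slice(None)] * 3
    else:
        boundary_slice = [slice(None), 0, slice(None)]
    return boundary_slice

print(pick_slices(-1))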
@@ -42,7 +46,7 @@ def conducting_boundary(
    if polarity > 0:
        boundary_slice = [slice(None)] * 3
        shifted1_slice = [slice(None)] * 3
-        shifted2_slice = [slice(None)] * 3       # type: list[Any]
+        shifted2_slice = [slice(None)] * 3
        boundary_slice[direction] = -1
        shifted1_slice[direction] = -2
        shifted2_slice[direction] = -3
@@ -64,4 +68,4 @@ def conducting_boundary(

        return ep, hp

-    raise Exception('Bad polarity: {}'.format(polarity))
+    raise Exception(f'Bad polarity: {polarity}')
@@ -33,10 +33,10 @@ def cpml_params(
        ) -> dict[str, Any]:

    if axis not in range(3):
-        raise Exception('Invalid axis: {}'.format(axis))
+        raise Exception(f'Invalid axis: {axis}')

    if polarity not in (-1, 1):
-        raise Exception('Invalid polarity: {}'.format(polarity))
+        raise Exception(f'Invalid polarity: {polarity}')

    if thickness <= 2:
        raise Exception('It would be wise to have a pml with 4+ cells of thickness')
@@ -101,7 +101,7 @@ def test_poynting_divergence(sim: 'TDResult') -> None:
 def test_poynting_planes(sim: 'TDResult') -> None:
    mask = (sim.js[0] != 0).any(axis=0)
    if mask.sum() > 1:
-        pytest.skip('test_poynting_planes can only test single point sources, got {}'.format(mask.sum()))
+        pytest.skip(f'test_poynting_planes can only test single point sources, got {mask.sum()}')

    args: dict[str, Any] = {
        'dxes': sim.dxes,