Compare commits

24 Commits

| SHA1 |
|---|
| 36431cd0e4 |
| 739e96df3d |
| 63e7cb949f |
| c53a3c4d84 |
| 5dd9994e76 |
| 1021768e30 |
| 95e923d7b7 |
| 3f8802cb5f |
| 43bb0ba379 |
| e19968bb9f |
| 43f038d761 |
| d5fca741d1 |
| ca94ad1b25 |
| 10f26c12b4 |
| ee51c7db49 |
| 36bea6a593 |
| b16b35d84a |
| 6f3ae5a64f |
| 99c22d572f |
| 2f00baf0c6 |
| 2712d96f2a |
| dc3e733e7f |
| 95e3f71b40 |
| 639f88bba8 |
@@ -157,7 +157,8 @@ def main():
e[1][tuple(grid.shape//2)] += field_source(t)
update_H(e, h)

print('iteration {}: average {} iterations per sec'.format(t, (t+1)/(time.perf_counter()-start)))
avg_rate = (t + 1)/(time.perf_counter() - start)
print(f'iteration {t}: average {avg_rate} iterations per sec')
sys.stdout.flush()

if t % 20 == 0:
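The refactor above only splits the rate calculation out of the print call. The same bookkeeping can be exercised on its own; this is a minimal standalone sketch with the field updates stubbed out, not the example script itself:

```python
import time

start = time.perf_counter()
for t in range(1000):
    # stand-in for the update_E / update_H field updates
    if t % 100 == 0:
        avg_rate = (t + 1) / (time.perf_counter() - start)
        print(f'iteration {t}: average {avg_rate} iterations per sec')
```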
@@ -3,7 +3,7 @@ import numpy
from numpy.linalg import norm

from meanas.fdmath import vec, unvec
from meanas.fdfd import waveguide_mode, functional, scpml
from meanas.fdfd import waveguide_cyl, functional, scpml
from meanas.fdfd.solvers import generic as generic_solver

import gridlock

@@ -44,22 +44,27 @@ def test1(solver=generic_solver):
# #### Create the grid and draw the device ####
grid = gridlock.Grid(edge_coords)
epsilon = grid.allocate(n_air**2, dtype=numpy.float32)
grid.draw_cuboid(epsilon, center=center, dimensions=[8e3, w, th], eps=n_wg**2)
grid.draw_cuboid(epsilon, center=center, dimensions=[8e3, w, th], foreground=n_wg**2)

dxes = [grid.dxyz, grid.autoshifted_dxyz()]
for a in (1, 2):
for p in (-1, 1):
dxes = scpml.stretch_with_scpml(dxes, omega=omega, axis=a, polarity=p,
thickness=pml_thickness)
dxes = scpml.stretch_with_scpml(
dxes,
omega=omega,
axis=a,
polarity=p,
thickness=pml_thickness,
)

wg_args = {
'omega': omega,
'dxes': [(d[1], d[2]) for d in dxes],
'epsilon': vec(g.transpose([1, 2, 0]) for g in epsilon),
'epsilon': vec(epsilon.transpose([0, 2, 3, 1])),
'r0': r0,
}

wg_results = waveguide_mode.solve_waveguide_mode_cylindrical(mode_number=0, **wg_args)
wg_results = waveguide_cyl.solve_mode(mode_number=0, **wg_args)

E = wg_results['E']
@@ -70,20 +75,17 @@ def test1(solver=generic_solver):
'''
Plot results
'''
def pcolor(v):
def pcolor(fig, ax, v, title):
vmax = numpy.max(numpy.abs(v))
pyplot.pcolor(v.T, cmap='seismic', vmin=-vmax, vmax=vmax)
pyplot.axis('equal')
pyplot.colorbar()
mappable = ax.pcolormesh(v.T, cmap='seismic', vmin=-vmax, vmax=vmax)
ax.set_aspect('equal', adjustable='box')
ax.set_title(title)
ax.figure.colorbar(mappable)

pyplot.figure()
pyplot.subplot(2, 2, 1)
pcolor(numpy.real(E[0][:, :]))
pyplot.subplot(2, 2, 2)
pcolor(numpy.real(E[1][:, :]))
pyplot.subplot(2, 2, 3)
pcolor(numpy.real(E[2][:, :]))
pyplot.subplot(2, 2, 4)
fig, axes = pyplot.subplots(2, 2)
pcolor(fig, axes[0][0], numpy.real(E[0]), 'Ex')
pcolor(fig, axes[0][1], numpy.real(E[1]), 'Ey')
pcolor(fig, axes[1][0], numpy.real(E[2]), 'Ez')
pyplot.show()
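The plotting change moves from the implicit `pyplot` state machine to an explicit figure/axes workflow with a title and colorbar per panel. A minimal standalone sketch of the refactored helper, using synthetic arrays in place of the solved mode fields (numpy and matplotlib only; the data here is made up):

```python
import numpy
from matplotlib import pyplot


def pcolor(fig, ax, v, title):
    # Symmetric color limits about zero, one colorbar per panel
    vmax = numpy.max(numpy.abs(v))
    mappable = ax.pcolormesh(v.T, cmap='seismic', vmin=-vmax, vmax=vmax)
    ax.set_aspect('equal', adjustable='box')
    ax.set_title(title)
    ax.figure.colorbar(mappable)


# Synthetic stand-ins for numpy.real(E[0]), numpy.real(E[1]), numpy.real(E[2])
xx, yy = numpy.meshgrid(numpy.linspace(0, 1, 40), numpy.linspace(0, 1, 30), indexing='ij')
fields = [numpy.sin(2 * numpy.pi * (k + 1) * xx) * numpy.cos(2 * numpy.pi * yy) for k in range(3)]

fig, axes = pyplot.subplots(2, 2)
pcolor(fig, axes[0][0], fields[0], 'Ex')
pcolor(fig, axes[0][1], fields[1], 'Ey')
pcolor(fig, axes[1][0], fields[2], 'Ez')
pyplot.show()
```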
@@ -11,7 +11,8 @@ __author__ = 'Jan Petykiewicz'

try:
with open(pathlib.Path(__file__).parent / 'README.md', 'r') as f:
readme_path = pathlib.Path(__file__).parent / 'README.md'
with readme_path.open('r') as f:
__doc__ = f.read()
except Exception:
pass
@@ -1,12 +1,12 @@
"""
Solvers for eigenvalue / eigenvector problems
"""
from typing import Callable
from collections.abc import Callable
import numpy
from numpy.typing import NDArray, ArrayLike
from numpy.linalg import norm
from scipy import sparse # type: ignore
import scipy.sparse.linalg as spalg # type: ignore
from scipy import sparse
import scipy.sparse.linalg as spalg


def power_iteration(
@@ -25,8 +25,9 @@ def power_iteration(
Returns:
(Largest-magnitude eigenvalue, Corresponding eigenvector estimate)
"""
rng = numpy.random.default_rng()
if guess_vector is None:
v = numpy.random.rand(operator.shape[0]) + 1j * numpy.random.rand(operator.shape[0])
v = rng.random(operator.shape[0]) + 1j * rng.random(operator.shape[0])
else:
v = guess_vector
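The change above replaces the legacy `numpy.random.rand` global-state call with a `numpy.random.default_rng()` Generator when seeding the complex starting vector. A self-contained sketch of the same pattern inside a bare-bones power iteration (an illustration of the idiom, not the library's implementation):

```python
import numpy
from numpy.linalg import norm


def power_iteration(operator, guess_vector=None, iterations=100):
    # Estimate the largest-magnitude eigenvalue/eigenvector of a square matrix
    rng = numpy.random.default_rng()
    if guess_vector is None:
        v = rng.random(operator.shape[0]) + 1j * rng.random(operator.shape[0])
    else:
        v = guess_vector
    for _ in range(iterations):
        v = operator @ v
        v /= norm(v)
    lm_eigval = v.conj() @ (operator @ v)   # Rayleigh quotient of the final iterate
    return lm_eigval, v


eigval, _eigvec = power_iteration(numpy.diag([3.0, -1.0, 0.5]))
print(eigval)   # converges toward 3.0
```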
@@ -91,5 +91,12 @@ $$

"""
from . import solvers, operators, functional, scpml, waveguide_2d, waveguide_3d
from . import (
solvers as solvers,
operators as operators,
functional as functional,
scpml as scpml,
waveguide_2d as waveguide_2d,
waveguide_3d as waveguide_3d,
)
# from . import farfield, bloch TODO
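The `solvers as solvers` spelling is the explicit re-export idiom: under strict settings (for example mypy with implicit re-exports disabled) a bare `from . import solvers` in an `__init__.py` is treated as an implementation detail, while `import x as x` marks the name as deliberately public. The `PLC0414` ("import x as x") entry added to the ruff ignore list later in this diff exists to permit exactly this style. A minimal sketch with a hypothetical package layout:

```python
# mypkg/__init__.py  (hypothetical package, shown only to illustrate the idiom)
from . import (
    solvers as solvers,        # explicit re-export: kept in the public API by type checkers
    operators as operators,
)

# The one-line form behaves identically at runtime, but strict checkers may
# treat the names as private rather than re-exported:
# from . import solvers, operators
```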
@@ -94,16 +94,17 @@ This module contains functions for generating and solving the

"""

from typing import Callable, Any, cast, Sequence
from typing import Any, cast
from collections.abc import Callable, Sequence
import logging
import numpy
from numpy import pi, real, trace
from numpy.fft import fftfreq
from numpy.typing import NDArray, ArrayLike
import scipy # type: ignore
import scipy.optimize # type: ignore
from scipy.linalg import norm # type: ignore
import scipy.sparse.linalg as spalg # type: ignore
import scipy
import scipy.optimize
from scipy.linalg import norm
import scipy.sparse.linalg as spalg

from ..fdmath import fdfield_t, cfdfield_t
@@ -114,7 +115,6 @@ logger = logging.getLogger(__name__)
try:
import pyfftw.interfaces.numpy_fft # type: ignore
import pyfftw.interfaces # type: ignore
import multiprocessing
logger.info('Using pyfftw')

pyfftw.interfaces.cache.enable()
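The block above keeps pyfftw as an optional accelerator. The same "use pyfftw's drop-in numpy interface when present, otherwise fall back to numpy.fft" pattern in isolation (the fallback and the example transform are this sketch's additions; the pyfftw calls themselves are the standard interface):

```python
import numpy

try:
    import pyfftw.interfaces
    import pyfftw.interfaces.numpy_fft as fft_backend
    pyfftw.interfaces.cache.enable()       # keep FFTW plans cached between calls
except ImportError:
    from numpy import fft as fft_backend   # plain numpy fallback

spectrum = fft_backend.fftn(numpy.ones((8, 8, 8)))
print(spectrum.shape)
```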
@ -155,7 +155,7 @@ def generate_kmn(
|
||||
All are given in the xyz basis (e.g. `|k|[0,0,0] = norm(G_matrix @ k0)`).
|
||||
"""
|
||||
k0 = numpy.array(k0)
|
||||
G_matrix = numpy.array(G_matrix, copy=False)
|
||||
G_matrix = numpy.asarray(G_matrix)
|
||||
|
||||
Gi_grids = numpy.array(numpy.meshgrid(*(fftfreq(n, 1 / n) for n in shape[:3]), indexing='ij'))
|
||||
Gi = numpy.moveaxis(Gi_grids, 0, -1)
|
||||
@ -232,7 +232,7 @@ def maxwell_operator(
|
||||
Raveled conv(1/mu_k, ik x conv(1/eps_k, ik x h_mn)), returned
|
||||
and overwritten in-place of `h`.
|
||||
"""
|
||||
hin_m, hin_n = [hi.reshape(shape) for hi in numpy.split(h, 2)]
|
||||
hin_m, hin_n = (hi.reshape(shape) for hi in numpy.split(h, 2))
|
||||
|
||||
#{d,e,h}_xyz fields are complex 3-fields in (1/x, 1/y, 1/z) basis
|
||||
|
||||
@ -303,12 +303,12 @@ def hmn_2_exyz(
|
||||
k_mag, m, n = generate_kmn(k0, G_matrix, shape)
|
||||
|
||||
def operator(h: NDArray[numpy.complex128]) -> cfdfield_t:
|
||||
hin_m, hin_n = [hi.reshape(shape) for hi in numpy.split(h, 2)]
|
||||
hin_m, hin_n = (hi.reshape(shape) for hi in numpy.split(h, 2))
|
||||
d_xyz = (n * hin_m
|
||||
- m * hin_n) * k_mag # noqa: E128
|
||||
|
||||
# divide by epsilon
|
||||
return numpy.array([ei for ei in numpy.moveaxis(ifftn(d_xyz, axes=range(3)) / epsilon, 3, 0)]) # TODO avoid copy
|
||||
return numpy.moveaxis(ifftn(d_xyz, axes=range(3)) / epsilon, 3, 0)
|
||||
|
||||
return operator
|
||||
|
||||
@ -341,7 +341,7 @@ def hmn_2_hxyz(
|
||||
_k_mag, m, n = generate_kmn(k0, G_matrix, shape)
|
||||
|
||||
def operator(h: NDArray[numpy.complex128]) -> cfdfield_t:
|
||||
hin_m, hin_n = [hi.reshape(shape) for hi in numpy.split(h, 2)]
|
||||
hin_m, hin_n = (hi.reshape(shape) for hi in numpy.split(h, 2))
|
||||
h_xyz = (m * hin_m
|
||||
+ n * hin_n) # noqa: E128
|
||||
return numpy.array([ifftn(hi) for hi in numpy.moveaxis(h_xyz, 3, 0)])
|
||||
@ -394,7 +394,7 @@ def inverse_maxwell_operator_approx(
|
||||
Returns:
|
||||
Raveled ik x conv(eps_k, ik x conv(mu_k, h_mn))
|
||||
"""
|
||||
hin_m, hin_n = [hi.reshape(shape) for hi in numpy.split(h, 2)]
|
||||
hin_m, hin_n = (hi.reshape(shape) for hi in numpy.split(h, 2))
|
||||
|
||||
#{d,e,h}_xyz fields are complex 3-fields in (1/x, 1/y, 1/z) basis
|
||||
|
||||
@ -538,7 +538,7 @@ def eigsolve(
|
||||
`(eigenvalues, eigenvectors)` where `eigenvalues[i]` corresponds to the
|
||||
vector `eigenvectors[i, :]`
|
||||
"""
|
||||
k0 = numpy.array(k0, copy=False)
|
||||
k0 = numpy.asarray(k0)
|
||||
|
||||
h_size = 2 * epsilon[0].size
|
||||
|
||||
@ -561,11 +561,12 @@ def eigsolve(
|
||||
prev_theta = 0.5
|
||||
D = numpy.zeros(shape=y_shape, dtype=complex)
|
||||
|
||||
rng = numpy.random.default_rng()
|
||||
Z: NDArray[numpy.complex128]
|
||||
if y0 is None:
|
||||
Z = numpy.random.rand(*y_shape) + 1j * numpy.random.rand(*y_shape)
|
||||
Z = rng.random(y_shape) + 1j * rng.random(y_shape)
|
||||
else:
|
||||
Z = numpy.array(y0, copy=False).T
|
||||
Z = numpy.asarray(y0).T
|
||||
|
||||
while True:
|
||||
Z *= num_modes / norm(Z)
|
||||
@ -573,7 +574,7 @@ def eigsolve(
|
||||
try:
|
||||
U = numpy.linalg.inv(ZtZ)
|
||||
except numpy.linalg.LinAlgError:
|
||||
Z = numpy.random.rand(*y_shape) + 1j * numpy.random.rand(*y_shape)
|
||||
Z = rng.random(y_shape) + 1j * rng.random(y_shape)
|
||||
continue
|
||||
|
||||
trace_U = real(trace(U))
|
||||
@ -646,8 +647,7 @@ def eigsolve(
|
||||
|
||||
Qi_memo: list[float | None] = [None, None]
|
||||
|
||||
def Qi_func(theta: float) -> float:
|
||||
nonlocal Qi_memo
|
||||
def Qi_func(theta: float, Qi_memo=Qi_memo, ZtZ=ZtZ, DtD=DtD, symZtD=symZtD) -> float: # noqa: ANN001
|
||||
if Qi_memo[0] == theta:
|
||||
return cast(float, Qi_memo[1])
|
||||
|
||||
@ -656,7 +656,7 @@ def eigsolve(
|
||||
Q = c * c * ZtZ + s * s * DtD + 2 * s * c * symZtD
|
||||
try:
|
||||
Qi = numpy.linalg.inv(Q)
|
||||
except numpy.linalg.LinAlgError:
|
||||
except numpy.linalg.LinAlgError as err:
|
||||
logger.info('taylor Qi')
|
||||
# if c or s small, taylor expand
|
||||
if c < 1e-4 * s and c != 0:
|
||||
@ -666,12 +666,12 @@ def eigsolve(
|
||||
ZtZi = numpy.linalg.inv(ZtZ)
|
||||
Qi = ZtZi / (c * c) - 2 * s / (c * c * c) * (ZtZi @ (ZtZi @ symZtD).conj().T)
|
||||
else:
|
||||
raise Exception('Inexplicable singularity in trace_func')
|
||||
raise Exception('Inexplicable singularity in trace_func') from err
|
||||
Qi_memo[0] = theta
|
||||
Qi_memo[1] = cast(float, Qi)
|
||||
return cast(float, Qi)
|
||||
|
||||
def trace_func(theta: float) -> float:
|
||||
def trace_func(theta: float, ZtAZ=ZtAZ, DtAD=DtAD, symZtAD=symZtAD) -> float: # noqa: ANN001
|
||||
c = numpy.cos(theta)
|
||||
s = numpy.sin(theta)
|
||||
Qi = Qi_func(theta)
|
||||
@ -680,7 +680,7 @@ def eigsolve(
|
||||
return numpy.abs(trace)
|
||||
|
||||
if False:
|
||||
def trace_deriv(theta):
|
||||
def trace_deriv(theta, sgn: int = sgn, ZtAZ=ZtAZ, DtAD=DtAD, symZtD=symZtD, symZtAD=symZtAD, ZtZ=ZtZ, DtD=DtD): # noqa: ANN001
|
||||
Qi = Qi_func(theta)
|
||||
c2 = numpy.cos(2 * theta)
|
||||
s2 = numpy.sin(2 * theta)
|
||||
@ -696,12 +696,12 @@ def eigsolve(
|
||||
|
||||
U_sZtD = U @ symZtD
|
||||
|
||||
dE = 2.0 * (_rtrace_AtB(U, symZtAD) -
|
||||
_rtrace_AtB(ZtAZU, U_sZtD))
|
||||
dE = 2.0 * (_rtrace_AtB(U, symZtAD)
|
||||
- _rtrace_AtB(ZtAZU, U_sZtD))
|
||||
|
||||
d2E = 2 * (_rtrace_AtB(U, DtAD) -
|
||||
_rtrace_AtB(ZtAZU, U @ (DtD - 4 * symZtD @ U_sZtD)) -
|
||||
4 * _rtrace_AtB(U, symZtAD @ U_sZtD))
|
||||
d2E = 2 * (_rtrace_AtB(U, DtAD)
|
||||
- _rtrace_AtB(ZtAZU, U @ (DtD - 4 * symZtD @ U_sZtD))
|
||||
- 4 * _rtrace_AtB(U, symZtAD @ U_sZtD))
|
||||
|
||||
# Newton-Raphson to find a root of the first derivative:
|
||||
theta = -dE / d2E
|
||||
@ -781,7 +781,7 @@ def linmin(x_guess, f0, df0, x_max, f_tol=0.1, df_tol=min(tolerance, 1e-6), x_to
|
||||
x_min, x_max, isave, dsave)
|
||||
for i in range(int(1e6)):
|
||||
if task != 'F':
|
||||
logging.info('search converged in {} iterations'.format(i))
|
||||
logging.info(f'search converged in {i} iterations')
|
||||
break
|
||||
fx = f(x, dfx)
|
||||
x, fx, dfx, task = minpack2.dsrch(x, fx, dfx, f_tol, df_tol, x_tol, task,
|
||||
|
@ -1,7 +1,8 @@
|
||||
"""
|
||||
Functions for performing near-to-farfield transformation (and the reverse).
|
||||
"""
|
||||
from typing import Any, Sequence, cast
|
||||
from typing import Any, cast
|
||||
from collections.abc import Sequence
|
||||
import numpy
|
||||
from numpy.fft import fft2, fftshift, fftfreq, ifft2, ifftshift
|
||||
from numpy import pi
|
||||
|
@ -5,7 +5,7 @@ Functional versions of many FDFD operators. These can be useful for performing
|
||||
The functions generated here expect `cfdfield_t` inputs with shape (3, X, Y, Z),
|
||||
e.g. E = [E_x, E_y, E_z] where each (complex) component has shape (X, Y, Z)
|
||||
"""
|
||||
from typing import Callable
|
||||
from collections.abc import Callable
|
||||
import numpy
|
||||
|
||||
from ..fdmath import dx_lists_t, fdfield_t, cfdfield_t, cfdfield_updater_t
|
||||
@ -47,7 +47,6 @@ def e_full(
|
||||
|
||||
if mu is None:
|
||||
return op_1
|
||||
else:
|
||||
return op_mu
|
||||
|
||||
|
||||
@ -84,7 +83,6 @@ def eh_full(
|
||||
|
||||
if mu is None:
|
||||
return op_1
|
||||
else:
|
||||
return op_mu
|
||||
|
||||
|
||||
@ -116,7 +114,6 @@ def e2h(
|
||||
|
||||
if mu is None:
|
||||
return e2h_1_1
|
||||
else:
|
||||
return e2h_mu
|
||||
|
||||
|
||||
@ -151,7 +148,6 @@ def m2j(
|
||||
|
||||
if mu is None:
|
||||
return m2j_1
|
||||
else:
|
||||
return m2j_mu
|
||||
|
||||
|
||||
|
@ -28,7 +28,7 @@ The following operators are included:
|
||||
"""
|
||||
|
||||
import numpy
|
||||
import scipy.sparse as sparse # type: ignore
|
||||
from scipy import sparse
|
||||
|
||||
from ..fdmath import vec, dx_lists_t, vfdfield_t, vcfdfield_t
|
||||
from ..fdmath.operators import shift_with_mirror, shift_circ, curl_forward, curl_back
|
||||
@ -321,11 +321,11 @@ def poynting_e_cross(e: vcfdfield_t, dxes: dx_lists_t) -> sparse.spmatrix:
|
||||
"""
|
||||
shape = [len(dx) for dx in dxes[0]]
|
||||
|
||||
fx, fy, fz = [shift_circ(i, shape, 1) for i in range(3)]
|
||||
fx, fy, fz = (shift_circ(i, shape, 1) for i in range(3))
|
||||
|
||||
dxag = [dx.ravel(order='C') for dx in numpy.meshgrid(*dxes[0], indexing='ij')]
|
||||
dxbg = [dx.ravel(order='C') for dx in numpy.meshgrid(*dxes[1], indexing='ij')]
|
||||
Ex, Ey, Ez = [ei * da for ei, da in zip(numpy.split(e, 3), dxag)]
|
||||
Ex, Ey, Ez = (ei * da for ei, da in zip(numpy.split(e, 3), dxag, strict=True))
|
||||
|
||||
block_diags = [[ None, fx @ -Ez, fx @ Ey],
|
||||
[ fy @ Ez, None, fy @ -Ex],
|
||||
@ -349,11 +349,11 @@ def poynting_h_cross(h: vcfdfield_t, dxes: dx_lists_t) -> sparse.spmatrix:
|
||||
"""
|
||||
shape = [len(dx) for dx in dxes[0]]
|
||||
|
||||
fx, fy, fz = [shift_circ(i, shape, 1) for i in range(3)]
|
||||
fx, fy, fz = (shift_circ(i, shape, 1) for i in range(3))
|
||||
|
||||
dxag = [dx.ravel(order='C') for dx in numpy.meshgrid(*dxes[0], indexing='ij')]
|
||||
dxbg = [dx.ravel(order='C') for dx in numpy.meshgrid(*dxes[1], indexing='ij')]
|
||||
Hx, Hy, Hz = [sparse.diags(hi * db) for hi, db in zip(numpy.split(h, 3), dxbg)]
|
||||
Hx, Hy, Hz = (sparse.diags(hi * db) for hi, db in zip(numpy.split(h, 3), dxbg, strict=True))
|
||||
|
||||
P = (sparse.bmat(
|
||||
[[ None, -Hz @ fx, Hy @ fx],
|
||||
|
@ -2,7 +2,7 @@
|
||||
Functions for creating stretched coordinate perfectly matched layer (PML) absorbers.
|
||||
"""
|
||||
|
||||
from typing import Sequence, Callable
|
||||
from collections.abc import Sequence, Callable
|
||||
|
||||
import numpy
|
||||
from numpy.typing import NDArray
|
||||
|
@ -2,13 +2,14 @@
|
||||
Solvers and solver interface for FDFD problems.
|
||||
"""
|
||||
|
||||
from typing import Callable, Dict, Any, Optional
|
||||
from typing import Any
|
||||
from collections.abc import Callable
|
||||
import logging
|
||||
|
||||
import numpy
|
||||
from numpy.typing import ArrayLike, NDArray
|
||||
from numpy.linalg import norm
|
||||
import scipy.sparse.linalg # type: ignore
|
||||
import scipy.sparse.linalg
|
||||
|
||||
from ..fdmath import dx_lists_t, vfdfield_t, vcfdfield_t
|
||||
from . import operators
|
||||
@ -43,7 +44,8 @@ def _scipy_qmr(
|
||||
nonlocal ii
|
||||
ii += 1
|
||||
if ii % 100 == 0:
|
||||
logger.info('Solver residual at iteration {} : {}'.format(ii, norm(A @ xk - b)))
|
||||
cur_norm = norm(A @ xk - b)
|
||||
logger.info(f'Solver residual at iteration {ii} : {cur_norm}')
|
||||
|
||||
if 'callback' in kwargs:
|
||||
def augmented_callback(xk: ArrayLike) -> None:
|
||||
@ -67,12 +69,12 @@ def generic(
|
||||
dxes: dx_lists_t,
|
||||
J: vcfdfield_t,
|
||||
epsilon: vfdfield_t,
|
||||
mu: Optional[vfdfield_t] = None,
|
||||
pec: Optional[vfdfield_t] = None,
|
||||
pmc: Optional[vfdfield_t] = None,
|
||||
mu: vfdfield_t | None = None,
|
||||
pec: vfdfield_t | None = None,
|
||||
pmc: vfdfield_t | None = None,
|
||||
adjoint: bool = False,
|
||||
matrix_solver: Callable[..., ArrayLike] = _scipy_qmr,
|
||||
matrix_solver_opts: Optional[Dict[str, Any]] = None,
|
||||
matrix_solver_opts: dict[str, Any] | None = None,
|
||||
) -> vcfdfield_t:
|
||||
"""
|
||||
Conjugate gradient FDFD solver using CSR sparse matrices.
|
||||
|
@ -182,10 +182,10 @@ from typing import Any
|
||||
import numpy
|
||||
from numpy.typing import NDArray, ArrayLike
|
||||
from numpy.linalg import norm
|
||||
import scipy.sparse as sparse # type: ignore
|
||||
from scipy import sparse
|
||||
|
||||
from ..fdmath.operators import deriv_forward, deriv_back, cross
|
||||
from ..fdmath import unvec, dx_lists_t, vfdfield_t, vcfdfield_t
|
||||
from ..fdmath import vec, unvec, dx_lists_t, vfdfield_t, vcfdfield_t
|
||||
from ..eigensolvers import signed_eigensolve, rayleigh_quotient_iteration
|
||||
|
||||
|
||||
@ -253,7 +253,8 @@ def operator_e(
|
||||
mu_yx = sparse.diags(numpy.hstack((mu_parts[1], mu_parts[0])))
|
||||
mu_z_inv = sparse.diags(1 / mu_parts[2])
|
||||
|
||||
op = (omega * omega * mu_yx @ eps_xy
|
||||
op = (
|
||||
omega * omega * mu_yx @ eps_xy
|
||||
+ mu_yx @ sparse.vstack((-Dby, Dbx)) @ mu_z_inv @ sparse.hstack((-Dfy, Dfx))
|
||||
+ sparse.vstack((Dfx, Dfy)) @ eps_z_inv @ sparse.hstack((Dbx, Dby)) @ eps_xy
|
||||
)
|
||||
@ -321,7 +322,8 @@ def operator_h(
|
||||
mu_xy = sparse.diags(numpy.hstack((mu_parts[0], mu_parts[1])))
|
||||
mu_z_inv = sparse.diags(1 / mu_parts[2])
|
||||
|
||||
op = (omega * omega * eps_yx @ mu_xy
|
||||
op = (
|
||||
omega * omega * eps_yx @ mu_xy
|
||||
+ eps_yx @ sparse.vstack((-Dfy, Dfx)) @ eps_z_inv @ sparse.hstack((-Dby, Dbx))
|
||||
+ sparse.vstack((Dbx, Dby)) @ mu_z_inv @ sparse.hstack((Dfx, Dfy)) @ mu_xy
|
||||
)
|
||||
@ -420,7 +422,7 @@ def _normalized_fields(
|
||||
Sz_a = E[0] * numpy.conj(H[1] * phase) * dxes_real[0][1] * dxes_real[1][0]
|
||||
Sz_b = E[1] * numpy.conj(H[0] * phase) * dxes_real[0][0] * dxes_real[1][1]
|
||||
Sz_tavg = numpy.real(Sz_a.sum() - Sz_b.sum()) * 0.5 # 0.5 since E, H are assumed to be peak (not RMS) amplitudes
|
||||
assert Sz_tavg > 0, 'Found a mode propagating in the wrong direction! Sz_tavg={}'.format(Sz_tavg)
|
||||
assert Sz_tavg > 0, f'Found a mode propagating in the wrong direction! {Sz_tavg=}'
|
||||
|
||||
energy = epsilon * e.conj() * e
|
||||
|
||||
@ -718,6 +720,109 @@ def e_err(
|
||||
return float(norm(op) / norm(e))
|
||||
|
||||
|
||||
def sensitivity(
|
||||
e_norm: vcfdfield_t,
|
||||
h_norm: vcfdfield_t,
|
||||
wavenumber: complex,
|
||||
omega: complex,
|
||||
dxes: dx_lists_t,
|
||||
epsilon: vfdfield_t,
|
||||
mu: vfdfield_t | None = None,
|
||||
) -> vcfdfield_t:
|
||||
r"""
|
||||
Given a waveguide structure (`dxes`, `epsilon`, `mu`) and mode fields
|
||||
(`e_norm`, `h_norm`, `wavenumber`, `omega`), calculates the sensitivity of the wavenumber
|
||||
$\beta$ to changes in the dielectric structure $\epsilon$.
|
||||
|
||||
The output is a vector of the same size as `vec(epsilon)`, with each element specifying the
|
||||
sensitivity of `wavenumber` to changes in the corresponding element in `vec(epsilon)`, i.e.
|
||||
|
||||
$$sens_{i} = \frac{\partial\beta}{\partial\epsilon_i}$$
|
||||
|
||||
An adjoint approach is used to calculate the sensitivity; the derivation is provided here:
|
||||
|
||||
Starting with the eigenvalue equation
|
||||
|
||||
$$\beta^2 E_{xy} = A_E E_{xy}$$
|
||||
|
||||
where $A_E$ is the waveguide operator from `operator_e()`, and $E_{xy} = \begin{bmatrix} E_x \\
|
||||
E_y \end{bmatrix}$,
|
||||
we can differentiate with respect to one of the $\epsilon$ elements (i.e. at one Yee grid point), $\epsilon_i$:
|
||||
|
||||
$$
|
||||
(2 \beta) \partial_{\epsilon_i}(\beta) E_{xy} + \beta^2 \partial_{\epsilon_i} E_{xy}
|
||||
= \partial_{\epsilon_i}(A_E) E_{xy} + A_E \partial_{\epsilon_i} E_{xy}
|
||||
$$
|
||||
|
||||
We then multiply by $H_{yx}^\star = \begin{bmatrix}H_y^\star \\ -H_x^\star \end{bmatrix}$ from the left:
|
||||
|
||||
$$
|
||||
(2 \beta) \partial_{\epsilon_i}(\beta) H_{yx}^\star E_{xy} + \beta^2 H_{yx}^\star \partial_{\epsilon_i} E_{xy}
|
||||
= H_{yx}^\star \partial_{\epsilon_i}(A_E) E_{xy} + H_{yx}^\star A_E \partial_{\epsilon_i} E_{xy}
|
||||
$$
|
||||
|
||||
However, $H_{yx}^\star$ is actually a left-eigenvector of $A_E$. This can be verified by inspecting
|
||||
the form of `operator_h` ($A_H$) and comparing its conjugate transpose to `operator_e` ($A_E$). Also, note
|
||||
$H_{yx}^\star \cdot E_{xy} = H^\star \times E$ recalls the mode orthogonality relation. See doi:10.5194/ars-9-85-201
|
||||
for a similar approach. Therefore,
|
||||
|
||||
$$
|
||||
H_{yx}^\star A_E \partial_{\epsilon_i} E_{xy} = \beta^2 H_{yx}^\star \partial_{\epsilon_i} E_{xy}
|
||||
$$
|
||||
|
||||
and we can simplify to
|
||||
|
||||
$$
|
||||
\partial_{\epsilon_i}(\beta)
|
||||
= \frac{1}{2 \beta} \frac{H_{yx}^\star \partial_{\epsilon_i}(A_E) E_{xy} }{H_{yx}^\star E_{xy}}
|
||||
$$
|
||||
|
||||
This expression can be quickly calculated for all $i$ by writing out the various terms of
|
||||
$\partial_{\epsilon_i} A_E$ and recognizing that the vector-matrix-vector products (i.e. scalars)
|
||||
$sens_i = \vec{v}_{left} \partial_{\epsilon_i} (\epsilon_{xyz}) \vec{v}_{right}$, indexed by $i$, can be expressed as
|
||||
elementwise multiplications $\vec{sens} = \vec{v}_{left} \star \vec{v}_{right}$
|
||||
|
||||
|
||||
Args:
|
||||
e_norm: Normalized, vectorized E_xyz field for the mode. E.g. as returned by `normalized_fields_e`.
|
||||
h_norm: Normalized, vectorized H_xyz field for the mode. E.g. as returned by `normalized_fields_e`.
|
||||
wavenumber: Propagation constant for the mode. The z-axis is assumed to be continuous (i.e. without numerical dispersion).
|
||||
omega: The angular frequency of the system.
|
||||
dxes: Grid parameters `[dx_e, dx_h]` as described in `meanas.fdmath.types` (2D)
|
||||
epsilon: Vectorized dielectric constant grid
|
||||
mu: Vectorized magnetic permeability grid (default 1 everywhere)
|
||||
|
||||
Returns:
|
||||
Sparse matrix representation of the operator.
|
||||
"""
|
||||
if mu is None:
|
||||
mu = numpy.ones_like(epsilon)
|
||||
|
||||
Dfx, Dfy = deriv_forward(dxes[0])
|
||||
Dbx, Dby = deriv_back(dxes[1])
|
||||
|
||||
eps_x, eps_y, eps_z = numpy.split(epsilon, 3)
|
||||
eps_xy = sparse.diags(numpy.hstack((eps_x, eps_y)))
|
||||
eps_z_inv = sparse.diags(1 / eps_z)
|
||||
|
||||
mu_x, mu_y, _mu_z = numpy.split(mu, 3)
|
||||
mu_yx = sparse.diags(numpy.hstack((mu_y, mu_x)))
|
||||
|
||||
da_exxhyy = vec(dxes[1][0][:, None] * dxes[0][1][None, :])
|
||||
da_eyyhxx = vec(dxes[1][1][None, :] * dxes[0][0][:, None])
|
||||
ev_xy = numpy.concatenate(numpy.split(e_norm, 3)[:2]) * numpy.concatenate([da_exxhyy, da_eyyhxx])
|
||||
hx, hy, hz = numpy.split(h_norm, 3)
|
||||
hv_yx_conj = numpy.conj(numpy.concatenate([hy, -hx]))
|
||||
|
||||
sens_xy1 = (hv_yx_conj @ (omega * omega * mu_yx)) * ev_xy
|
||||
sens_xy2 = (hv_yx_conj @ sparse.vstack((Dfx, Dfy)) @ eps_z_inv @ sparse.hstack((Dbx, Dby))) * ev_xy
|
||||
sens_z = (hv_yx_conj @ sparse.vstack((Dfx, Dfy)) @ (-eps_z_inv * eps_z_inv)) * (sparse.hstack((Dbx, Dby)) @ eps_xy @ ev_xy)
|
||||
norm = hv_yx_conj @ ev_xy
|
||||
|
||||
sens_tot = numpy.concatenate([sens_xy1 + sens_xy2, sens_z]) / (2 * wavenumber * norm)
|
||||
return sens_tot
|
||||
|
||||
|
||||
def solve_modes(
|
||||
mode_numbers: list[int],
|
||||
omega: complex,
|
||||
|
@ -4,9 +4,11 @@ Tools for working with waveguide modes in 3D domains.
|
||||
This module relies heavily on `waveguide_2d` and mostly just transforms
|
||||
its parameters into 2D equivalents and expands the results back into 3D.
|
||||
"""
|
||||
from typing import Sequence, Any
|
||||
from typing import Any
|
||||
from collections.abc import Sequence
|
||||
import numpy
|
||||
from numpy.typing import NDArray
|
||||
from numpy import complexfloating
|
||||
|
||||
from ..fdmath import vec, unvec, dx_lists_t, fdfield_t, cfdfield_t
|
||||
from . import operators, waveguide_2d
|
||||
@ -21,7 +23,7 @@ def solve_mode(
|
||||
slices: Sequence[slice],
|
||||
epsilon: fdfield_t,
|
||||
mu: fdfield_t | None = None,
|
||||
) -> dict[str, complex | NDArray[numpy.float_]]:
|
||||
) -> dict[str, complex | NDArray[complexfloating]]:
|
||||
"""
|
||||
Given a 3D grid, selects a slice from the grid and attempts to
|
||||
solve for an eigenmode propagating through that slice.
|
||||
@ -40,8 +42,8 @@ def solve_mode(
|
||||
Returns:
|
||||
```
|
||||
{
|
||||
'E': list[NDArray[numpy.float_]],
|
||||
'H': list[NDArray[numpy.float_]],
|
||||
'E': NDArray[complexfloating],
|
||||
'H': NDArray[complexfloating],
|
||||
'wavenumber': complex,
|
||||
}
|
||||
```
|
||||
|
@ -9,9 +9,9 @@ As the z-dependence is known, all the functions in this file assume a 2D grid
|
||||
# TODO update module docs
|
||||
|
||||
import numpy
|
||||
import scipy.sparse as sparse # type: ignore
|
||||
from scipy import sparse
|
||||
|
||||
from ..fdmath import vec, unvec, dx_lists_t, fdfield_t, vfdfield_t, cfdfield_t
|
||||
from ..fdmath import vec, unvec, dx_lists_t, vfdfield_t, cfdfield_t
|
||||
from ..fdmath.operators import deriv_forward, deriv_back
|
||||
from ..eigensolvers import signed_eigensolve, rayleigh_quotient_iteration
|
||||
|
||||
@ -25,6 +25,9 @@ def cylindrical_operator(
|
||||
"""
|
||||
Cylindrical coordinate waveguide operator of the form
|
||||
|
||||
(NOTE: See 10.1364/OL.33.001848)
|
||||
TODO: consider 10.1364/OE.20.021583
|
||||
|
||||
TODO
|
||||
|
||||
for use with a field vector of the form `[E_r, E_y]`.
|
||||
|
@ -741,8 +741,24 @@ the true values can be multiplied back in after the simulation is complete if no
|
||||
normalized results are needed.
|
||||
"""
|
||||
|
||||
from .types import fdfield_t, vfdfield_t, cfdfield_t, vcfdfield_t, dx_lists_t, dx_lists_mut
|
||||
from .types import fdfield_updater_t, cfdfield_updater_t
|
||||
from .vectorization import vec, unvec
|
||||
from . import operators, functional, types, vectorization
|
||||
from .types import (
|
||||
fdfield_t as fdfield_t,
|
||||
vfdfield_t as vfdfield_t,
|
||||
cfdfield_t as cfdfield_t,
|
||||
vcfdfield_t as vcfdfield_t,
|
||||
dx_lists_t as dx_lists_t,
|
||||
dx_lists_mut as dx_lists_mut,
|
||||
fdfield_updater_t as fdfield_updater_t,
|
||||
cfdfield_updater_t as cfdfield_updater_t,
|
||||
)
|
||||
from .vectorization import (
|
||||
vec as vec,
|
||||
unvec as unvec,
|
||||
)
|
||||
from . import (
|
||||
operators as operators,
|
||||
functional as functional,
|
||||
types as types,
|
||||
vectorization as vectorization,
|
||||
)
|
||||
|
||||
|
@ -3,16 +3,18 @@ Math functions for finite difference simulations
|
||||
|
||||
Basic discrete calculus etc.
|
||||
"""
|
||||
from typing import Sequence, Callable
|
||||
from typing import TypeVar
|
||||
from collections.abc import Sequence, Callable
|
||||
|
||||
import numpy
|
||||
from numpy.typing import NDArray
|
||||
from numpy import floating, complexfloating
|
||||
|
||||
from .types import fdfield_t, fdfield_updater_t
|
||||
|
||||
|
||||
def deriv_forward(
|
||||
dx_e: Sequence[NDArray[numpy.float_]] | None = None,
|
||||
dx_e: Sequence[NDArray[floating]] | None = None,
|
||||
) -> tuple[fdfield_updater_t, fdfield_updater_t, fdfield_updater_t]:
|
||||
"""
|
||||
Utility operators for taking discretized derivatives (backward variant).
|
||||
@ -36,7 +38,7 @@ def deriv_forward(
|
||||
|
||||
|
||||
def deriv_back(
|
||||
dx_h: Sequence[NDArray[numpy.float_]] | None = None,
|
||||
dx_h: Sequence[NDArray[floating]] | None = None,
|
||||
) -> tuple[fdfield_updater_t, fdfield_updater_t, fdfield_updater_t]:
|
||||
"""
|
||||
Utility operators for taking discretized derivatives (forward variant).
|
||||
@ -59,9 +61,12 @@ def deriv_back(
|
||||
return derivs
|
||||
|
||||
|
||||
TT = TypeVar('TT', bound='NDArray[floating | complexfloating]')
|
||||
|
||||
|
||||
def curl_forward(
|
||||
dx_e: Sequence[NDArray[numpy.float_]] | None = None,
|
||||
) -> fdfield_updater_t:
|
||||
dx_e: Sequence[NDArray[floating]] | None = None,
|
||||
) -> Callable[[TT], TT]:
|
||||
r"""
|
||||
Curl operator for use with the E field.
|
||||
|
||||
@ -75,7 +80,7 @@ def curl_forward(
|
||||
"""
|
||||
Dx, Dy, Dz = deriv_forward(dx_e)
|
||||
|
||||
def ce_fun(e: fdfield_t) -> fdfield_t:
|
||||
def ce_fun(e: TT) -> TT:
|
||||
output = numpy.empty_like(e)
|
||||
output[0] = Dy(e[2])
|
||||
output[1] = Dz(e[0])
|
||||
@ -89,8 +94,8 @@ def curl_forward(
|
||||
|
||||
|
||||
def curl_back(
|
||||
dx_h: Sequence[NDArray[numpy.float_]] | None = None,
|
||||
) -> fdfield_updater_t:
|
||||
dx_h: Sequence[NDArray[floating]] | None = None,
|
||||
) -> Callable[[TT], TT]:
|
||||
r"""
|
||||
Create a function which takes the backward curl of a field.
|
||||
|
||||
@ -104,7 +109,7 @@ def curl_back(
|
||||
"""
|
||||
Dx, Dy, Dz = deriv_back(dx_h)
|
||||
|
||||
def ch_fun(h: fdfield_t) -> fdfield_t:
|
||||
def ch_fun(h: TT) -> TT:
|
||||
output = numpy.empty_like(h)
|
||||
output[0] = Dy(h[2])
|
||||
output[1] = Dz(h[0])
|
||||
@ -118,7 +123,7 @@ def curl_back(
|
||||
|
||||
|
||||
def curl_forward_parts(
|
||||
dx_e: Sequence[NDArray[numpy.float_]] | None = None,
|
||||
dx_e: Sequence[NDArray[floating]] | None = None,
|
||||
) -> Callable:
|
||||
Dx, Dy, Dz = deriv_forward(dx_e)
|
||||
|
||||
@ -131,7 +136,7 @@ def curl_forward_parts(
|
||||
|
||||
|
||||
def curl_back_parts(
|
||||
dx_h: Sequence[NDArray[numpy.float_]] | None = None,
|
||||
dx_h: Sequence[NDArray[floating]] | None = None,
|
||||
) -> Callable:
|
||||
Dx, Dy, Dz = deriv_back(dx_h)
|
||||
|
||||
|
@ -3,10 +3,11 @@ Matrix operators for finite difference simulations
|
||||
|
||||
Basic discrete calculus etc.
|
||||
"""
|
||||
from typing import Sequence
|
||||
from collections.abc import Sequence
|
||||
import numpy
|
||||
from numpy.typing import NDArray
|
||||
import scipy.sparse as sparse # type: ignore
|
||||
from numpy import floating
|
||||
from scipy import sparse
|
||||
|
||||
from .types import vfdfield_t
|
||||
|
||||
@ -29,12 +30,12 @@ def shift_circ(
|
||||
Sparse matrix for performing the circular shift.
|
||||
"""
|
||||
if len(shape) not in (2, 3):
|
||||
raise Exception('Invalid shape: {}'.format(shape))
|
||||
raise Exception(f'Invalid shape: {shape}')
|
||||
if axis not in range(len(shape)):
|
||||
raise Exception('Invalid direction: {}, shape is {}'.format(axis, shape))
|
||||
raise Exception(f'Invalid direction: {axis}, shape is {shape}')
|
||||
|
||||
shifts = [abs(shift_distance) if a == axis else 0 for a in range(3)]
|
||||
shifted_diags = [(numpy.arange(n) + s) % n for n, s in zip(shape, shifts)]
|
||||
shifted_diags = [(numpy.arange(n) + s) % n for n, s in zip(shape, shifts, strict=True)]
|
||||
ijk = numpy.meshgrid(*shifted_diags, indexing='ij')
|
||||
|
||||
n = numpy.prod(shape)
|
||||
@ -69,12 +70,11 @@ def shift_with_mirror(
|
||||
Sparse matrix for performing the shift-with-mirror.
|
||||
"""
|
||||
if len(shape) not in (2, 3):
|
||||
raise Exception('Invalid shape: {}'.format(shape))
|
||||
raise Exception(f'Invalid shape: {shape}')
|
||||
if axis not in range(len(shape)):
|
||||
raise Exception('Invalid direction: {}, shape is {}'.format(axis, shape))
|
||||
raise Exception(f'Invalid direction: {axis}, shape is {shape}')
|
||||
if shift_distance >= shape[axis]:
|
||||
raise Exception('Shift ({}) is too large for axis {} of size {}'.format(
|
||||
shift_distance, axis, shape[axis]))
|
||||
raise Exception(f'Shift ({shift_distance}) is too large for axis {axis} of size {shape[axis]}')
|
||||
|
||||
def mirrored_range(n: int, s: int) -> NDArray[numpy.int_]:
|
||||
v = numpy.arange(n) + s
|
||||
@ -83,7 +83,7 @@ def shift_with_mirror(
|
||||
return v
|
||||
|
||||
shifts = [shift_distance if a == axis else 0 for a in range(3)]
|
||||
shifted_diags = [mirrored_range(n, s) for n, s in zip(shape, shifts)]
|
||||
shifted_diags = [mirrored_range(n, s) for n, s in zip(shape, shifts, strict=True)]
|
||||
ijk = numpy.meshgrid(*shifted_diags, indexing='ij')
|
||||
|
||||
n = numpy.prod(shape)
|
||||
@ -97,7 +97,7 @@ def shift_with_mirror(
|
||||
|
||||
|
||||
def deriv_forward(
|
||||
dx_e: Sequence[NDArray[numpy.float_]],
|
||||
dx_e: Sequence[NDArray[floating]],
|
||||
) -> list[sparse.spmatrix]:
|
||||
"""
|
||||
Utility operators for taking discretized derivatives (forward variant).
|
||||
@ -124,7 +124,7 @@ def deriv_forward(
|
||||
|
||||
|
||||
def deriv_back(
|
||||
dx_h: Sequence[NDArray[numpy.float_]],
|
||||
dx_h: Sequence[NDArray[floating]],
|
||||
) -> list[sparse.spmatrix]:
|
||||
"""
|
||||
Utility operators for taking discretized derivatives (backward variant).
|
||||
@ -198,7 +198,7 @@ def avg_forward(axis: int, shape: Sequence[int]) -> sparse.spmatrix:
|
||||
Sparse matrix for forward average operation.
|
||||
"""
|
||||
if len(shape) not in (2, 3):
|
||||
raise Exception('Invalid shape: {}'.format(shape))
|
||||
raise Exception(f'Invalid shape: {shape}')
|
||||
|
||||
n = numpy.prod(shape)
|
||||
return 0.5 * (sparse.eye(n) + shift_circ(axis, shape))
|
||||
@ -219,7 +219,7 @@ def avg_back(axis: int, shape: Sequence[int]) -> sparse.spmatrix:
|
||||
|
||||
|
||||
def curl_forward(
|
||||
dx_e: Sequence[NDArray[numpy.float_]],
|
||||
dx_e: Sequence[NDArray[floating]],
|
||||
) -> sparse.spmatrix:
|
||||
"""
|
||||
Curl operator for use with the E field.
|
||||
@ -235,7 +235,7 @@ def curl_forward(
|
||||
|
||||
|
||||
def curl_back(
|
||||
dx_h: Sequence[NDArray[numpy.float_]],
|
||||
dx_h: Sequence[NDArray[floating]],
|
||||
) -> sparse.spmatrix:
|
||||
"""
|
||||
Curl operator for use with the H field.
|
||||
|
@@ -1,26 +1,26 @@
"""
Types shared across multiple submodules
"""
from typing import Sequence, Callable, MutableSequence
import numpy
from collections.abc import Sequence, Callable, MutableSequence
from numpy.typing import NDArray
from numpy import floating, complexfloating


# Field types
fdfield_t = NDArray[numpy.float_]
fdfield_t = NDArray[floating]
"""Vector field with shape (3, X, Y, Z) (e.g. `[E_x, E_y, E_z]`)"""

vfdfield_t = NDArray[numpy.float_]
vfdfield_t = NDArray[floating]
"""Linearized vector field (single vector of length 3*X*Y*Z)"""

cfdfield_t = NDArray[numpy.complex_]
cfdfield_t = NDArray[complexfloating]
"""Complex vector field with shape (3, X, Y, Z) (e.g. `[E_x, E_y, E_z]`)"""

vcfdfield_t = NDArray[numpy.complex_]
vcfdfield_t = NDArray[complexfloating]
"""Linearized complex vector field (single vector of length 3*X*Y*Z)"""


dx_lists_t = Sequence[Sequence[NDArray[numpy.float_]]]
dx_lists_t = Sequence[Sequence[NDArray[floating]]]
"""
'dxes' datastructure which contains grid cell width information in the following format:

@@ -31,7 +31,7 @@ dx_lists_t = Sequence[Sequence[NDArray[numpy.float_]]]
and `dy_h[0]` is the y-width of the `y=0` cells, as used when calculating dH/dy, etc.
"""

dx_lists_mut = MutableSequence[MutableSequence[NDArray[numpy.float_]]]
dx_lists_mut = MutableSequence[MutableSequence[NDArray[floating]]]
"""Mutable version of `dx_lists_t`"""
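For concreteness, a tiny numpy-only sketch of the shapes these aliases describe; the sizes are arbitrary and this is not library code:

```python
import numpy

X, Y, Z = 4, 5, 6

# fdfield_t-style field: shape (3, X, Y, Z), i.e. [E_x, E_y, E_z]
e_field = numpy.zeros((3, X, Y, Z))

# vfdfield_t-style field: the same data linearized into one row-major (C-order) vector
v_e = e_field.ravel(order='C')
assert v_e.size == 3 * X * Y * Z

# dx_lists_t-style structure: per-axis cell widths for the E grid and the H grid
dx = 10.0
dx_e = [numpy.full(n, dx) for n in (X, Y, Z)]
dx_h = [numpy.full(n, dx) for n in (X, Y, Z)]
dxes = [dx_e, dx_h]
```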
@ -4,7 +4,8 @@ and a 1D array representation of that field `[f_x0, f_x1, f_x2,... f_y0,... f_z0
|
||||
Vectorized versions of the field use row-major (ie., C-style) ordering.
|
||||
"""
|
||||
|
||||
from typing import overload, Sequence
|
||||
from typing import overload
|
||||
from collections.abc import Sequence
|
||||
import numpy
|
||||
from numpy.typing import ArrayLike
|
||||
|
||||
|
@ -159,8 +159,22 @@ Boundary conditions
|
||||
# TODO notes about boundaries / PMLs
|
||||
"""
|
||||
|
||||
from .base import maxwell_e, maxwell_h
|
||||
from .pml import cpml_params, updates_with_cpml
|
||||
from .energy import (poynting, poynting_divergence, energy_hstep, energy_estep,
|
||||
delta_energy_h2e, delta_energy_j)
|
||||
from .boundaries import conducting_boundary
|
||||
from .base import (
|
||||
maxwell_e as maxwell_e,
|
||||
maxwell_h as maxwell_h,
|
||||
)
|
||||
from .pml import (
|
||||
cpml_params as cpml_params,
|
||||
updates_with_cpml as updates_with_cpml,
|
||||
)
|
||||
from .energy import (
|
||||
poynting as poynting,
|
||||
poynting_divergence as poynting_divergence,
|
||||
energy_hstep as energy_hstep,
|
||||
energy_estep as energy_estep,
|
||||
delta_energy_h2e as delta_energy_h2e,
|
||||
delta_energy_j as delta_energy_j,
|
||||
)
|
||||
from .boundaries import (
|
||||
conducting_boundary as conducting_boundary,
|
||||
)
|
||||
|
@ -15,13 +15,17 @@ def conducting_boundary(
|
||||
) -> tuple[fdfield_updater_t, fdfield_updater_t]:
|
||||
dirs = [0, 1, 2]
|
||||
if direction not in dirs:
|
||||
raise Exception('Invalid direction: {}'.format(direction))
|
||||
raise Exception(f'Invalid direction: {direction}')
|
||||
dirs.remove(direction)
|
||||
u, v = dirs
|
||||
|
||||
boundary_slice: list[Any]
|
||||
shifted1_slice: list[Any]
|
||||
shifted2_slice: list[Any]
|
||||
|
||||
if polarity < 0:
|
||||
boundary_slice = [slice(None)] * 3 # type: list[Any]
|
||||
shifted1_slice = [slice(None)] * 3 # type: list[Any]
|
||||
boundary_slice = [slice(None)] * 3
|
||||
shifted1_slice = [slice(None)] * 3
|
||||
boundary_slice[direction] = 0
|
||||
shifted1_slice[direction] = 1
|
||||
|
||||
@ -42,7 +46,7 @@ def conducting_boundary(
|
||||
if polarity > 0:
|
||||
boundary_slice = [slice(None)] * 3
|
||||
shifted1_slice = [slice(None)] * 3
|
||||
shifted2_slice = [slice(None)] * 3 # type: list[Any]
|
||||
shifted2_slice = [slice(None)] * 3
|
||||
boundary_slice[direction] = -1
|
||||
shifted1_slice[direction] = -2
|
||||
shifted2_slice[direction] = -3
|
||||
@ -64,4 +68,4 @@ def conducting_boundary(
|
||||
|
||||
return ep, hp
|
||||
|
||||
raise Exception('Bad polarity: {}'.format(polarity))
|
||||
raise Exception(f'Bad polarity: {polarity}')
|
||||
|
@ -7,7 +7,8 @@ PML implementations
|
||||
"""
|
||||
# TODO retest pmls!
|
||||
|
||||
from typing import Callable, Sequence, Any
|
||||
from typing import Any
|
||||
from collections.abc import Callable, Sequence
|
||||
from copy import deepcopy
|
||||
import numpy
|
||||
from numpy.typing import NDArray, DTypeLike
|
||||
@ -33,10 +34,10 @@ def cpml_params(
|
||||
) -> dict[str, Any]:
|
||||
|
||||
if axis not in range(3):
|
||||
raise Exception('Invalid axis: {}'.format(axis))
|
||||
raise Exception(f'Invalid axis: {axis}')
|
||||
|
||||
if polarity not in (-1, 1):
|
||||
raise Exception('Invalid polarity: {}'.format(polarity))
|
||||
raise Exception(f'Invalid polarity: {polarity}')
|
||||
|
||||
if thickness <= 2:
|
||||
raise Exception('It would be wise to have a pml with 4+ cells of thickness')
|
||||
@ -111,7 +112,7 @@ def updates_with_cpml(
|
||||
params_H: list[list[tuple[Any, Any, Any, Any]]] = deepcopy(params_E)
|
||||
|
||||
for axis in range(3):
|
||||
for pp, polarity in enumerate((-1, 1)):
|
||||
for pp, _polarity in enumerate((-1, 1)):
|
||||
cpml_param = cpml_params[axis][pp]
|
||||
if cpml_param is None:
|
||||
psi_E[axis][pp] = (None, None)
|
||||
@ -184,7 +185,7 @@ def updates_with_cpml(
|
||||
def update_H(
|
||||
e: fdfield_t,
|
||||
h: fdfield_t,
|
||||
mu: fdfield_t = numpy.ones(3),
|
||||
mu: fdfield_t | tuple[int, int, int] = (1, 1, 1),
|
||||
) -> None:
|
||||
dyEx = Dfy(e[0])
|
||||
dzEx = Dfz(e[0])
|
||||
|
@ -3,7 +3,8 @@
|
||||
Test fixtures
|
||||
|
||||
"""
|
||||
from typing import Iterable, Any
|
||||
# ruff: noqa: ARG001
|
||||
from typing import Any
|
||||
import numpy
|
||||
from numpy.typing import NDArray
|
||||
import pytest # type: ignore
|
||||
@ -20,18 +21,18 @@ FixtureRequest = Any
|
||||
(5, 5, 5),
|
||||
# (7, 7, 7),
|
||||
])
|
||||
def shape(request: FixtureRequest) -> Iterable[tuple[int, ...]]:
|
||||
yield (3, *request.param)
|
||||
def shape(request: FixtureRequest) -> tuple[int, ...]:
|
||||
return (3, *request.param)
|
||||
|
||||
|
||||
@pytest.fixture(scope='module', params=[1.0, 1.5])
|
||||
def epsilon_bg(request: FixtureRequest) -> Iterable[float]:
|
||||
yield request.param
|
||||
def epsilon_bg(request: FixtureRequest) -> float:
|
||||
return request.param
|
||||
|
||||
|
||||
@pytest.fixture(scope='module', params=[1.0, 2.5])
|
||||
def epsilon_fg(request: FixtureRequest) -> Iterable[float]:
|
||||
yield request.param
|
||||
def epsilon_fg(request: FixtureRequest) -> float:
|
||||
return request.param
|
||||
|
||||
|
||||
@pytest.fixture(scope='module', params=['center', '000', 'random'])
|
||||
@ -40,7 +41,7 @@ def epsilon(
|
||||
shape: tuple[int, ...],
|
||||
epsilon_bg: float,
|
||||
epsilon_fg: float,
|
||||
) -> Iterable[NDArray[numpy.float64]]:
|
||||
) -> NDArray[numpy.float64]:
|
||||
is3d = (numpy.array(shape) == 1).sum() == 0
|
||||
if is3d:
|
||||
if request.param == '000':
|
||||
@ -60,17 +61,17 @@ def epsilon(
|
||||
high=max(epsilon_bg, epsilon_fg),
|
||||
size=shape)
|
||||
|
||||
yield epsilon
|
||||
return epsilon
|
||||
|
||||
|
||||
@pytest.fixture(scope='module', params=[1.0]) # 1.5
|
||||
def j_mag(request: FixtureRequest) -> Iterable[float]:
|
||||
yield request.param
|
||||
def j_mag(request: FixtureRequest) -> float:
|
||||
return request.param
|
||||
|
||||
|
||||
@pytest.fixture(scope='module', params=[1.0, 1.5])
|
||||
def dx(request: FixtureRequest) -> Iterable[float]:
|
||||
yield request.param
|
||||
def dx(request: FixtureRequest) -> float:
|
||||
return request.param
|
||||
|
||||
|
||||
@pytest.fixture(scope='module', params=['uniform', 'centerbig'])
|
||||
@ -78,7 +79,7 @@ def dxes(
|
||||
request: FixtureRequest,
|
||||
shape: tuple[int, ...],
|
||||
dx: float,
|
||||
) -> Iterable[list[list[NDArray[numpy.float64]]]]:
|
||||
) -> list[list[NDArray[numpy.float64]]]:
|
||||
if request.param == 'uniform':
|
||||
dxes = [[numpy.full(s, dx) for s in shape[1:]] for _ in range(2)]
|
||||
elif request.param == 'centerbig':
|
||||
@ -90,5 +91,5 @@ def dxes(
|
||||
dxe = [PRNG.uniform(low=1.0 * dx, high=1.1 * dx, size=s) for s in shape[1:]]
|
||||
dxh = [(d + numpy.roll(d, -1)) / 2 for d in dxe]
|
||||
dxes = [dxe, dxh]
|
||||
yield dxes
|
||||
return dxes
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
from typing import Iterable
|
||||
# ruff: noqa: ARG001
|
||||
import dataclasses
|
||||
import pytest # type: ignore
|
||||
import numpy
|
||||
@ -61,24 +61,24 @@ def test_poynting_planes(sim: 'FDResult') -> None:
|
||||
# Also see conftest.py
|
||||
|
||||
@pytest.fixture(params=[1 / 1500])
|
||||
def omega(request: FixtureRequest) -> Iterable[float]:
|
||||
yield request.param
|
||||
def omega(request: FixtureRequest) -> float:
|
||||
return request.param
|
||||
|
||||
|
||||
@pytest.fixture(params=[None])
|
||||
def pec(request: FixtureRequest) -> Iterable[NDArray[numpy.float64] | None]:
|
||||
yield request.param
|
||||
def pec(request: FixtureRequest) -> NDArray[numpy.float64] | None:
|
||||
return request.param
|
||||
|
||||
|
||||
@pytest.fixture(params=[None])
|
||||
def pmc(request: FixtureRequest) -> Iterable[NDArray[numpy.float64] | None]:
|
||||
yield request.param
|
||||
def pmc(request: FixtureRequest) -> NDArray[numpy.float64] | None:
|
||||
return request.param
|
||||
|
||||
|
||||
#@pytest.fixture(scope='module',
|
||||
# params=[(25, 5, 5)])
|
||||
#def shape(request):
|
||||
# yield (3, *request.param)
|
||||
#def shape(request: FixtureRequest):
|
||||
# return (3, *request.param)
|
||||
|
||||
|
||||
@pytest.fixture(params=['diag']) # 'center'
|
||||
@ -86,7 +86,7 @@ def j_distribution(
|
||||
request: FixtureRequest,
|
||||
shape: tuple[int, ...],
|
||||
j_mag: float,
|
||||
) -> Iterable[NDArray[numpy.float64]]:
|
||||
) -> NDArray[numpy.float64]:
|
||||
j = numpy.zeros(shape, dtype=complex)
|
||||
center_mask = numpy.zeros(shape, dtype=bool)
|
||||
center_mask[:, shape[1] // 2, shape[2] // 2, shape[3] // 2] = True
|
||||
@ -96,7 +96,7 @@ def j_distribution(
|
||||
elif request.param == 'diag':
|
||||
j[numpy.roll(center_mask, [1, 1, 1], axis=(1, 2, 3))] = (1 + 1j) * j_mag
|
||||
j[numpy.roll(center_mask, [-1, -1, -1], axis=(1, 2, 3))] = (1 - 1j) * j_mag
|
||||
yield j
|
||||
return j
|
||||
|
||||
|
||||
@dataclasses.dataclass()
|
||||
@ -145,7 +145,7 @@ def sim(
|
||||
omega=omega,
|
||||
dxes=dxes,
|
||||
epsilon=eps_vec,
|
||||
matrix_solver_opts={'atol': 1e-15, 'tol': 1e-11},
|
||||
matrix_solver_opts={'atol': 1e-15, 'rtol': 1e-11},
|
||||
)
|
||||
e = unvec(e_vec, shape[1:])
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
from typing import Iterable
|
||||
# ruff: noqa: ARG001
|
||||
import pytest # type: ignore
|
||||
import numpy
|
||||
from numpy.typing import NDArray
|
||||
@ -44,30 +44,30 @@ def test_pml(sim: FDResult, src_polarity: int) -> None:
|
||||
# Also see conftest.py
|
||||
|
||||
@pytest.fixture(params=[1 / 1500])
|
||||
def omega(request: FixtureRequest) -> Iterable[float]:
|
||||
yield request.param
|
||||
def omega(request: FixtureRequest) -> float:
|
||||
return request.param
|
||||
|
||||
|
||||
@pytest.fixture(params=[None])
|
||||
def pec(request: FixtureRequest) -> Iterable[NDArray[numpy.float64] | None]:
|
||||
yield request.param
|
||||
def pec(request: FixtureRequest) -> NDArray[numpy.float64] | None:
|
||||
return request.param
|
||||
|
||||
|
||||
@pytest.fixture(params=[None])
|
||||
def pmc(request: FixtureRequest) -> Iterable[NDArray[numpy.float64] | None]:
|
||||
yield request.param
|
||||
def pmc(request: FixtureRequest) -> NDArray[numpy.float64] | None:
|
||||
return request.param
|
||||
|
||||
|
||||
@pytest.fixture(params=[(30, 1, 1),
|
||||
(1, 30, 1),
|
||||
(1, 1, 30)])
|
||||
def shape(request: FixtureRequest) -> Iterable[tuple[int, ...]]:
|
||||
yield (3, *request.param)
|
||||
def shape(request: FixtureRequest) -> tuple[int, int, int]:
|
||||
return (3, *request.param)
|
||||
|
||||
|
||||
@pytest.fixture(params=[+1, -1])
|
||||
def src_polarity(request: FixtureRequest) -> Iterable[int]:
|
||||
yield request.param
|
||||
def src_polarity(request: FixtureRequest) -> int:
|
||||
return request.param
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
@ -78,7 +78,7 @@ def j_distribution(
|
||||
dxes: dx_lists_mut,
|
||||
omega: float,
|
||||
src_polarity: int,
|
||||
) -> Iterable[NDArray[numpy.complex128]]:
|
||||
) -> NDArray[numpy.complex128]:
|
||||
j = numpy.zeros(shape, dtype=complex)
|
||||
|
||||
dim = numpy.where(numpy.array(shape[1:]) > 1)[0][0] # Propagation axis
|
||||
@ -106,7 +106,7 @@ def j_distribution(
|
||||
|
||||
j = fdfd.waveguide_3d.compute_source(E=e, wavenumber=wavenumber_corrected, omega=omega, dxes=dxes,
|
||||
axis=dim, polarity=src_polarity, slices=slices, epsilon=epsilon)
|
||||
yield j
|
||||
return j
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
@ -115,9 +115,9 @@ def epsilon(
|
||||
shape: tuple[int, ...],
|
||||
epsilon_bg: float,
|
||||
epsilon_fg: float,
|
||||
) -> Iterable[NDArray[numpy.float64]]:
|
||||
) -> NDArray[numpy.float64]:
|
||||
epsilon = numpy.full(shape, epsilon_fg, dtype=float)
|
||||
yield epsilon
|
||||
return epsilon
|
||||
|
||||
|
||||
@pytest.fixture(params=['uniform'])
|
||||
@ -127,7 +127,7 @@ def dxes(
|
||||
dx: float,
|
||||
omega: float,
|
||||
epsilon_fg: float,
|
||||
) -> Iterable[list[list[NDArray[numpy.float64]]]]:
|
||||
) -> list[list[NDArray[numpy.float64]]]:
|
||||
if request.param == 'uniform':
|
||||
dxes = [[numpy.full(s, dx) for s in shape[1:]] for _ in range(2)]
|
||||
dim = numpy.where(numpy.array(shape[1:]) > 1)[0][0] # Propagation axis
|
||||
@ -141,7 +141,7 @@ def dxes(
|
||||
epsilon_effective=epsilon_fg,
|
||||
thickness=10,
|
||||
)
|
||||
yield dxes
|
||||
return dxes
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
@ -162,7 +162,7 @@ def sim(
|
||||
omega=omega,
|
||||
dxes=dxes,
|
||||
epsilon=eps_vec,
|
||||
matrix_solver_opts={'atol': 1e-15, 'tol': 1e-11},
|
||||
matrix_solver_opts={'atol': 1e-15, 'rtol': 1e-11},
|
||||
)
|
||||
e = unvec(e_vec, shape[1:])
|
||||
|
||||
|
@ -1,4 +1,5 @@
|
||||
from typing import Iterable, Any
|
||||
# ruff: noqa: ARG001
|
||||
from typing import Any
|
||||
import dataclasses
|
||||
import pytest # type: ignore
|
||||
import numpy
|
||||
@ -101,7 +102,7 @@ def test_poynting_divergence(sim: 'TDResult') -> None:
|
||||
def test_poynting_planes(sim: 'TDResult') -> None:
|
||||
mask = (sim.js[0] != 0).any(axis=0)
|
||||
if mask.sum() > 1:
|
||||
pytest.skip('test_poynting_planes can only test single point sources, got {}'.format(mask.sum()))
|
||||
pytest.skip(f'test_poynting_planes can only test single point sources, got {mask.sum()}')
|
||||
|
||||
args: dict[str, Any] = {
|
||||
'dxes': sim.dxes,
|
||||
@ -150,8 +151,8 @@ def test_poynting_planes(sim: 'TDResult') -> None:
|
||||
|
||||
|
||||
@pytest.fixture(params=[0.3])
|
||||
def dt(request: FixtureRequest) -> Iterable[float]:
|
||||
yield request.param
|
||||
def dt(request: FixtureRequest) -> float:
|
||||
return request.param
|
||||
|
||||
|
||||
@dataclasses.dataclass()
|
||||
@ -168,8 +169,8 @@ class TDResult:
|
||||
|
||||
|
||||
@pytest.fixture(params=[(0, 4, 8)]) # (0,)
|
||||
def j_steps(request: FixtureRequest) -> Iterable[tuple[int, ...]]:
|
||||
yield request.param
|
||||
def j_steps(request: FixtureRequest) -> tuple[int, ...]:
|
||||
return request.param
|
||||
|
||||
|
||||
@pytest.fixture(params=['center', 'random'])
|
||||
@ -177,7 +178,7 @@ def j_distribution(
|
||||
request: FixtureRequest,
|
||||
shape: tuple[int, ...],
|
||||
j_mag: float,
|
||||
) -> Iterable[NDArray[numpy.float64]]:
|
||||
) -> NDArray[numpy.float64]:
|
||||
j = numpy.zeros(shape)
|
||||
if request.param == 'center':
|
||||
j[:, shape[1] // 2, shape[2] // 2, shape[3] // 2] = j_mag
|
||||
@ -185,7 +186,7 @@ def j_distribution(
|
||||
j[:, 0, 0, 0] = j_mag
|
||||
elif request.param == 'random':
|
||||
j[:] = PRNG.uniform(low=-j_mag, high=j_mag, size=shape)
|
||||
yield j
|
||||
return j
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
@ -199,8 +200,7 @@ def sim(
|
||||
j_steps: tuple[int, ...],
|
||||
) -> TDResult:
|
||||
is3d = (numpy.array(shape) == 1).sum() == 0
|
||||
if is3d:
|
||||
if dt != 0.3:
|
||||
if is3d and dt != 0.3:
|
||||
pytest.skip('Skipping dt != 0.3 because test is 3D (for speed)')
|
||||
|
||||
sim = TDResult(
|
||||
|
@ -1,5 +1,3 @@
|
||||
from typing import Any
|
||||
|
||||
import numpy
|
||||
from numpy.typing import NDArray
|
||||
|
||||
@ -10,22 +8,25 @@ PRNG = numpy.random.RandomState(12345)
|
||||
def assert_fields_close(
|
||||
x: NDArray,
|
||||
y: NDArray,
|
||||
*args: Any,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
numpy.testing.assert_allclose(
|
||||
x, y, verbose=False, # type: ignore
|
||||
err_msg='Fields did not match:\n{}\n{}'.format(numpy.moveaxis(x, -1, 0),
|
||||
numpy.moveaxis(y, -1, 0)),
|
||||
*args,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
x_disp = numpy.moveaxis(x, -1, 0)
|
||||
y_disp = numpy.moveaxis(y, -1, 0)
|
||||
numpy.testing.assert_allclose(
|
||||
x, # type: ignore
|
||||
y, # type: ignore
|
||||
*args,
|
||||
verbose=False,
|
||||
err_msg=f'Fields did not match:\n{x_disp}\n{y_disp}',
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
def assert_close(
|
||||
x: NDArray,
|
||||
y: NDArray,
|
||||
*args: Any,
|
||||
**kwargs: Any,
|
||||
*args,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
numpy.testing.assert_allclose(x, y, *args, **kwargs)
|
||||
|
||||
|
@@ -39,8 +39,8 @@ include = [
]
dynamic = ["version"]
dependencies = [
"numpy~=1.21",
"scipy",
"numpy>=1.26",
"scipy~=1.14",
]
@@ -51,3 +51,48 @@ path = "meanas/__init__.py"
dev = ["pytest", "pdoc", "gridlock"]
examples = ["gridlock"]
test = ["pytest"]


[tool.ruff]
exclude = [
".git",
"dist",
]
line-length = 245
indent-width = 4
lint.dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
lint.select = [
"NPY", "E", "F", "W", "B", "ANN", "UP", "SLOT", "SIM", "LOG",
"C4", "ISC", "PIE", "PT", "RET", "TCH", "PTH", "INT",
"ARG", "PL", "R", "TRY",
"G010", "G101", "G201", "G202",
"Q002", "Q003", "Q004",
]
lint.ignore = [
#"ANN001", # No annotation
"ANN002", # *args
"ANN003", # **kwargs
"ANN401", # Any
"ANN101", # self: Self
"SIM108", # single-line if / else assignment
"RET504", # x=y+z; return x
"PIE790", # unnecessary pass
"ISC003", # non-implicit string concatenation
"C408", # dict(x=y) instead of {'x': y}
"PLR09", # Too many xxx
"PLR2004", # magic number
"PLC0414", # import x as x
"TRY003", # Long exception message
"TRY002", # Exception()
]


[[tool.mypy.overrides]]
module = [
"scipy",
"scipy.optimize",
"scipy.linalg",
"scipy.sparse",
"scipy.sparse.linalg",
]
ignore_missing_imports = true