Compare commits


No commits in common. "739e96df3d9fb83b4cb62c26867a68fadfbf69eb" and "ccfd4fbf04ac40eadc97527bbeef3b9254d27cbe" have entirely different histories.

28 changed files with 205 additions and 414 deletions

View File

@@ -157,8 +157,7 @@ def main():
         e[1][tuple(grid.shape//2)] += field_source(t)
         update_H(e, h)
-        avg_rate = (t + 1)/(time.perf_counter() - start)
-        print(f'iteration {t}: average {avg_rate} iterations per sec')
+        print('iteration {}: average {} iterations per sec'.format(t, (t+1)/(time.perf_counter()-start)))
         sys.stdout.flush()

         if t % 20 == 0:

View File

@@ -3,7 +3,7 @@ import numpy
 from numpy.linalg import norm
 from meanas.fdmath import vec, unvec
-from meanas.fdfd import waveguide_cyl, functional, scpml
+from meanas.fdfd import waveguide_mode, functional, scpml
 from meanas.fdfd.solvers import generic as generic_solver
 import gridlock

@@ -37,34 +37,29 @@ def test1(solver=generic_solver):
    xyz_max = numpy.array([800, y_max, z_max]) + (pml_thickness + 2) * dx

    # Coordinates of the edges of the cells.
-    half_edge_coords = [numpy.arange(dx / 2, m + dx / 2, step=dx) for m in xyz_max]
+    half_edge_coords = [numpy.arange(dx/2, m + dx/2, step=dx) for m in xyz_max]
    edge_coords = [numpy.hstack((-h[::-1], h)) for h in half_edge_coords]
    edge_coords[0] = numpy.array([-dx, dx])

    # #### Create the grid and draw the device ####
    grid = gridlock.Grid(edge_coords)
    epsilon = grid.allocate(n_air**2, dtype=numpy.float32)
-    grid.draw_cuboid(epsilon, center=center, dimensions=[8e3, w, th], foreground=n_wg**2)
+    grid.draw_cuboid(epsilon, center=center, dimensions=[8e3, w, th], eps=n_wg**2)

    dxes = [grid.dxyz, grid.autoshifted_dxyz()]
    for a in (1, 2):
        for p in (-1, 1):
-            dxes = scpml.stretch_with_scpml(
-                dxes,
-                omega=omega,
-                axis=a,
-                polarity=p,
-                thickness=pml_thickness,
-            )
+            dxes = scpml.stretch_with_scpml(dxes, omega=omega, axis=a, polarity=p,
+                                            thickness=pml_thickness)

    wg_args = {
        'omega': omega,
        'dxes': [(d[1], d[2]) for d in dxes],
-        'epsilon': vec(epsilon.transpose([0, 2, 3, 1])),
+        'epsilon': vec(g.transpose([1, 2, 0]) for g in epsilon),
        'r0': r0,
    }

-    wg_results = waveguide_cyl.solve_mode(mode_number=0, **wg_args)
+    wg_results = waveguide_mode.solve_waveguide_mode_cylindrical(mode_number=0, **wg_args)
    E = wg_results['E']

@@ -75,17 +70,20 @@ def test1(solver=generic_solver):
    '''
    Plot results
    '''
-    def pcolor(fig, ax, v, title):
+    def pcolor(v):
        vmax = numpy.max(numpy.abs(v))
-        mappable = ax.pcolormesh(v.T, cmap='seismic', vmin=-vmax, vmax=vmax)
-        ax.set_aspect('equal', adjustable='box')
-        ax.set_title(title)
-        ax.figure.colorbar(mappable)
+        pyplot.pcolor(v.T, cmap='seismic', vmin=-vmax, vmax=vmax)
+        pyplot.axis('equal')
+        pyplot.colorbar()

-    fig, axes = pyplot.subplots(2, 2)
-    pcolor(fig, axes[0][0], numpy.real(E[0]), 'Ex')
-    pcolor(fig, axes[0][1], numpy.real(E[1]), 'Ey')
-    pcolor(fig, axes[1][0], numpy.real(E[2]), 'Ez')
+    pyplot.figure()
+    pyplot.subplot(2, 2, 1)
+    pcolor(numpy.real(E[0][:, :]))
+    pyplot.subplot(2, 2, 2)
+    pcolor(numpy.real(E[1][:, :]))
+    pyplot.subplot(2, 2, 3)
+    pcolor(numpy.real(E[2][:, :]))
+    pyplot.subplot(2, 2, 4)
    pyplot.show()

View File

@@ -11,8 +11,7 @@ __author__ = 'Jan Petykiewicz'

 try:
-    readme_path = pathlib.Path(__file__).parent / 'README.md'
-    with readme_path.open('r') as f:
+    with open(pathlib.Path(__file__).parent / 'README.md', 'r') as f:
         __doc__ = f.read()
 except Exception:
     pass

View File

@@ -1,12 +1,12 @@
 """
 Solvers for eigenvalue / eigenvector problems
 """
-from collections.abc import Callable
+from typing import Callable

 import numpy
 from numpy.typing import NDArray, ArrayLike
 from numpy.linalg import norm
-from scipy import sparse
-import scipy.sparse.linalg as spalg
+from scipy import sparse                # type: ignore
+import scipy.sparse.linalg as spalg     # type: ignore


 def power_iteration(
@@ -25,9 +25,8 @@ def power_iteration(
     Returns:
         (Largest-magnitude eigenvalue, Corresponding eigenvector estimate)
     """
-    rng = numpy.random.default_rng()
     if guess_vector is None:
-        v = rng.random(operator.shape[0]) + 1j * rng.random(operator.shape[0])
+        v = numpy.random.rand(operator.shape[0]) + 1j * numpy.random.rand(operator.shape[0])
     else:
         v = guess_vector
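As background for the `power_iteration` hunk above, which only shows how the starting vector is drawn, here is a minimal self-contained power-iteration sketch in plain numpy (illustrative only, not code from this repository; `operator` is any square matrix):

```python
import numpy

def power_iteration_sketch(operator, iterations: int = 50):
    # Random complex starting vector, as in the hunk above.
    rng = numpy.random.default_rng()
    v = rng.random(operator.shape[0]) + 1j * rng.random(operator.shape[0])
    for _ in range(iterations):
        v = operator @ v
        v /= numpy.linalg.norm(v)          # renormalize each step
    eigval = v.conj() @ (operator @ v)     # Rayleigh-quotient estimate
    return eigval, v
```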

View File

@@ -91,12 +91,5 @@ $$
 """

-from . import (
-    solvers as solvers,
-    operators as operators,
-    functional as functional,
-    scpml as scpml,
-    waveguide_2d as waveguide_2d,
-    waveguide_3d as waveguide_3d,
-)
+from . import solvers, operators, functional, scpml, waveguide_2d, waveguide_3d
 # from . import farfield, bloch  TODO

View File

@@ -94,17 +94,16 @@ This module contains functions for generating and solving the
 """

-from typing import Any, cast
-from collections.abc import Callable, Sequence
+from typing import Callable, Any, cast, Sequence
 import logging
 import numpy
 from numpy import pi, real, trace
 from numpy.fft import fftfreq
 from numpy.typing import NDArray, ArrayLike

-import scipy
-import scipy.optimize
-from scipy.linalg import norm
-import scipy.sparse.linalg as spalg
+import scipy                            # type: ignore
+import scipy.optimize                   # type: ignore
+from scipy.linalg import norm           # type: ignore
+import scipy.sparse.linalg as spalg     # type: ignore

 from ..fdmath import fdfield_t, cfdfield_t

@@ -115,6 +114,7 @@ logger = logging.getLogger(__name__)
 try:
     import pyfftw.interfaces.numpy_fft  # type: ignore
     import pyfftw.interfaces  # type: ignore
+    import multiprocessing
     logger.info('Using pyfftw')
     pyfftw.interfaces.cache.enable()

@@ -232,7 +232,7 @@ def maxwell_operator(
             Raveled conv(1/mu_k, ik x conv(1/eps_k, ik x h_mn)), returned
             and overwritten in-place of `h`.
         """
-        hin_m, hin_n = (hi.reshape(shape) for hi in numpy.split(h, 2))
+        hin_m, hin_n = [hi.reshape(shape) for hi in numpy.split(h, 2)]

         #{d,e,h}_xyz fields are complex 3-fields in (1/x, 1/y, 1/z) basis

@@ -303,12 +303,12 @@ def hmn_2_exyz(
     k_mag, m, n = generate_kmn(k0, G_matrix, shape)

     def operator(h: NDArray[numpy.complex128]) -> cfdfield_t:
-        hin_m, hin_n = (hi.reshape(shape) for hi in numpy.split(h, 2))
+        hin_m, hin_n = [hi.reshape(shape) for hi in numpy.split(h, 2)]
         d_xyz = (n * hin_m
                  - m * hin_n) * k_mag  # noqa: E128

         # divide by epsilon
-        return numpy.moveaxis(ifftn(d_xyz, axes=range(3)) / epsilon, 3, 0)
+        return numpy.array([ei for ei in numpy.moveaxis(ifftn(d_xyz, axes=range(3)) / epsilon, 3, 0)])      # TODO avoid copy

     return operator

@@ -341,7 +341,7 @@ def hmn_2_hxyz(
     _k_mag, m, n = generate_kmn(k0, G_matrix, shape)

     def operator(h: NDArray[numpy.complex128]) -> cfdfield_t:
-        hin_m, hin_n = (hi.reshape(shape) for hi in numpy.split(h, 2))
+        hin_m, hin_n = [hi.reshape(shape) for hi in numpy.split(h, 2)]
         h_xyz = (m * hin_m
                  + n * hin_n)  # noqa: E128
         return numpy.array([ifftn(hi) for hi in numpy.moveaxis(h_xyz, 3, 0)])

@@ -394,7 +394,7 @@ def inverse_maxwell_operator_approx(
         Returns:
             Raveled ik x conv(eps_k, ik x conv(mu_k, h_mn))
         """
-        hin_m, hin_n = (hi.reshape(shape) for hi in numpy.split(h, 2))
+        hin_m, hin_n = [hi.reshape(shape) for hi in numpy.split(h, 2)]

         #{d,e,h}_xyz fields are complex 3-fields in (1/x, 1/y, 1/z) basis

@@ -561,10 +561,9 @@ def eigsolve(
     prev_theta = 0.5
     D = numpy.zeros(shape=y_shape, dtype=complex)

-    rng = numpy.random.default_rng()
     Z: NDArray[numpy.complex128]
     if y0 is None:
-        Z = rng.random(y_shape) + 1j * rng.random(y_shape)
+        Z = numpy.random.rand(*y_shape) + 1j * numpy.random.rand(*y_shape)
     else:
         Z = numpy.array(y0, copy=False).T

@@ -574,7 +573,7 @@ def eigsolve(
         try:
             U = numpy.linalg.inv(ZtZ)
         except numpy.linalg.LinAlgError:
-            Z = rng.random(y_shape) + 1j * rng.random(y_shape)
+            Z = numpy.random.rand(*y_shape) + 1j * numpy.random.rand(*y_shape)
             continue

         trace_U = real(trace(U))

@@ -647,7 +646,8 @@ def eigsolve(
         Qi_memo: list[float | None] = [None, None]

-        def Qi_func(theta: float, Qi_memo=Qi_memo, ZtZ=ZtZ, DtD=DtD, symZtD=symZtD) -> float:   # noqa: ANN001
+        def Qi_func(theta: float) -> float:
+            nonlocal Qi_memo
             if Qi_memo[0] == theta:
                 return cast(float, Qi_memo[1])

@@ -656,7 +656,7 @@ def eigsolve(
             Q = c * c * ZtZ + s * s * DtD + 2 * s * c * symZtD
             try:
                 Qi = numpy.linalg.inv(Q)
-            except numpy.linalg.LinAlgError as err:
+            except numpy.linalg.LinAlgError:
                 logger.info('taylor Qi')
                 # if c or s small, taylor expand
                 if c < 1e-4 * s and c != 0:

@@ -666,12 +666,12 @@ def eigsolve(
                     ZtZi = numpy.linalg.inv(ZtZ)
                     Qi = ZtZi / (c * c) - 2 * s / (c * c * c) * (ZtZi @ (ZtZi @ symZtD).conj().T)
                 else:
-                    raise Exception('Inexplicable singularity in trace_func') from err
+                    raise Exception('Inexplicable singularity in trace_func')

             Qi_memo[0] = theta
             Qi_memo[1] = cast(float, Qi)
             return cast(float, Qi)

-        def trace_func(theta: float, ZtAZ=ZtAZ, DtAD=DtAD, symZtAD=symZtAD) -> float:   # noqa: ANN001
+        def trace_func(theta: float) -> float:
             c = numpy.cos(theta)
             s = numpy.sin(theta)
             Qi = Qi_func(theta)

@@ -680,15 +680,15 @@ def eigsolve(
             return numpy.abs(trace)

         if False:
-            def trace_deriv(theta, sgn: int = sgn, ZtAZ=ZtAZ, DtAD=DtAD, symZtD=symZtD, symZtAD=symZtAD, ZtZ=ZtZ, DtD=DtD):   # noqa: ANN001
+            def trace_deriv(theta):
                 Qi = Qi_func(theta)
                 c2 = numpy.cos(2 * theta)
                 s2 = numpy.sin(2 * theta)

-                F = -0.5 * s2 * (ZtAZ - DtAD) + c2 * symZtAD
+                F = -0.5*s2 * (ZtAZ - DtAD) + c2 * symZtAD
                 trace_deriv = _rtrace_AtB(Qi, F)

                 G = Qi @ F.conj().T @ Qi.conj().T
-                H = -0.5 * s2 * (ZtZ - DtD) + c2 * symZtD
+                H = -0.5*s2 * (ZtZ - DtD) + c2 * symZtD
                 trace_deriv -= _rtrace_AtB(G, H)

                 trace_deriv *= 2

@@ -696,12 +696,12 @@ def eigsolve(
         U_sZtD = U @ symZtD

-        dE = 2.0 * (_rtrace_AtB(U, symZtAD)
-                    - _rtrace_AtB(ZtAZU, U_sZtD))
+        dE = 2.0 * (_rtrace_AtB(U, symZtAD) -
+                    _rtrace_AtB(ZtAZU, U_sZtD))

-        d2E = 2 * (_rtrace_AtB(U, DtAD)
-                   - _rtrace_AtB(ZtAZU, U @ (DtD - 4 * symZtD @ U_sZtD))
-                   - 4 * _rtrace_AtB(U, symZtAD @ U_sZtD))
+        d2E = 2 * (_rtrace_AtB(U, DtAD) -
+                   _rtrace_AtB(ZtAZU, U @ (DtD - 4 * symZtD @ U_sZtD)) -
+                   4 * _rtrace_AtB(U, symZtAD @ U_sZtD))

         # Newton-Raphson to find a root of the first derivative:
         theta = -dE / d2E

@@ -781,7 +781,7 @@ def linmin(x_guess, f0, df0, x_max, f_tol=0.1, df_tol=min(tolerance, 1e-6), x_to
                                           x_min, x_max, isave, dsave)
         for i in range(int(1e6)):
             if task != 'F':
-                logging.info(f'search converged in {i} iterations')
+                logging.info('search converged in {} iterations'.format(i))
                 break

             fx = f(x, dfx)
             x, fx, dfx, task = minpack2.dsrch(x, fx, dfx, f_tol, df_tol, x_tol, task,

View File

@@ -1,8 +1,7 @@
 """
 Functions for performing near-to-farfield transformation (and the reverse).
 """
-from typing import Any, cast
-from collections.abc import Sequence
+from typing import Any, Sequence, cast
 import numpy
 from numpy.fft import fft2, fftshift, fftfreq, ifft2, ifftshift
 from numpy import pi

View File

@@ -5,7 +5,7 @@ Functional versions of many FDFD operators. These can be useful for performing
 The functions generated here expect `cfdfield_t` inputs with shape (3, X, Y, Z),
 e.g. E = [E_x, E_y, E_z] where each (complex) component has shape (X, Y, Z)
 """
-from collections.abc import Callable
+from typing import Callable

 import numpy

 from ..fdmath import dx_lists_t, fdfield_t, cfdfield_t, cfdfield_updater_t

@@ -47,6 +47,7 @@ def e_full(

     if mu is None:
         return op_1
+    else:
         return op_mu

@@ -83,6 +84,7 @@ def eh_full(

     if mu is None:
         return op_1
+    else:
         return op_mu

@@ -114,6 +116,7 @@ def e2h(

     if mu is None:
         return e2h_1_1
+    else:
         return e2h_mu

@@ -148,6 +151,7 @@ def m2j(

     if mu is None:
         return m2j_1
+    else:
         return m2j_mu
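The module docstring above states that the generated functions expect `cfdfield_t` inputs of shape (3, X, Y, Z). A minimal sketch of such a field in plain numpy (shapes chosen arbitrarily for illustration):

```python
import numpy

X, Y, Z = 4, 5, 6
E = numpy.zeros((3, X, Y, Z), dtype=complex)   # E[0]=E_x, E[1]=E_y, E[2]=E_z
E[2, X // 2, Y // 2, Z // 2] = 1.0             # point excitation in E_z
assert E[0].shape == (X, Y, Z)                 # each component is an (X, Y, Z) array
```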

View File

@@ -28,7 +28,7 @@ The following operators are included:
 """

 import numpy
-from scipy import sparse
+import scipy.sparse as sparse   # type: ignore

 from ..fdmath import vec, dx_lists_t, vfdfield_t, vcfdfield_t
 from ..fdmath.operators import shift_with_mirror, shift_circ, curl_forward, curl_back

@@ -321,11 +321,11 @@ def poynting_e_cross(e: vcfdfield_t, dxes: dx_lists_t) -> sparse.spmatrix:
     """
     shape = [len(dx) for dx in dxes[0]]

-    fx, fy, fz = (shift_circ(i, shape, 1) for i in range(3))
+    fx, fy, fz = [shift_circ(i, shape, 1) for i in range(3)]

     dxag = [dx.ravel(order='C') for dx in numpy.meshgrid(*dxes[0], indexing='ij')]
     dxbg = [dx.ravel(order='C') for dx in numpy.meshgrid(*dxes[1], indexing='ij')]
-    Ex, Ey, Ez = (ei * da for ei, da in zip(numpy.split(e, 3), dxag, strict=True))
+    Ex, Ey, Ez = [ei * da for ei, da in zip(numpy.split(e, 3), dxag)]

     block_diags = [[ None,     fx @ -Ez,  fx @ Ey],
                    [ fy @ Ez,  None,      fy @ -Ex],

@@ -349,11 +349,11 @@ def poynting_h_cross(h: vcfdfield_t, dxes: dx_lists_t) -> sparse.spmatrix:
     """
     shape = [len(dx) for dx in dxes[0]]

-    fx, fy, fz = (shift_circ(i, shape, 1) for i in range(3))
+    fx, fy, fz = [shift_circ(i, shape, 1) for i in range(3)]

     dxag = [dx.ravel(order='C') for dx in numpy.meshgrid(*dxes[0], indexing='ij')]
     dxbg = [dx.ravel(order='C') for dx in numpy.meshgrid(*dxes[1], indexing='ij')]
-    Hx, Hy, Hz = (sparse.diags(hi * db) for hi, db in zip(numpy.split(h, 3), dxbg, strict=True))
+    Hx, Hy, Hz = [sparse.diags(hi * db) for hi, db in zip(numpy.split(h, 3), dxbg)]

     P = (sparse.bmat(
         [[ None,      -Hz @ fx,  Hy @ fx],

View File

@@ -2,7 +2,7 @@
 Functions for creating stretched coordinate perfectly matched layer (PML) absorbers.
 """

-from collections.abc import Sequence, Callable
+from typing import Sequence, Callable

 import numpy
 from numpy.typing import NDArray

View File

@@ -2,14 +2,13 @@
 Solvers and solver interface for FDFD problems.
 """

-from typing import Any
-from collections.abc import Callable
+from typing import Callable, Dict, Any, Optional
 import logging

 import numpy
 from numpy.typing import ArrayLike, NDArray
 from numpy.linalg import norm
-import scipy.sparse.linalg
+import scipy.sparse.linalg  # type: ignore

 from ..fdmath import dx_lists_t, vfdfield_t, vcfdfield_t
 from . import operators

@@ -44,8 +43,7 @@ def _scipy_qmr(
         nonlocal ii
         ii += 1
         if ii % 100 == 0:
-            cur_norm = norm(A @ xk - b)
-            logger.info(f'Solver residual at iteration {ii} : {cur_norm}')
+            logger.info('Solver residual at iteration {} : {}'.format(ii, norm(A @ xk - b)))

     if 'callback' in kwargs:
         def augmented_callback(xk: ArrayLike) -> None:

@@ -69,12 +67,12 @@ def generic(
         dxes: dx_lists_t,
         J: vcfdfield_t,
         epsilon: vfdfield_t,
-        mu: vfdfield_t | None = None,
-        pec: vfdfield_t | None = None,
-        pmc: vfdfield_t | None = None,
+        mu: Optional[vfdfield_t] = None,
+        pec: Optional[vfdfield_t] = None,
+        pmc: Optional[vfdfield_t] = None,
         adjoint: bool = False,
         matrix_solver: Callable[..., ArrayLike] = _scipy_qmr,
-        matrix_solver_opts: dict[str, Any] | None = None,
+        matrix_solver_opts: Optional[Dict[str, Any]] = None,
         ) -> vcfdfield_t:
     """
     Conjugate gradient FDFD solver using CSR sparse matrices.

View File

@@ -182,10 +182,10 @@ from typing import Any
 import numpy
 from numpy.typing import NDArray, ArrayLike
 from numpy.linalg import norm
-from scipy import sparse
+import scipy.sparse as sparse   # type: ignore

 from ..fdmath.operators import deriv_forward, deriv_back, cross
-from ..fdmath import vec, unvec, dx_lists_t, vfdfield_t, vcfdfield_t
+from ..fdmath import unvec, dx_lists_t, vfdfield_t, vcfdfield_t
 from ..eigensolvers import signed_eigensolve, rayleigh_quotient_iteration

@@ -253,8 +253,7 @@ def operator_e(
     mu_yx = sparse.diags(numpy.hstack((mu_parts[1], mu_parts[0])))
     mu_z_inv = sparse.diags(1 / mu_parts[2])

-    op = (
-        omega * omega * mu_yx @ eps_xy
+    op = (omega * omega * mu_yx @ eps_xy
         + mu_yx @ sparse.vstack((-Dby, Dbx)) @ mu_z_inv @ sparse.hstack((-Dfy, Dfx))
         + sparse.vstack((Dfx, Dfy)) @ eps_z_inv @ sparse.hstack((Dbx, Dby)) @ eps_xy
         )

@@ -322,8 +321,7 @@ def operator_h(
     mu_xy = sparse.diags(numpy.hstack((mu_parts[0], mu_parts[1])))
     mu_z_inv = sparse.diags(1 / mu_parts[2])

-    op = (
-        omega * omega * eps_yx @ mu_xy
+    op = (omega * omega * eps_yx @ mu_xy
         + eps_yx @ sparse.vstack((-Dfy, Dfx)) @ eps_z_inv @ sparse.hstack((-Dby, Dbx))
         + sparse.vstack((Dbx, Dby)) @ mu_z_inv @ sparse.hstack((Dfx, Dfy)) @ mu_xy
         )

@@ -422,7 +420,7 @@ def _normalized_fields(
     Sz_a = E[0] * numpy.conj(H[1] * phase) * dxes_real[0][1] * dxes_real[1][0]
     Sz_b = E[1] * numpy.conj(H[0] * phase) * dxes_real[0][0] * dxes_real[1][1]
     Sz_tavg = numpy.real(Sz_a.sum() - Sz_b.sum()) * 0.5       # 0.5 since E, H are assumed to be peak (not RMS) amplitudes
-    assert Sz_tavg > 0, f'Found a mode propagating in the wrong direction! {Sz_tavg=}'
+    assert Sz_tavg > 0, 'Found a mode propagating in the wrong direction! Sz_tavg={}'.format(Sz_tavg)

     energy = epsilon * e.conj() * e
@@ -720,109 +718,6 @@ def e_err(
     return float(norm(op) / norm(e))


-def sensitivity(
-        e_norm: vcfdfield_t,
-        h_norm: vcfdfield_t,
-        wavenumber: complex,
-        omega: complex,
-        dxes: dx_lists_t,
-        epsilon: vfdfield_t,
-        mu: vfdfield_t | None = None,
-        ) -> vcfdfield_t:
-    r"""
-    Given a waveguide structure (`dxes`, `epsilon`, `mu`) and mode fields
-    (`e_norm`, `h_norm`, `wavenumber`, `omega`), calculates the sensitivity of the wavenumber
-    $\beta$ to changes in the dielectric structure $\epsilon$.
-
-    The output is a vector of the same size as `vec(epsilon)`, with each element specifying the
-    sensitivity of `wavenumber` to changes in the corresponding element in `vec(epsilon)`, i.e.
-
-    $$sens_{i} = \frac{\partial\beta}{\partial\epsilon_i}$$
-
-    An adjoint approach is used to calculate the sensitivity; the derivation is provided here:
-
-    Starting with the eigenvalue equation
-
-    $$\beta^2 E_{xy} = A_E E_{xy}$$
-
-    where $A_E$ is the waveguide operator from `operator_e()`, and
-    $E_{xy} = \begin{bmatrix} E_x \\ E_y \end{bmatrix}$,
-    we can differentiate with respect to one of the $\epsilon$ elements (i.e. at one Yee grid point), $\epsilon_i$:
-
-    $$
-    (2 \beta) \partial_{\epsilon_i}(\beta) E_{xy} + \beta^2 \partial_{\epsilon_i} E_{xy}
-        = \partial_{\epsilon_i}(A_E) E_{xy} + A_E \partial_{\epsilon_i} E_{xy}
-    $$
-
-    We then multiply by $H_{yx}^\star = \begin{bmatrix}H_y^\star \\ -H_x^\star \end{bmatrix}$ from the left:
-
-    $$
-    (2 \beta) \partial_{\epsilon_i}(\beta) H_{yx}^\star E_{xy} + \beta^2 H_{yx}^\star \partial_{\epsilon_i} E_{xy}
-        = H_{yx}^\star \partial_{\epsilon_i}(A_E) E_{xy} + H_{yx}^\star A_E \partial_{\epsilon_i} E_{xy}
-    $$
-
-    However, $H_{yx}^\star$ is actually a left-eigenvector of $A_E$. This can be verified by inspecting
-    the form of `operator_h` ($A_H$) and comparing its conjugate transpose to `operator_e` ($A_E$). Also, note
-    $H_{yx}^\star \cdot E_{xy} = H^\star \times E$ recalls the mode orthogonality relation. See doi:10.5194/ars-9-85-201
-    for a similar approach. Therefore,
-
-    $$
-    H_{yx}^\star A_E \partial_{\epsilon_i} E_{xy} = \beta^2 H_{yx}^\star \partial_{\epsilon_i} E_{xy}
-    $$
-
-    and we can simplify to
-
-    $$
-    \partial_{\epsilon_i}(\beta)
-        = \frac{1}{2 \beta} \frac{H_{yx}^\star \partial_{\epsilon_i}(A_E) E_{xy} }{H_{yx}^\star E_{xy}}
-    $$
-
-    This expression can be quickly calculated for all $i$ by writing out the various terms of
-    $\partial_{\epsilon_i} A_E$ and recognizing that the vector-matrix-vector products (i.e. scalars)
-    $sens_i = \vec{v}_{left} \partial_{\epsilon_i} (\epsilon_{xyz}) \vec{v}_{right}$, indexed by $i$, can be expressed as
-    elementwise multiplications $\vec{sens} = \vec{v}_{left} \star \vec{v}_{right}$
-
-
-    Args:
-        e_norm: Normalized, vectorized E_xyz field for the mode. E.g. as returned by `normalized_fields_e`.
-        h_norm: Normalized, vectorized H_xyz field for the mode. E.g. as returned by `normalized_fields_e`.
-        wavenumber: Propagation constant for the mode. The z-axis is assumed to be continuous (i.e. without numerical dispersion).
-        omega: The angular frequency of the system.
-        dxes: Grid parameters `[dx_e, dx_h]` as described in `meanas.fdmath.types` (2D)
-        epsilon: Vectorized dielectric constant grid
-        mu: Vectorized magnetic permeability grid (default 1 everywhere)
-
-    Returns:
-        The sensitivity vector (same size as `vec(epsilon)`).
-    """
-    if mu is None:
-        mu = numpy.ones_like(epsilon)
-
-    Dfx, Dfy = deriv_forward(dxes[0])
-    Dbx, Dby = deriv_back(dxes[1])
-
-    eps_x, eps_y, eps_z = numpy.split(epsilon, 3)
-    eps_xy = sparse.diags(numpy.hstack((eps_x, eps_y)))
-    eps_z_inv = sparse.diags(1 / eps_z)
-
-    mu_x, mu_y, _mu_z = numpy.split(mu, 3)
-    mu_yx = sparse.diags(numpy.hstack((mu_y, mu_x)))
-
-    da_exxhyy = vec(dxes[1][0][:, None] * dxes[0][1][None, :])
-    da_eyyhxx = vec(dxes[1][1][None, :] * dxes[0][0][:, None])
-    ev_xy = numpy.concatenate(numpy.split(e_norm, 3)[:2]) * numpy.concatenate([da_exxhyy, da_eyyhxx])
-
-    hx, hy, hz = numpy.split(h_norm, 3)
-    hv_yx_conj = numpy.conj(numpy.concatenate([hy, -hx]))
-
-    sens_xy1 = (hv_yx_conj @ (omega * omega * mu_yx)) * ev_xy
-    sens_xy2 = (hv_yx_conj @ sparse.vstack((Dfx, Dfy)) @ eps_z_inv @ sparse.hstack((Dbx, Dby))) * ev_xy
-    sens_z = (hv_yx_conj @ sparse.vstack((Dfx, Dfy)) @ (-eps_z_inv * eps_z_inv)) * (sparse.hstack((Dbx, Dby)) @ eps_xy @ ev_xy)
-    norm = hv_yx_conj @ ev_xy
-
-    sens_tot = numpy.concatenate([sens_xy1 + sens_xy2, sens_z]) / (2 * wavenumber * norm)
-    return sens_tot
-
-
 def solve_modes(
         mode_numbers: list[int],
         omega: complex,
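The deleted `sensitivity` docstring above derives $\partial\beta/\partial\epsilon_i$ by an adjoint argument. One hedged way to sanity-check such a sensitivity vector is a first-order finite-difference comparison; the sketch below assumes a user-supplied `solve_beta` callable mapping a vectorized epsilon to a wavenumber (illustrative names only, not this library's API):

```python
import numpy

def check_sensitivity_sketch(solve_beta, epsilon_vec, sens, i, d_eps=1e-6):
    # sens[i] should approximate d(beta)/d(epsilon_i) from the adjoint derivation.
    eps_perturbed = numpy.array(epsilon_vec, copy=True)
    eps_perturbed[i] += d_eps
    fd = (solve_beta(eps_perturbed) - solve_beta(epsilon_vec)) / d_eps
    return fd, sens[i]            # the two values should agree to first order
```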

View File

@@ -4,11 +4,9 @@ Tools for working with waveguide modes in 3D domains.
 This module relies heavily on `waveguide_2d` and mostly just transforms
 its parameters into 2D equivalents and expands the results back into 3D.
 """
-from typing import Any
-from collections.abc import Sequence
+from typing import Sequence, Any
 import numpy
 from numpy.typing import NDArray
-from numpy import complexfloating

 from ..fdmath import vec, unvec, dx_lists_t, fdfield_t, cfdfield_t
 from . import operators, waveguide_2d

@@ -23,7 +21,7 @@ def solve_mode(
         slices: Sequence[slice],
         epsilon: fdfield_t,
         mu: fdfield_t | None = None,
-        ) -> dict[str, complex | NDArray[complexfloating]]:
+        ) -> dict[str, complex | NDArray[numpy.float_]]:
     """
     Given a 3D grid, selects a slice from the grid and attempts to
     solve for an eigenmode propagating through that slice.

@@ -42,8 +40,8 @@ def solve_mode(
     Returns:
         ```
        {
-            'E': NDArray[complexfloating],
-            'H': NDArray[complexfloating],
+            'E': list[NDArray[numpy.float_]],
+            'H': list[NDArray[numpy.float_]],
             'wavenumber': complex,
        }
        ```

View File

@@ -9,9 +9,9 @@ As the z-dependence is known, all the functions in this file assume a 2D grid
 # TODO update module docs

 import numpy
-from scipy import sparse
+import scipy.sparse as sparse  # type: ignore

-from ..fdmath import vec, unvec, dx_lists_t, vfdfield_t, cfdfield_t
+from ..fdmath import vec, unvec, dx_lists_t, fdfield_t, vfdfield_t, cfdfield_t
 from ..fdmath.operators import deriv_forward, deriv_back
 from ..eigensolvers import signed_eigensolve, rayleigh_quotient_iteration

@@ -25,9 +25,6 @@ def cylindrical_operator(
     """
     Cylindrical coordinate waveguide operator of the form

-    (NOTE: See 10.1364/OL.33.001848)
-    TODO: consider 10.1364/OE.20.021583
-
     TODO

     for use with a field vector of the form `[E_r, E_y]`.

View File

@@ -741,24 +741,8 @@ the true values can be multiplied back in after the simulation is complete if no
 normalized results are needed.
 """

-from .types import (
-    fdfield_t as fdfield_t,
-    vfdfield_t as vfdfield_t,
-    cfdfield_t as cfdfield_t,
-    vcfdfield_t as vcfdfield_t,
-    dx_lists_t as dx_lists_t,
-    dx_lists_mut as dx_lists_mut,
-    fdfield_updater_t as fdfield_updater_t,
-    cfdfield_updater_t as cfdfield_updater_t,
-)
-from .vectorization import (
-    vec as vec,
-    unvec as unvec,
-)
-from . import (
-    operators as operators,
-    functional as functional,
-    types as types,
-    vectorization as vectorization,
-)
+from .types import fdfield_t, vfdfield_t, cfdfield_t, vcfdfield_t, dx_lists_t, dx_lists_mut
+from .types import fdfield_updater_t, cfdfield_updater_t
+from .vectorization import vec, unvec
+from . import operators, functional, types, vectorization

View File

@@ -3,18 +3,16 @@ Math functions for finite difference simulations

 Basic discrete calculus etc.
 """
-from typing import TypeVar
-from collections.abc import Sequence, Callable
+from typing import Sequence, Callable

 import numpy
 from numpy.typing import NDArray
-from numpy import floating, complexfloating

 from .types import fdfield_t, fdfield_updater_t


 def deriv_forward(
-        dx_e: Sequence[NDArray[floating]] | None = None,
+        dx_e: Sequence[NDArray[numpy.float_]] | None = None,
         ) -> tuple[fdfield_updater_t, fdfield_updater_t, fdfield_updater_t]:
     """
     Utility operators for taking discretized derivatives (backward variant).

@@ -38,7 +36,7 @@ def deriv_forward(

 def deriv_back(
-        dx_h: Sequence[NDArray[floating]] | None = None,
+        dx_h: Sequence[NDArray[numpy.float_]] | None = None,
         ) -> tuple[fdfield_updater_t, fdfield_updater_t, fdfield_updater_t]:
     """
     Utility operators for taking discretized derivatives (forward variant).

@@ -61,12 +59,9 @@ def deriv_back(
     return derivs


-TT = TypeVar('TT', bound='NDArray[floating | complexfloating]')
-
-
 def curl_forward(
-        dx_e: Sequence[NDArray[floating]] | None = None,
-        ) -> Callable[[TT], TT]:
+        dx_e: Sequence[NDArray[numpy.float_]] | None = None,
+        ) -> fdfield_updater_t:
     r"""
     Curl operator for use with the E field.

@@ -80,7 +75,7 @@ def curl_forward(
     """
     Dx, Dy, Dz = deriv_forward(dx_e)

-    def ce_fun(e: TT) -> TT:
+    def ce_fun(e: fdfield_t) -> fdfield_t:
         output = numpy.empty_like(e)
         output[0] = Dy(e[2])
         output[1] = Dz(e[0])

@@ -94,8 +89,8 @@ def curl_forward(

 def curl_back(
-        dx_h: Sequence[NDArray[floating]] | None = None,
-        ) -> Callable[[TT], TT]:
+        dx_h: Sequence[NDArray[numpy.float_]] | None = None,
+        ) -> fdfield_updater_t:
     r"""
     Create a function which takes the backward curl of a field.

@@ -109,7 +104,7 @@ def curl_back(
     """
     Dx, Dy, Dz = deriv_back(dx_h)

-    def ch_fun(h: TT) -> TT:
+    def ch_fun(h: fdfield_t) -> fdfield_t:
         output = numpy.empty_like(h)
         output[0] = Dy(h[2])
         output[1] = Dz(h[0])

@@ -123,7 +118,7 @@ def curl_back(

 def curl_forward_parts(
-        dx_e: Sequence[NDArray[floating]] | None = None,
+        dx_e: Sequence[NDArray[numpy.float_]] | None = None,
         ) -> Callable:
     Dx, Dy, Dz = deriv_forward(dx_e)

@@ -136,7 +131,7 @@ def curl_forward_parts(

 def curl_back_parts(
-        dx_h: Sequence[NDArray[floating]] | None = None,
+        dx_h: Sequence[NDArray[numpy.float_]] | None = None,
         ) -> Callable:
     Dx, Dy, Dz = deriv_back(dx_h)

View File

@@ -3,11 +3,10 @@ Matrix operators for finite difference simulations

 Basic discrete calculus etc.
 """
-from collections.abc import Sequence
+from typing import Sequence

 import numpy
 from numpy.typing import NDArray
-from numpy import floating
-from scipy import sparse
+import scipy.sparse as sparse  # type: ignore

 from .types import vfdfield_t

@@ -30,12 +29,12 @@ def shift_circ(
         Sparse matrix for performing the circular shift.
     """
     if len(shape) not in (2, 3):
-        raise Exception(f'Invalid shape: {shape}')
+        raise Exception('Invalid shape: {}'.format(shape))
     if axis not in range(len(shape)):
-        raise Exception(f'Invalid direction: {axis}, shape is {shape}')
+        raise Exception('Invalid direction: {}, shape is {}'.format(axis, shape))

     shifts = [abs(shift_distance) if a == axis else 0 for a in range(3)]
-    shifted_diags = [(numpy.arange(n) + s) % n for n, s in zip(shape, shifts, strict=True)]
+    shifted_diags = [(numpy.arange(n) + s) % n for n, s in zip(shape, shifts)]
     ijk = numpy.meshgrid(*shifted_diags, indexing='ij')

     n = numpy.prod(shape)

@@ -70,11 +69,12 @@ def shift_with_mirror(
         Sparse matrix for performing the shift-with-mirror.
     """
     if len(shape) not in (2, 3):
-        raise Exception(f'Invalid shape: {shape}')
+        raise Exception('Invalid shape: {}'.format(shape))
     if axis not in range(len(shape)):
-        raise Exception(f'Invalid direction: {axis}, shape is {shape}')
+        raise Exception('Invalid direction: {}, shape is {}'.format(axis, shape))
     if shift_distance >= shape[axis]:
-        raise Exception(f'Shift ({shift_distance}) is too large for axis {axis} of size {shape[axis]}')
+        raise Exception('Shift ({}) is too large for axis {} of size {}'.format(
+            shift_distance, axis, shape[axis]))

     def mirrored_range(n: int, s: int) -> NDArray[numpy.int_]:
         v = numpy.arange(n) + s

@@ -83,7 +83,7 @@ def shift_with_mirror(
         return v

     shifts = [shift_distance if a == axis else 0 for a in range(3)]
-    shifted_diags = [mirrored_range(n, s) for n, s in zip(shape, shifts, strict=True)]
+    shifted_diags = [mirrored_range(n, s) for n, s in zip(shape, shifts)]
     ijk = numpy.meshgrid(*shifted_diags, indexing='ij')

     n = numpy.prod(shape)

@@ -97,7 +97,7 @@ def shift_with_mirror(

 def deriv_forward(
-        dx_e: Sequence[NDArray[floating]],
+        dx_e: Sequence[NDArray[numpy.float_]],
         ) -> list[sparse.spmatrix]:
     """
     Utility operators for taking discretized derivatives (forward variant).

@@ -124,7 +124,7 @@ def deriv_forward(

 def deriv_back(
-        dx_h: Sequence[NDArray[floating]],
+        dx_h: Sequence[NDArray[numpy.float_]],
         ) -> list[sparse.spmatrix]:
     """
     Utility operators for taking discretized derivatives (backward variant).

@@ -198,7 +198,7 @@ def avg_forward(axis: int, shape: Sequence[int]) -> sparse.spmatrix:
         Sparse matrix for forward average operation.
     """
     if len(shape) not in (2, 3):
-        raise Exception(f'Invalid shape: {shape}')
+        raise Exception('Invalid shape: {}'.format(shape))

     n = numpy.prod(shape)
     return 0.5 * (sparse.eye(n) + shift_circ(axis, shape))

@@ -219,7 +219,7 @@ def avg_back(axis: int, shape: Sequence[int]) -> sparse.spmatrix:

 def curl_forward(
-        dx_e: Sequence[NDArray[floating]],
+        dx_e: Sequence[NDArray[numpy.float_]],
         ) -> sparse.spmatrix:
     """
     Curl operator for use with the E field.

@@ -235,7 +235,7 @@ def curl_forward(

 def curl_back(
-        dx_h: Sequence[NDArray[floating]],
+        dx_h: Sequence[NDArray[numpy.float_]],
         ) -> sparse.spmatrix:
     """
     Curl operator for use with the H field.

View File

@@ -1,26 +1,26 @@
 """
 Types shared across multiple submodules
 """
-from collections.abc import Sequence, Callable, MutableSequence
+from typing import Sequence, Callable, MutableSequence

+import numpy
 from numpy.typing import NDArray
-from numpy import floating, complexfloating


 # Field types
-fdfield_t = NDArray[floating]
+fdfield_t = NDArray[numpy.float_]
 """Vector field with shape (3, X, Y, Z) (e.g. `[E_x, E_y, E_z]`)"""

-vfdfield_t = NDArray[floating]
+vfdfield_t = NDArray[numpy.float_]
 """Linearized vector field (single vector of length 3*X*Y*Z)"""

-cfdfield_t = NDArray[complexfloating]
+cfdfield_t = NDArray[numpy.complex_]
 """Complex vector field with shape (3, X, Y, Z) (e.g. `[E_x, E_y, E_z]`)"""

-vcfdfield_t = NDArray[complexfloating]
+vcfdfield_t = NDArray[numpy.complex_]
 """Linearized complex vector field (single vector of length 3*X*Y*Z)"""


-dx_lists_t = Sequence[Sequence[NDArray[floating]]]
+dx_lists_t = Sequence[Sequence[NDArray[numpy.float_]]]
 """
 'dxes' datastructure which contains grid cell width information in the following format:

@@ -31,7 +31,7 @@ dx_lists_t = Sequence[Sequence[NDArray[floating]]]
 and `dy_h[0]` is the y-width of the `y=0` cells, as used when calculating dH/dy, etc.
 """

-dx_lists_mut = MutableSequence[MutableSequence[NDArray[floating]]]
+dx_lists_mut = MutableSequence[MutableSequence[NDArray[numpy.float_]]]
 """Mutable version of `dx_lists_t`"""

View File

@@ -4,8 +4,7 @@ and a 1D array representation of that field `[f_x0, f_x1, f_x2,... f_y0,... f_z0
 Vectorized versions of the field use row-major (ie., C-style) ordering.
 """

-from typing import overload
-from collections.abc import Sequence
+from typing import overload, Sequence

 import numpy
 from numpy.typing import ArrayLike
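The docstring above says vectorized fields use row-major (C-style) ordering between the (3, X, Y, Z) layout and a flat 1D vector. A plain-numpy sketch of that round trip, using `ravel`/`reshape` directly rather than this module's `vec`/`unvec` helpers:

```python
import numpy

X, Y, Z = 2, 3, 4
f = numpy.arange(3 * X * Y * Z, dtype=float).reshape(3, X, Y, Z)
v = f.ravel(order='C')            # [f_x0, f_x1, ..., f_y0, ..., f_z...]
f_back = v.reshape(3, X, Y, Z)    # reshaping recovers the original field
assert numpy.array_equal(f, f_back)
```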

View File

@@ -159,22 +159,8 @@ Boundary conditions

 # TODO notes about boundaries / PMLs
 """

-from .base import (
-    maxwell_e as maxwell_e,
-    maxwell_h as maxwell_h,
-)
-from .pml import (
-    cpml_params as cpml_params,
-    updates_with_cpml as updates_with_cpml,
-)
-from .energy import (
-    poynting as poynting,
-    poynting_divergence as poynting_divergence,
-    energy_hstep as energy_hstep,
-    energy_estep as energy_estep,
-    delta_energy_h2e as delta_energy_h2e,
-    delta_energy_j as delta_energy_j,
-)
-from .boundaries import (
-    conducting_boundary as conducting_boundary,
-)
+from .base import maxwell_e, maxwell_h
+from .pml import cpml_params, updates_with_cpml
+from .energy import (poynting, poynting_divergence, energy_hstep, energy_estep,
+                     delta_energy_h2e, delta_energy_j)
+from .boundaries import conducting_boundary

View File

@@ -15,17 +15,13 @@ def conducting_boundary(
         ) -> tuple[fdfield_updater_t, fdfield_updater_t]:
     dirs = [0, 1, 2]
     if direction not in dirs:
-        raise Exception(f'Invalid direction: {direction}')
+        raise Exception('Invalid direction: {}'.format(direction))
     dirs.remove(direction)
     u, v = dirs

-    boundary_slice: list[Any]
-    shifted1_slice: list[Any]
-    shifted2_slice: list[Any]
-
     if polarity < 0:
-        boundary_slice = [slice(None)] * 3
-        shifted1_slice = [slice(None)] * 3
+        boundary_slice = [slice(None)] * 3      # type: list[Any]
+        shifted1_slice = [slice(None)] * 3      # type: list[Any]
         boundary_slice[direction] = 0
         shifted1_slice[direction] = 1

@@ -46,7 +42,7 @@ def conducting_boundary(
     if polarity > 0:
         boundary_slice = [slice(None)] * 3
         shifted1_slice = [slice(None)] * 3
-        shifted2_slice = [slice(None)] * 3
+        shifted2_slice = [slice(None)] * 3      # type: list[Any]
         boundary_slice[direction] = -1
         shifted1_slice[direction] = -2
         shifted2_slice[direction] = -3

@@ -68,4 +64,4 @@ def conducting_boundary(

         return ep, hp

-    raise Exception(f'Bad polarity: {polarity}')
+    raise Exception('Bad polarity: {}'.format(polarity))

View File

@@ -7,8 +7,7 @@ PML implementations
 """
 # TODO retest pmls!

-from typing import Any
-from collections.abc import Callable, Sequence
+from typing import Callable, Sequence, Any
 from copy import deepcopy

 import numpy
 from numpy.typing import NDArray, DTypeLike

@@ -34,10 +33,10 @@ def cpml_params(
         ) -> dict[str, Any]:

     if axis not in range(3):
-        raise Exception(f'Invalid axis: {axis}')
+        raise Exception('Invalid axis: {}'.format(axis))

     if polarity not in (-1, 1):
-        raise Exception(f'Invalid polarity: {polarity}')
+        raise Exception('Invalid polarity: {}'.format(polarity))

     if thickness <= 2:
         raise Exception('It would be wise to have a pml with 4+ cells of thickness')

@@ -112,7 +111,7 @@ def updates_with_cpml(
     params_H: list[list[tuple[Any, Any, Any, Any]]] = deepcopy(params_E)

     for axis in range(3):
-        for pp, _polarity in enumerate((-1, 1)):
+        for pp, polarity in enumerate((-1, 1)):
             cpml_param = cpml_params[axis][pp]
             if cpml_param is None:
                 psi_E[axis][pp] = (None, None)

@@ -185,7 +184,7 @@ def updates_with_cpml(
     def update_H(
             e: fdfield_t,
             h: fdfield_t,
-            mu: fdfield_t | tuple[int, int, int] = (1, 1, 1),
+            mu: fdfield_t = numpy.ones(3),
             ) -> None:
         dyEx = Dfy(e[0])
         dzEx = Dfz(e[0])

View File

@@ -3,8 +3,7 @@
 Test fixtures
 """

-# ruff: noqa: ARG001
-from typing import Any
+from typing import Iterable, Any

 import numpy
 from numpy.typing import NDArray
 import pytest       # type: ignore

@@ -21,18 +20,18 @@ FixtureRequest = Any
     (5, 5, 5),
     # (7, 7, 7),
     ])
-def shape(request: FixtureRequest) -> tuple[int, ...]:
-    return (3, *request.param)
+def shape(request: FixtureRequest) -> Iterable[tuple[int, ...]]:
+    yield (3, *request.param)


 @pytest.fixture(scope='module', params=[1.0, 1.5])
-def epsilon_bg(request: FixtureRequest) -> float:
-    return request.param
+def epsilon_bg(request: FixtureRequest) -> Iterable[float]:
+    yield request.param


 @pytest.fixture(scope='module', params=[1.0, 2.5])
-def epsilon_fg(request: FixtureRequest) -> float:
-    return request.param
+def epsilon_fg(request: FixtureRequest) -> Iterable[float]:
+    yield request.param


 @pytest.fixture(scope='module', params=['center', '000', 'random'])

@@ -41,7 +40,7 @@ def epsilon(
         shape: tuple[int, ...],
         epsilon_bg: float,
         epsilon_fg: float,
-        ) -> NDArray[numpy.float64]:
+        ) -> Iterable[NDArray[numpy.float64]]:
     is3d = (numpy.array(shape) == 1).sum() == 0
     if is3d:
         if request.param == '000':

@@ -61,17 +60,17 @@ def epsilon(
                                 high=max(epsilon_bg, epsilon_fg),
                                 size=shape)
-    return epsilon
+    yield epsilon


 @pytest.fixture(scope='module', params=[1.0])       # 1.5
-def j_mag(request: FixtureRequest) -> float:
-    return request.param
+def j_mag(request: FixtureRequest) -> Iterable[float]:
+    yield request.param


 @pytest.fixture(scope='module', params=[1.0, 1.5])
-def dx(request: FixtureRequest) -> float:
-    return request.param
+def dx(request: FixtureRequest) -> Iterable[float]:
+    yield request.param


 @pytest.fixture(scope='module', params=['uniform', 'centerbig'])

@@ -79,7 +78,7 @@ def dxes(
         request: FixtureRequest,
         shape: tuple[int, ...],
         dx: float,
-        ) -> list[list[NDArray[numpy.float64]]]:
+        ) -> Iterable[list[list[NDArray[numpy.float64]]]]:
     if request.param == 'uniform':
         dxes = [[numpy.full(s, dx) for s in shape[1:]] for _ in range(2)]
     elif request.param == 'centerbig':

@@ -91,5 +90,5 @@ def dxes(
         dxe = [PRNG.uniform(low=1.0 * dx, high=1.1 * dx, size=s) for s in shape[1:]]
         dxh = [(d + numpy.roll(d, -1)) / 2 for d in dxe]
         dxes = [dxe, dxh]
-    return dxes
+    yield dxes

View File

@@ -1,4 +1,4 @@
-# ruff: noqa: ARG001
+from typing import Iterable
 import dataclasses
 import pytest       # type: ignore
 import numpy

@@ -61,24 +61,24 @@ def test_poynting_planes(sim: 'FDResult') -> None:
 # Also see conftest.py

 @pytest.fixture(params=[1 / 1500])
-def omega(request: FixtureRequest) -> float:
-    return request.param
+def omega(request: FixtureRequest) -> Iterable[float]:
+    yield request.param


 @pytest.fixture(params=[None])
-def pec(request: FixtureRequest) -> NDArray[numpy.float64] | None:
-    return request.param
+def pec(request: FixtureRequest) -> Iterable[NDArray[numpy.float64] | None]:
+    yield request.param


 @pytest.fixture(params=[None])
-def pmc(request: FixtureRequest) -> NDArray[numpy.float64] | None:
-    return request.param
+def pmc(request: FixtureRequest) -> Iterable[NDArray[numpy.float64] | None]:
+    yield request.param


 #@pytest.fixture(scope='module',
 #                params=[(25, 5, 5)])
-#def shape(request: FixtureRequest):
-#    return (3, *request.param)
+#def shape(request):
+#    yield (3, *request.param)


 @pytest.fixture(params=['diag'])        # 'center'

@@ -86,7 +86,7 @@ def j_distribution(
         request: FixtureRequest,
         shape: tuple[int, ...],
         j_mag: float,
-        ) -> NDArray[numpy.float64]:
+        ) -> Iterable[NDArray[numpy.float64]]:
     j = numpy.zeros(shape, dtype=complex)
     center_mask = numpy.zeros(shape, dtype=bool)
     center_mask[:, shape[1] // 2, shape[2] // 2, shape[3] // 2] = True

@@ -96,7 +96,7 @@ def j_distribution(
     elif request.param == 'diag':
         j[numpy.roll(center_mask, [1, 1, 1], axis=(1, 2, 3))] = (1 + 1j) * j_mag
         j[numpy.roll(center_mask, [-1, -1, -1], axis=(1, 2, 3))] = (1 - 1j) * j_mag
-    return j
+    yield j


 @dataclasses.dataclass()

View File

@@ -1,4 +1,4 @@
-# ruff: noqa: ARG001
+from typing import Iterable
 import pytest       # type: ignore
 import numpy
 from numpy.typing import NDArray

@@ -44,30 +44,30 @@ def test_pml(sim: FDResult, src_polarity: int) -> None:
 # Also see conftest.py

 @pytest.fixture(params=[1 / 1500])
-def omega(request: FixtureRequest) -> float:
-    return request.param
+def omega(request: FixtureRequest) -> Iterable[float]:
+    yield request.param


 @pytest.fixture(params=[None])
-def pec(request: FixtureRequest) -> NDArray[numpy.float64] | None:
-    return request.param
+def pec(request: FixtureRequest) -> Iterable[NDArray[numpy.float64] | None]:
+    yield request.param


 @pytest.fixture(params=[None])
-def pmc(request: FixtureRequest) -> NDArray[numpy.float64] | None:
-    return request.param
+def pmc(request: FixtureRequest) -> Iterable[NDArray[numpy.float64] | None]:
+    yield request.param


 @pytest.fixture(params=[(30, 1, 1),
                         (1, 30, 1),
                         (1, 1, 30)])
-def shape(request: FixtureRequest) -> tuple[int, int, int]:
-    return (3, *request.param)
+def shape(request: FixtureRequest) -> Iterable[tuple[int, ...]]:
+    yield (3, *request.param)


 @pytest.fixture(params=[+1, -1])
-def src_polarity(request: FixtureRequest) -> int:
-    return request.param
+def src_polarity(request: FixtureRequest) -> Iterable[int]:
+    yield request.param


 @pytest.fixture()

@@ -78,7 +78,7 @@ def j_distribution(
         dxes: dx_lists_mut,
         omega: float,
         src_polarity: int,
-        ) -> NDArray[numpy.complex128]:
+        ) -> Iterable[NDArray[numpy.complex128]]:
     j = numpy.zeros(shape, dtype=complex)
     dim = numpy.where(numpy.array(shape[1:]) > 1)[0][0]     # Propagation axis

@@ -106,7 +106,7 @@ def j_distribution(
     j = fdfd.waveguide_3d.compute_source(E=e, wavenumber=wavenumber_corrected, omega=omega, dxes=dxes,
                                          axis=dim, polarity=src_polarity, slices=slices, epsilon=epsilon)
-    return j
+    yield j


 @pytest.fixture()

@@ -115,9 +115,9 @@ def epsilon(
         shape: tuple[int, ...],
         epsilon_bg: float,
         epsilon_fg: float,
-        ) -> NDArray[numpy.float64]:
+        ) -> Iterable[NDArray[numpy.float64]]:
     epsilon = numpy.full(shape, epsilon_fg, dtype=float)
-    return epsilon
+    yield epsilon


 @pytest.fixture(params=['uniform'])

@@ -127,7 +127,7 @@ def dxes(
         dx: float,
         omega: float,
         epsilon_fg: float,
-        ) -> list[list[NDArray[numpy.float64]]]:
+        ) -> Iterable[list[list[NDArray[numpy.float64]]]]:
     if request.param == 'uniform':
         dxes = [[numpy.full(s, dx) for s in shape[1:]] for _ in range(2)]
     dim = numpy.where(numpy.array(shape[1:]) > 1)[0][0]     # Propagation axis

@@ -141,7 +141,7 @@ def dxes(
             epsilon_effective=epsilon_fg,
             thickness=10,
             )
-    return dxes
+    yield dxes


 @pytest.fixture()

View File

@@ -1,5 +1,4 @@
-# ruff: noqa: ARG001
-from typing import Any
+from typing import Iterable, Any
 import dataclasses
 import pytest       # type: ignore
 import numpy

@@ -102,7 +101,7 @@ def test_poynting_divergence(sim: 'TDResult') -> None:
 def test_poynting_planes(sim: 'TDResult') -> None:
     mask = (sim.js[0] != 0).any(axis=0)
     if mask.sum() > 1:
-        pytest.skip(f'test_poynting_planes can only test single point sources, got {mask.sum()}')
+        pytest.skip('test_poynting_planes can only test single point sources, got {}'.format(mask.sum()))

     args: dict[str, Any] = {
         'dxes': sim.dxes,

@@ -151,8 +150,8 @@ def test_poynting_planes(sim: 'TDResult') -> None:

 @pytest.fixture(params=[0.3])
-def dt(request: FixtureRequest) -> float:
-    return request.param
+def dt(request: FixtureRequest) -> Iterable[float]:
+    yield request.param


 @dataclasses.dataclass()

@@ -169,8 +168,8 @@ class TDResult:

 @pytest.fixture(params=[(0, 4, 8)])     # (0,)
-def j_steps(request: FixtureRequest) -> tuple[int, ...]:
-    return request.param
+def j_steps(request: FixtureRequest) -> Iterable[tuple[int, ...]]:
+    yield request.param


 @pytest.fixture(params=['center', 'random'])

@@ -178,7 +177,7 @@ def j_distribution(
         request: FixtureRequest,
         shape: tuple[int, ...],
         j_mag: float,
-        ) -> NDArray[numpy.float64]:
+        ) -> Iterable[NDArray[numpy.float64]]:
     j = numpy.zeros(shape)
     if request.param == 'center':
         j[:, shape[1] // 2, shape[2] // 2, shape[3] // 2] = j_mag

@@ -186,7 +185,7 @@ def j_distribution(
         j[:, 0, 0, 0] = j_mag
     elif request.param == 'random':
         j[:] = PRNG.uniform(low=-j_mag, high=j_mag, size=shape)
-    return j
+    yield j


 @pytest.fixture()

@@ -200,7 +199,8 @@ def sim(
         j_steps: tuple[int, ...],
         ) -> TDResult:
     is3d = (numpy.array(shape) == 1).sum() == 0
-    if is3d and dt != 0.3:
+    if is3d:
+        if dt != 0.3:
             pytest.skip('Skipping dt != 0.3 because test is 3D (for speed)')

     sim = TDResult(

View File

@@ -1,3 +1,5 @@
+from typing import Any
+
 import numpy
 from numpy.typing import NDArray

@@ -8,25 +10,22 @@ PRNG = numpy.random.RandomState(12345)

 def assert_fields_close(
         x: NDArray,
         y: NDArray,
-        *args,
-        **kwargs,
+        *args: Any,
+        **kwargs: Any,
         ) -> None:
-    x_disp = numpy.moveaxis(x, -1, 0)
-    y_disp = numpy.moveaxis(y, -1, 0)
     numpy.testing.assert_allclose(
-        x,                  # type: ignore
-        y,                  # type: ignore
+        x, y, verbose=False,        # type: ignore
+        err_msg='Fields did not match:\n{}\n{}'.format(numpy.moveaxis(x, -1, 0),
+                                                       numpy.moveaxis(y, -1, 0)),
         *args,
-        verbose=False,
-        err_msg=f'Fields did not match:\n{x_disp}\n{y_disp}',
         **kwargs,
         )


 def assert_close(
         x: NDArray,
         y: NDArray,
-        *args,
-        **kwargs,
+        *args: Any,
+        **kwargs: Any,
         ) -> None:
     numpy.testing.assert_allclose(x, y, *args, **kwargs)

View File

@@ -39,7 +39,7 @@ include = [
     ]
 dynamic = ["version"]
 dependencies = [
-    "numpy~=1.26",
+    "numpy~=1.21",
     "scipy",
     ]

@@ -51,48 +51,3 @@ path = "meanas/__init__.py"
 dev = ["pytest", "pdoc", "gridlock"]
 examples = ["gridlock"]
 test = ["pytest"]
-
-
-[tool.ruff]
-exclude = [
-    ".git",
-    "dist",
-    ]
-line-length = 245
-indent-width = 4
-lint.dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
-lint.select = [
-    "NPY", "E", "F", "W", "B", "ANN", "UP", "SLOT", "SIM", "LOG",
-    "C4", "ISC", "PIE", "PT", "RET", "TCH", "PTH", "INT",
-    "ARG", "PL", "R", "TRY",
-    "G010", "G101", "G201", "G202",
-    "Q002", "Q003", "Q004",
-    ]
-lint.ignore = [
-    #"ANN001",   # No annotation
-    "ANN002",    # *args
-    "ANN003",    # **kwargs
-    "ANN401",    # Any
-    "ANN101",    # self: Self
-    "SIM108",    # single-line if / else assignment
-    "RET504",    # x=y+z; return x
-    "PIE790",    # unnecessary pass
-    "ISC003",    # non-implicit string concatenation
-    "C408",      # dict(x=y) instead of {'x': y}
-    "PLR09",     # Too many xxx
-    "PLR2004",   # magic number
-    "PLC0414",   # import x as x
-    "TRY003",    # Long exception message
-    "TRY002",    # Exception()
-    ]
-
-
-[[tool.mypy.overrides]]
-module = [
-    "scipy",
-    "scipy.optimize",
-    "scipy.linalg",
-    "scipy.sparse",
-    "scipy.sparse.linalg",
-    ]
-ignore_missing_imports = true