Poisson Equation
PINNs: Poisson Equation (PyTorch)¶
The One-Dimensional Poisson Equation¶
We solve the one-dimensional Poisson equation with homogeneous Dirichlet boundary conditions:
$$ -\frac{d^2 u}{dx^2} = f(x), \quad x \in (0, 1) $$
with boundary conditions $u(0) = 0$ and $u(1) = 0$.
We choose the source term $f(x) = 4\pi^2 \sin(2\pi x)$, which yields the exact solution:
$$ u(x) = \sin(2\pi x) $$
We can verify by direct substitution: $-\frac{d^2}{dx^2}[\sin(2\pi x)] = 4\pi^2 \sin(2\pi x) = f(x)$, and $\sin(0) = \sin(2\pi) = 0$.
The task: given only $N = 5$ sparse measurements of $u(x)$ in the interior of the domain (between $x = 0.25$ and $x = 0.75$), reconstruct the full solution over $[0, 1]$.
We compare two approaches:
- A standard neural network trained on data alone
- A physics-informed neural network (PINN) that also enforces the PDE
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
import base64
from IPython.display import HTML, display
import imageio
from tqdm import tqdm
import os
# For reproducibility: seed both torch and numpy so weight initialization
# and any random sampling are identical across notebook runs.
torch.manual_seed(42)
np.random.seed(42)
# Figure save directory (relative to notebook location); created up front so
# later savefig calls cannot fail on a missing path.
FIGS_DIR = '../../sciml-book/chapters/04-pinns/figs'
os.makedirs(FIGS_DIR, exist_ok=True)
class PoissonProblem:
    """One-dimensional Poisson equation: -u'' = f on (0,1), u(0)=u(1)=0.

    Uses the manufactured solution u(x) = sin(freq*pi*x); the matching
    source term is f(x) = (freq*pi)^2 sin(freq*pi*x).
    """

    def __init__(self, source_freq=1.0):
        # f(x) = (freq*pi)^2 sin(freq*pi*x); integer freq keeps u(0)=u(1)=0.
        self.freq = source_freq

    def _omega(self):
        # Angular frequency freq*pi shared by every closed-form expression.
        return self.freq * np.pi

    def exact_solution(self, x):
        """Manufactured solution u(x) = sin(freq*pi*x)."""
        return np.sin(self._omega() * x)

    def source_term(self, x):
        """Source f(x) = (freq*pi)^2 sin(freq*pi*x) so that -u'' = f."""
        return self._omega() ** 2 * np.sin(self._omega() * x)

    def exact_derivative(self, x):
        """First derivative u'(x) = freq*pi * cos(freq*pi*x)."""
        return self._omega() * np.cos(self._omega() * x)

    def exact_second_derivative(self, x):
        """Second derivative u''(x) = -(freq*pi)^2 sin(freq*pi*x)."""
        return -self._omega() ** 2 * np.sin(self._omega() * x)

    def generate_data(self, num_domain_points=500, num_data_points=5,
                      data_x_min=0.25, data_x_max=0.75):
        """Generate domain points and sparse training data.

        Returns (x_domain, u_exact, x_data, u_data), each of shape (n, 1):
        a dense evaluation grid over [0, 1] with the exact solution, and
        num_data_points supervised samples confined to the interior band
        [data_x_min, data_x_max].
        """
        x_domain = np.linspace(0, 1, num_domain_points).reshape(-1, 1)
        u_exact = self.exact_solution(x_domain).reshape(-1, 1)
        # Sparse data points (interior only, concentrated in the middle)
        x_data = np.linspace(data_x_min, data_x_max,
                             num_data_points).reshape(-1, 1)
        u_data = self.exact_solution(x_data).reshape(-1, 1)
        return x_domain, u_exact, x_data, u_data
class Visualizer:
    """Plotting utilities for NN vs PINN comparison.

    Produces per-iteration frames as RGBA arrays, stitches them into an
    animated GIF, and embeds the GIF inline in the notebook.
    """

    def __init__(self):
        plt.style.use('default')

    def plot_result(self, x, u_exact, x_data, u_data, u_pred,
                    x_colloc=None, iteration=None, title_prefix=''):
        """Plot prediction vs exact solution. Returns image array for animation.

        Args:
            x, u_exact: dense grid and exact solution values.
            x_data, u_data: sparse supervised samples (may be empty).
            u_pred: network prediction on the dense grid.
            x_colloc: optional collocation points to mark near the x-axis.
            iteration: optional training step shown in the title.
            title_prefix: text prepended to the iteration title.

        Returns:
            (H, W, 4) uint8 RGBA array of the rendered figure.
        """
        fig, ax = plt.subplots(figsize=(10, 5))
        ax.grid(False)
        for spine in ax.spines.values():
            spine.set_visible(True)
        ax.plot(x, u_exact, color='gray', linewidth=2, linestyle='--',
                label='Exact Solution')
        ax.plot(x, u_pred, color='blue', linewidth=2,
                label='Neural Network Prediction')
        if x_data.size > 0:
            ax.scatter(x_data, u_data, color='red', s=80, zorder=3,
                       label='Training Data')
        if x_colloc is not None and len(x_colloc) > 0:
            # Collocation markers sit slightly below y=0 so they do not
            # overlap the solution curve.
            ax.scatter(x_colloc, np.zeros_like(x_colloc) - 0.05,
                       color='green', s=40, marker='^', alpha=0.6, zorder=3,
                       label='Collocation Points')
        if iteration is not None:
            ax.set_title(f'{title_prefix}Training Step: {iteration+1}')
        ax.legend(frameon=True, facecolor='white', edgecolor='black')
        ax.set_xlabel('x')
        ax.set_ylabel('u(x)')
        ax.set_xlim([0, 1])
        ax.set_ylim([-1.5, 1.5])
        ax.set_facecolor('white')
        fig.patch.set_facecolor('white')
        # Render the canvas and grab it as a raw RGBA byte buffer.
        fig.canvas.draw()
        image = np.frombuffer(fig.canvas.buffer_rgba(), dtype='uint8')
        image = image.reshape(fig.canvas.get_width_height()[::-1] + (4,))
        plt.close(fig)
        return image

    def create_animation(self, frames, filename='animation.gif'):
        """Write frames to an animated GIF; no-op when frames is empty."""
        if frames:
            imageio.mimsave(filename, frames, fps=5, loop=0)

    def display_animation(self, filename='animation.gif'):
        """Embed the GIF inline as a base64 data URL (notebook display)."""
        try:
            with open(filename, 'rb') as f:
                data = f.read()
            data_url = 'data:image/gif;base64,' + base64.b64encode(data).decode()
            display(HTML(f'<img src="{data_url}">'))
        except FileNotFoundError:
            # Bug fix: the message previously printed a literal placeholder
            # instead of the missing file's name.
            print(f'Animation file {filename} not found.')
class NeuralNetwork(nn.Module):
    """Fully connected feedforward network with tanh hidden activations."""

    def __init__(self, layer_sizes):
        """Build the network.

        Args:
            layer_sizes: widths of every layer, e.g. [1, 32, 32, 32, 1];
                the first entry is the input width, the last the output.
        """
        super().__init__()
        modules = []
        num_linear = len(layer_sizes) - 1
        for idx, (n_in, n_out) in enumerate(zip(layer_sizes[:-1],
                                                layer_sizes[1:])):
            modules.append(nn.Linear(n_in, n_out))
            # tanh after every layer except the final (linear) output layer.
            if idx != num_linear - 1:
                modules.append(nn.Tanh())
        self.model = nn.Sequential(*modules)

    def forward(self, x):
        """Map inputs of shape (batch, layer_sizes[0]) through the stack."""
        return self.model(x)
Attempt 1: Standard Neural Network (Data Only)¶
We first train a standard neural network on the sparse data alone, with no physics. The network minimizes the data loss:
$$ \mathcal{L}_{\text{data}}(\theta) = \frac{1}{N}\sum_{i=1}^N |\hat{u}(x_i; \theta) - u_i|^2 $$
def train_standard_nn(problem, visualizer,
                      num_domain_points=500, num_data_points=5):
    """Train a standard NN on sparse data (no physics).

    Minimizes only the MSE on the sparse supervised samples; the dense
    domain grid is used purely for evaluation frames.

    Args:
        problem: PoissonProblem supplying the exact solution and data.
        visualizer: Visualizer used for progress frames and the GIF.
        num_domain_points: dense evaluation-grid size (plotting only).
        num_data_points: number of sparse supervised samples.

    Returns:
        (model, x_domain_t, x_domain_np, u_exact_np, x_data_np, u_data_np,
        loss_history): the trained model, the evaluation grid as tensor and
        numpy array, the exact solution, the data, and per-step losses.
    """
    print('Training Standard Neural Network (data only)...')
    x_domain_np, u_exact_np, x_data_np, u_data_np = problem.generate_data(
        num_domain_points=num_domain_points,
        num_data_points=num_data_points
    )
    # Network: 3 hidden layers, 32 neurons each (matches chapter description)
    layer_sizes = [1, 32, 32, 32, 1]
    model = NeuralNetwork(layer_sizes)
    x_data_t = torch.tensor(x_data_np, dtype=torch.float32)
    u_data_t = torch.tensor(u_data_np, dtype=torch.float32)
    x_domain_t = torch.tensor(x_domain_np, dtype=torch.float32)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    criterion = nn.MSELoss()
    num_steps = 20000
    frames = []
    loss_history = []
    for step in tqdm(range(num_steps), desc='Training Standard NN'):
        model.train()
        optimizer.zero_grad()
        u_pred = model(x_data_t)
        loss = criterion(u_pred, u_data_t)
        loss.backward()
        optimizer.step()
        loss_history.append(loss.item())
        # Snapshot a frame at the first step and every 1500 steps thereafter.
        if (step + 1) % 1500 == 0 or step == 0:
            model.eval()
            with torch.no_grad():
                u_pred_full = model(x_domain_t).numpy()
            image = visualizer.plot_result(
                x_domain_np, u_exact_np, x_data_np, u_data_np,
                u_pred_full, iteration=step
            )
            frames.append(image)
    visualizer.create_animation(frames, filename='poisson_nn.gif')
    visualizer.display_animation('poisson_nn.gif')
    model.eval()
    return model, x_domain_t, x_domain_np, u_exact_np, x_data_np, u_data_np, loss_history
# Initialize problem and visualizer.
# source_freq=2.0 gives f(x) = 4*pi^2*sin(2*pi*x) and u(x) = sin(2*pi*x).
problem = PoissonProblem(source_freq=2.0)
visualizer = Visualizer()
# Train standard NN (data-only baseline).
(model_nn, x_domain_t, x_domain_np, u_exact_np,
 x_data_np, u_data_np, loss_nn) = train_standard_nn(problem, visualizer)
Training Standard Neural Network (data only)...
Training Standard NN: 0%| | 0/20000 [00:00<?, ?it/s]
Training Standard NN: 0%| | 1/20000 [00:00<36:51, 9.04it/s]
Training Standard NN: 3%|▎ | 521/20000 [00:00<00:06, 2932.77it/s]
Training Standard NN: 5%|▌ | 1053/20000 [00:00<00:04, 3995.47it/s]
Training Standard NN: 8%|▊ | 1500/20000 [00:00<00:04, 4176.37it/s]
Training Standard NN: 10%|█ | 2037/20000 [00:00<00:03, 4601.34it/s]
Training Standard NN: 13%|█▎ | 2573/20000 [00:00<00:03, 4855.39it/s]
Training Standard NN: 15%|█▌ | 3062/20000 [00:00<00:03, 4728.23it/s]
Training Standard NN: 18%|█▊ | 3594/20000 [00:00<00:03, 4909.92it/s]
Training Standard NN: 21%|██ | 4124/20000 [00:00<00:03, 5027.84it/s]
Training Standard NN: 23%|██▎ | 4629/20000 [00:01<00:03, 4843.12it/s]
Training Standard NN: 26%|██▌ | 5168/20000 [00:01<00:02, 5002.85it/s]
Training Standard NN: 28%|██▊ | 5698/20000 [00:01<00:02, 5089.71it/s]
Training Standard NN: 31%|███ | 6209/20000 [00:01<00:02, 4900.26it/s]
Training Standard NN: 34%|███▎ | 6743/20000 [00:01<00:02, 5025.62it/s]
Training Standard NN: 36%|███▋ | 7274/20000 [00:01<00:02, 5108.75it/s]
Training Standard NN: 39%|███▉ | 7787/20000 [00:01<00:02, 4928.50it/s]
Training Standard NN: 42%|████▏ | 8322/20000 [00:01<00:02, 5049.52it/s]
Training Standard NN: 44%|████▍ | 8858/20000 [00:01<00:02, 5139.03it/s]
Training Standard NN: 47%|████▋ | 9374/20000 [00:01<00:02, 4940.85it/s]
Training Standard NN: 50%|████▉ | 9905/20000 [00:02<00:02, 5046.40it/s]
Training Standard NN: 52%|█████▏ | 10440/20000 [00:02<00:01, 5133.51it/s]
Training Standard NN: 55%|█████▍ | 10956/20000 [00:02<00:01, 4915.22it/s]
Training Standard NN: 57%|█████▋ | 11490/20000 [00:02<00:01, 5036.70it/s]
Training Standard NN: 60%|██████ | 12000/20000 [00:02<00:01, 4882.33it/s]
Training Standard NN: 63%|██████▎ | 12538/20000 [00:02<00:01, 5022.95it/s]
Training Standard NN: 65%|██████▌ | 13076/20000 [00:02<00:01, 5124.63it/s]
Training Standard NN: 68%|██████▊ | 13591/20000 [00:02<00:01, 4947.20it/s]
Training Standard NN: 71%|███████ | 14129/20000 [00:02<00:01, 5069.01it/s]
Training Standard NN: 73%|███████▎ | 14668/20000 [00:03<00:01, 5160.63it/s]
Training Standard NN: 76%|███████▌ | 15186/20000 [00:03<00:00, 4964.57it/s]
Training Standard NN: 79%|███████▊ | 15717/20000 [00:03<00:00, 5061.20it/s]
Training Standard NN: 81%|████████▏ | 16256/20000 [00:03<00:00, 5156.09it/s]
Training Standard NN: 84%|████████▍ | 16774/20000 [00:03<00:00, 4920.45it/s]
Training Standard NN: 86%|████████▋ | 17295/20000 [00:03<00:00, 5000.75it/s]
Training Standard NN: 89%|████████▉ | 17817/20000 [00:03<00:00, 5061.03it/s]
Training Standard NN: 92%|█████████▏| 18326/20000 [00:03<00:00, 4847.97it/s]
Training Standard NN: 94%|█████████▍| 18825/20000 [00:03<00:00, 4887.88it/s]
Training Standard NN: 97%|█████████▋| 19323/20000 [00:03<00:00, 4912.09it/s]
Training Standard NN: 99%|█████████▉| 19816/20000 [00:04<00:00, 4677.54it/s]
Training Standard NN: 100%|██████████| 20000/20000 [00:04<00:00, 4857.35it/s]
Attempt 2: Physics-Informed Neural Network (PINN)¶
Now we add the physics. The PINN loss combines data, physics, and boundary terms:
$$ \mathcal{L}_{\text{total}} = \mathcal{L}_{\text{data}} + \lambda_{\text{phys}} \mathcal{L}_{\text{physics}} + \lambda_{\text{BC}} \mathcal{L}_{\text{BC}} $$
where the physics loss penalizes the PDE residual at collocation points:
$$ \mathcal{L}_{\text{physics}} = \frac{1}{N_c} \sum_{j=1}^{N_c} \left| -\frac{d^2 \hat{u}}{dx^2}(x_j^c) - f(x_j^c) \right|^2 $$
def pinn_loss(model, x_data, u_data, x_colloc, x_bc, source_fn,
              lambda_phys=1.0, lambda_bc=10.0):
    """Compute the combined PINN loss for -u'' = f with u = 0 on the boundary.

    Args:
        model: callable mapping an (n, 1) tensor of x values to u(x).
        x_data, u_data: sparse supervised samples (x_data may be empty).
        x_colloc: collocation points where the PDE residual is penalized.
        x_bc: boundary points, one per row, where u should vanish
            (generalized to any number of rows; identical to the original
            behavior for the two-point case used in this notebook).
        source_fn: torch-compatible source term f(x).
        lambda_phys, lambda_bc: weights of the physics and boundary terms.

    Returns:
        (total, data_loss, physics_loss, bc_loss) where total is a scalar
        tensor for backprop and the last three are plain floats for logging.
    """
    # Data loss: MSE on the sparse measurements (zero when no data given).
    if x_data.numel() > 0:
        u_pred_data = model(x_data)
        loss_data = nn.MSELoss()(u_pred_data, u_data)
    else:
        loss_data = torch.tensor(0.0)
    # Physics loss: residual of -u'' - f at the collocation points, with
    # u'' obtained by double reverse-mode differentiation w.r.t. x.
    x_c = x_colloc.clone().detach().requires_grad_(True)
    u_c = model(x_c)
    du_dx = torch.autograd.grad(
        u_c, x_c, grad_outputs=torch.ones_like(u_c),
        create_graph=True
    )[0]
    d2u_dx2 = torch.autograd.grad(
        du_dx, x_c, grad_outputs=torch.ones_like(du_dx),
        create_graph=True
    )[0]
    # Source term f(x) supplied by the caller, e.g. (freq*pi)^2 sin(freq*pi*x).
    f_val = source_fn(x_c)
    residual = -d2u_dx2 - f_val
    loss_physics = torch.mean(residual ** 2)
    # Boundary loss: u = 0 at every boundary point, evaluated in a single
    # forward pass (the original ran one forward pass per endpoint).
    u_bc = model(x_bc)
    loss_bc = torch.sum(u_bc ** 2)
    total = loss_data + lambda_phys * loss_physics + lambda_bc * loss_bc
    return total, loss_data.item(), loss_physics.item(), loss_bc.item()
def train_pinn(problem, visualizer,
               num_domain_points=500, num_data_points=5,
               num_colloc_points=50,
               lambda_phys=1.0, lambda_bc=10.0):
    """Train a PINN with data + physics + boundary losses.

    Two optimization phases: Adam for broad convergence, then L-BFGS for
    high-precision refinement of the PDE residual.

    Args:
        problem: PoissonProblem supplying data, frequency, exact solution.
        visualizer: Visualizer for progress frames and the GIF.
        num_domain_points: dense evaluation-grid size (plotting only).
        num_data_points: number of sparse supervised samples.
        num_colloc_points: number of PDE collocation points in (0, 1).
        lambda_phys, lambda_bc: loss weights passed through to pinn_loss.

    Returns:
        (model, x_domain_t, x_domain_np, u_exact_np, x_data_np, u_data_np,
        x_colloc_np, loss_history).
    """
    print('Training Physics-Informed Neural Network...')
    x_domain_np, u_exact_np, x_data_np, u_data_np = problem.generate_data(
        num_domain_points=num_domain_points,
        num_data_points=num_data_points
    )
    # Collocation points spanning the full domain, inset slightly from the
    # endpoints (the boundaries are handled by the separate BC loss).
    x_colloc_np = np.linspace(0.01, 0.99, num_colloc_points).reshape(-1, 1)
    # Network: same architecture as standard NN for a fair comparison.
    layer_sizes = [1, 32, 32, 32, 1]
    model = NeuralNetwork(layer_sizes)
    x_data_t = torch.tensor(x_data_np, dtype=torch.float32)
    u_data_t = torch.tensor(u_data_np, dtype=torch.float32)
    x_domain_t = torch.tensor(x_domain_np, dtype=torch.float32)
    x_colloc_t = torch.tensor(x_colloc_np, dtype=torch.float32)
    x_bc_t = torch.tensor([[0.0], [1.0]], dtype=torch.float32)
    # Source term as a torch function: f(x) = (freq*pi)^2 sin(freq*pi*x).
    freq = problem.freq
    source_fn = lambda x: (freq * np.pi)**2 * torch.sin(freq * np.pi * x)
    # Adam followed by L-BFGS
    optimizer_adam = optim.Adam(model.parameters(), lr=1e-3)
    num_adam_steps = 15000
    num_lbfgs_steps = 200
    frames = []
    loss_history = []
    # Phase 1: Adam
    for step in tqdm(range(num_adam_steps), desc='PINN Adam'):
        model.train()
        optimizer_adam.zero_grad()
        # ld/lp/lb are the individual loss components, available for logging.
        loss, ld, lp, lb = pinn_loss(
            model, x_data_t, u_data_t, x_colloc_t, x_bc_t, source_fn,
            lambda_phys=lambda_phys, lambda_bc=lambda_bc
        )
        loss.backward()
        optimizer_adam.step()
        loss_history.append(loss.item())
        # Snapshot a frame at the first step and every 1500 steps thereafter.
        if (step + 1) % 1500 == 0 or step == 0:
            model.eval()
            with torch.no_grad():
                u_pred_full = model(x_domain_t).numpy()
            image = visualizer.plot_result(
                x_domain_np, u_exact_np, x_data_np, u_data_np,
                u_pred_full, x_colloc=x_colloc_np, iteration=step
            )
            frames.append(image)
    # Phase 2: L-BFGS refinement
    optimizer_lbfgs = optim.LBFGS(
        model.parameters(), lr=0.5,
        max_iter=20, history_size=50,
        tolerance_grad=1e-9, tolerance_change=1e-11
    )
    for step in tqdm(range(num_lbfgs_steps), desc='PINN L-BFGS'):
        # L-BFGS may call the closure several times per step; capture the
        # last evaluated loss via a one-element list (closure cell).
        current_loss = [None]
        def closure():
            optimizer_lbfgs.zero_grad()
            loss, _, _, _ = pinn_loss(
                model, x_data_t, u_data_t, x_colloc_t, x_bc_t, source_fn,
                lambda_phys=lambda_phys, lambda_bc=lambda_bc
            )
            loss.backward()
            current_loss[0] = loss.item()
            return loss
        optimizer_lbfgs.step(closure)
        if current_loss[0] is not None:
            loss_history.append(current_loss[0])
    # Final frame
    model.eval()
    with torch.no_grad():
        u_pred_full = model(x_domain_t).numpy()
    image = visualizer.plot_result(
        x_domain_np, u_exact_np, x_data_np, u_data_np,
        u_pred_full, x_colloc=x_colloc_np,
        iteration=num_adam_steps + num_lbfgs_steps - 1
    )
    frames.append(image)
    visualizer.create_animation(frames, filename='poisson_pinn.gif')
    visualizer.display_animation('poisson_pinn.gif')
    model.eval()
    return model, x_domain_t, x_domain_np, u_exact_np, x_data_np, u_data_np, x_colloc_np, loss_history
# Train PINN on the same problem; also returns the collocation grid used.
(model_pinn, _, x_domain_np, u_exact_np,
 x_data_np, u_data_np, x_colloc_np, loss_pinn) = train_pinn(problem, visualizer)
Training Physics-Informed Neural Network...
PINN Adam: 0%| | 0/15000 [00:00<?, ?it/s]
PINN Adam: 1%| | 109/15000 [00:00<00:13, 1085.03it/s]
PINN Adam: 2%|▏ | 260/15000 [00:00<00:11, 1332.29it/s]
PINN Adam: 3%|▎ | 414/15000 [00:00<00:10, 1425.99it/s]
PINN Adam: 4%|▍ | 566/15000 [00:00<00:09, 1462.98it/s]
PINN Adam: 5%|▍ | 721/15000 [00:00<00:09, 1492.70it/s]
PINN Adam: 6%|▌ | 877/15000 [00:00<00:09, 1513.06it/s]
PINN Adam: 7%|▋ | 1033/15000 [00:00<00:09, 1527.47it/s]
PINN Adam: 8%|▊ | 1187/15000 [00:00<00:09, 1529.11it/s]
PINN Adam: 9%|▉ | 1341/15000 [00:00<00:08, 1530.73it/s]
PINN Adam: 10%|▉ | 1496/15000 [00:01<00:08, 1534.23it/s]
PINN Adam: 11%|█ | 1650/15000 [00:01<00:09, 1437.64it/s]
PINN Adam: 12%|█▏ | 1798/15000 [00:01<00:09, 1449.50it/s]
PINN Adam: 13%|█▎ | 1948/15000 [00:01<00:08, 1461.24it/s]
PINN Adam: 14%|█▍ | 2095/15000 [00:01<00:08, 1460.99it/s]
PINN Adam: 15%|█▍ | 2242/15000 [00:01<00:08, 1461.53it/s]
PINN Adam: 16%|█▌ | 2391/15000 [00:01<00:08, 1467.26it/s]
PINN Adam: 17%|█▋ | 2538/15000 [00:01<00:08, 1461.85it/s]
PINN Adam: 18%|█▊ | 2686/15000 [00:01<00:08, 1465.73it/s]
PINN Adam: 19%|█▉ | 2833/15000 [00:01<00:08, 1466.22it/s]
PINN Adam: 20%|█▉ | 2987/15000 [00:02<00:08, 1486.17it/s]
PINN Adam: 21%|██ | 3136/15000 [00:02<00:08, 1418.44it/s]
PINN Adam: 22%|██▏ | 3290/15000 [00:02<00:08, 1452.69it/s]
PINN Adam: 23%|██▎ | 3445/15000 [00:02<00:07, 1478.43it/s]
PINN Adam: 24%|██▍ | 3600/15000 [00:02<00:07, 1496.39it/s]
PINN Adam: 25%|██▌ | 3755/15000 [00:02<00:07, 1510.27it/s]
PINN Adam: 26%|██▌ | 3909/15000 [00:02<00:07, 1517.82it/s]
PINN Adam: 27%|██▋ | 4064/15000 [00:02<00:07, 1525.30it/s]
PINN Adam: 28%|██▊ | 4218/15000 [00:02<00:07, 1528.56it/s]
PINN Adam: 29%|██▉ | 4371/15000 [00:02<00:06, 1528.14it/s]
PINN Adam: 30%|███ | 4524/15000 [00:03<00:07, 1450.85it/s]
PINN Adam: 31%|███ | 4679/15000 [00:03<00:06, 1476.87it/s]
PINN Adam: 32%|███▏ | 4834/15000 [00:03<00:06, 1495.88it/s]
PINN Adam: 33%|███▎ | 4990/15000 [00:03<00:06, 1513.34it/s]
PINN Adam: 34%|███▍ | 5144/15000 [00:03<00:06, 1521.17it/s]
PINN Adam: 35%|███▌ | 5299/15000 [00:03<00:06, 1529.11it/s]
PINN Adam: 36%|███▋ | 5453/15000 [00:03<00:06, 1532.05it/s]
PINN Adam: 37%|███▋ | 5608/15000 [00:03<00:06, 1536.04it/s]
PINN Adam: 38%|███▊ | 5763/15000 [00:03<00:06, 1537.39it/s]
PINN Adam: 39%|███▉ | 5918/15000 [00:03<00:05, 1540.63it/s]
PINN Adam: 40%|████ | 6073/15000 [00:04<00:06, 1463.91it/s]
PINN Adam: 42%|████▏ | 6228/15000 [00:04<00:05, 1488.23it/s]
PINN Adam: 43%|████▎ | 6384/15000 [00:04<00:05, 1507.88it/s]
PINN Adam: 44%|████▎ | 6538/15000 [00:04<00:05, 1516.85it/s]
PINN Adam: 45%|████▍ | 6694/15000 [00:04<00:05, 1528.45it/s]
PINN Adam: 46%|████▌ | 6849/15000 [00:04<00:05, 1534.65it/s]
PINN Adam: 47%|████▋ | 7003/15000 [00:04<00:05, 1534.37it/s]
PINN Adam: 48%|████▊ | 7158/15000 [00:04<00:05, 1536.28it/s]
PINN Adam: 49%|████▊ | 7312/15000 [00:04<00:05, 1533.22it/s]
PINN Adam: 50%|████▉ | 7467/15000 [00:04<00:04, 1536.83it/s]
PINN Adam: 51%|█████ | 7621/15000 [00:05<00:05, 1448.98it/s]
PINN Adam: 52%|█████▏ | 7775/15000 [00:05<00:04, 1474.18it/s]
PINN Adam: 53%|█████▎ | 7929/15000 [00:05<00:04, 1493.27it/s]
PINN Adam: 54%|█████▍ | 8084/15000 [00:05<00:04, 1507.45it/s]
PINN Adam: 55%|█████▍ | 8238/15000 [00:05<00:04, 1515.04it/s]
PINN Adam: 56%|█████▌ | 8393/15000 [00:05<00:04, 1524.27it/s]
PINN Adam: 57%|█████▋ | 8548/15000 [00:05<00:04, 1531.04it/s]
PINN Adam: 58%|█████▊ | 8704/15000 [00:05<00:04, 1537.59it/s]
PINN Adam: 59%|█████▉ | 8858/15000 [00:05<00:04, 1534.13it/s]
PINN Adam: 60%|██████ | 9012/15000 [00:06<00:04, 1456.42it/s]
PINN Adam: 61%|██████ | 9165/15000 [00:06<00:03, 1475.10it/s]
PINN Adam: 62%|██████▏ | 9319/15000 [00:06<00:03, 1492.78it/s]
PINN Adam: 63%|██████▎ | 9473/15000 [00:06<00:03, 1503.97it/s]
PINN Adam: 64%|██████▍ | 9629/15000 [00:06<00:03, 1517.28it/s]
PINN Adam: 65%|██████▌ | 9783/15000 [00:06<00:03, 1523.64it/s]
PINN Adam: 66%|██████▌ | 9937/15000 [00:06<00:03, 1527.62it/s]
PINN Adam: 67%|██████▋ | 10091/15000 [00:06<00:03, 1530.33it/s]
PINN Adam: 68%|██████▊ | 10246/15000 [00:06<00:03, 1534.40it/s]
PINN Adam: 69%|██████▉ | 10400/15000 [00:06<00:03, 1528.63it/s]
PINN Adam: 70%|███████ | 10553/15000 [00:07<00:03, 1443.86it/s]
PINN Adam: 71%|███████▏ | 10707/15000 [00:07<00:02, 1469.98it/s]
PINN Adam: 72%|███████▏ | 10862/15000 [00:07<00:02, 1492.65it/s]
PINN Adam: 73%|███████▎ | 11016/15000 [00:07<00:02, 1504.89it/s]
PINN Adam: 74%|███████▍ | 11169/15000 [00:07<00:02, 1511.55it/s]
PINN Adam: 75%|███████▌ | 11323/15000 [00:07<00:02, 1517.44it/s]
PINN Adam: 77%|███████▋ | 11477/15000 [00:07<00:02, 1521.42it/s]
PINN Adam: 78%|███████▊ | 11631/15000 [00:07<00:02, 1525.22it/s]
PINN Adam: 79%|███████▊ | 11784/15000 [00:07<00:02, 1519.02it/s]
PINN Adam: 80%|███████▉ | 11936/15000 [00:07<00:02, 1498.52it/s]
PINN Adam: 81%|████████ | 12086/15000 [00:08<00:02, 1399.78it/s]
PINN Adam: 82%|████████▏ | 12232/15000 [00:08<00:01, 1416.64it/s]
PINN Adam: 83%|████████▎ | 12378/15000 [00:08<00:01, 1426.89it/s]
PINN Adam: 83%|████████▎ | 12524/15000 [00:08<00:01, 1436.38it/s]
PINN Adam: 84%|████████▍ | 12671/15000 [00:08<00:01, 1445.86it/s]
PINN Adam: 85%|████████▌ | 12821/15000 [00:08<00:01, 1461.75it/s]
PINN Adam: 86%|████████▋ | 12973/15000 [00:08<00:01, 1477.56it/s]
PINN Adam: 87%|████████▋ | 13124/15000 [00:08<00:01, 1484.78it/s]
PINN Adam: 88%|████████▊ | 13275/15000 [00:08<00:01, 1490.26it/s]
PINN Adam: 90%|████████▉ | 13427/15000 [00:08<00:01, 1498.72it/s]
PINN Adam: 91%|█████████ | 13577/15000 [00:09<00:01, 1422.86it/s]
PINN Adam: 92%|█████████▏| 13730/15000 [00:09<00:00, 1453.40it/s]
PINN Adam: 93%|█████████▎| 13883/15000 [00:09<00:00, 1474.51it/s]
PINN Adam: 94%|█████████▎| 14038/15000 [00:09<00:00, 1495.74it/s]
PINN Adam: 95%|█████████▍| 14191/15000 [00:09<00:00, 1503.56it/s]
PINN Adam: 96%|█████████▌| 14346/15000 [00:09<00:00, 1514.53it/s]
PINN Adam: 97%|█████████▋| 14498/15000 [00:09<00:00, 1513.10it/s]
PINN Adam: 98%|█████████▊| 14651/15000 [00:09<00:00, 1515.04it/s]
PINN Adam: 99%|█████████▊| 14805/15000 [00:09<00:00, 1520.90it/s]
PINN Adam: 100%|█████████▉| 14960/15000 [00:10<00:00, 1529.11it/s]
PINN Adam: 100%|██████████| 15000/15000 [00:10<00:00, 1491.00it/s]
PINN L-BFGS: 0%| | 0/200 [00:00<?, ?it/s]
PINN L-BFGS: 4%|▎ | 7/200 [00:00<00:02, 64.83it/s]
PINN L-BFGS: 8%|▊ | 15/200 [00:00<00:02, 73.39it/s]
PINN L-BFGS: 12%|█▏ | 23/200 [00:00<00:02, 75.41it/s]
PINN L-BFGS: 16%|█▌ | 31/200 [00:00<00:02, 72.46it/s]
PINN L-BFGS: 20%|█▉ | 39/200 [00:00<00:02, 71.23it/s]
PINN L-BFGS: 24%|██▎ | 47/200 [00:00<00:02, 69.87it/s]
PINN L-BFGS: 28%|██▊ | 55/200 [00:00<00:02, 70.70it/s]
PINN L-BFGS: 32%|███▏ | 63/200 [00:00<00:01, 68.72it/s]
PINN L-BFGS: 36%|███▌ | 71/200 [00:01<00:01, 70.69it/s]
PINN L-BFGS: 40%|████ | 80/200 [00:01<00:01, 74.20it/s]
PINN L-BFGS: 44%|████▍ | 89/200 [00:01<00:01, 78.64it/s]
PINN L-BFGS: 49%|████▉ | 98/200 [00:01<00:01, 78.97it/s]
PINN L-BFGS: 53%|█████▎ | 106/200 [00:01<00:01, 75.26it/s]
PINN L-BFGS: 57%|█████▋ | 114/200 [00:01<00:01, 72.65it/s]
PINN L-BFGS: 61%|██████ | 122/200 [00:01<00:01, 71.19it/s]
PINN L-BFGS: 65%|██████▌ | 130/200 [00:01<00:00, 70.09it/s]
PINN L-BFGS: 69%|██████▉ | 138/200 [00:01<00:00, 69.40it/s]
PINN L-BFGS: 72%|███████▎ | 145/200 [00:02<00:00, 68.98it/s]
PINN L-BFGS: 76%|███████▋ | 153/200 [00:02<00:00, 70.45it/s]
PINN L-BFGS: 80%|████████ | 161/200 [00:02<00:00, 69.60it/s]
PINN L-BFGS: 84%|████████▍ | 169/200 [00:02<00:00, 69.90it/s]
PINN L-BFGS: 88%|████████▊ | 177/200 [00:02<00:00, 70.47it/s]
PINN L-BFGS: 92%|█████████▎| 185/200 [00:02<00:00, 68.64it/s]
PINN L-BFGS: 96%|█████████▌| 192/200 [00:02<00:00, 67.57it/s]
PINN L-BFGS: 100%|█████████▉| 199/200 [00:02<00:00, 66.50it/s]
PINN L-BFGS: 100%|██████████| 200/200 [00:02<00:00, 70.72it/s]
Comparison: Standard NN vs PINN¶
def save_result_figure(model, x_domain_t, x_domain_np, u_exact_np,
                       x_data_np, u_data_np, filename, title,
                       x_colloc_np=None):
    """Save a publication-quality figure of model prediction vs exact.

    Args:
        model: trained torch module mapping x -> u(x).
        x_domain_t: dense evaluation grid as a float32 tensor, shape (n, 1).
        x_domain_np, u_exact_np: same grid and exact solution as numpy arrays.
        x_data_np, u_data_np: sparse supervised samples to mark.
        filename: output path for the PNG.
        title: axes title.
        x_colloc_np: optional collocation points to mark along the x-axis.
    """
    model.eval()
    with torch.no_grad():
        u_pred = model(x_domain_t).numpy()
    fig, ax = plt.subplots(figsize=(8, 5))
    # NN prediction first (underneath), thick line
    ax.plot(x_domain_np, u_pred, color='tab:blue', linewidth=2.5,
            label='NN Prediction')
    # Exact solution on top, dark gray dashed
    ax.plot(x_domain_np, u_exact_np, color='#333333', linestyle='--',
            linewidth=2, label='Exact Solution', zorder=5)
    ax.scatter(x_data_np, u_data_np, color='tab:red', s=80, zorder=6,
               label='Training Data')
    if x_colloc_np is not None:
        # Markers placed just below y=0 so they read as an axis annotation.
        ax.scatter(x_colloc_np, np.zeros_like(x_colloc_np) - 0.05,
                   color='#B8860B', s=30, marker='^', alpha=0.8,
                   zorder=4, label='Collocation Points')
    ax.set_xlabel('x', fontsize=12)
    ax.set_ylabel('u(x)', fontsize=12)
    ax.set_title(title, fontsize=14)
    ax.legend(frameon=True, fontsize=11)
    ax.set_xlim([0, 1])
    ax.set_ylim([-1.3, 1.3])
    fig.tight_layout()
    fig.savefig(filename, dpi=150, bbox_inches='tight')
    # Bug fix: report the actual output path (the f-string had lost its
    # placeholder and printed a literal placeholder string).
    print(f'Saved: {filename}')
    plt.show()
    plt.close(fig)
# Save standard NN result (data-only baseline).
save_result_figure(
    model_nn, x_domain_t, x_domain_np, u_exact_np,
    x_data_np, u_data_np,
    filename=os.path.join(FIGS_DIR, 'poisson-result-nn.png'),
    title='Standard NN: Data Only'
)
# Save PINN result, additionally marking the collocation points.
save_result_figure(
    model_pinn, x_domain_t, x_domain_np, u_exact_np,
    x_data_np, u_data_np,
    filename=os.path.join(FIGS_DIR, 'poisson-result-pinn.png'),
    title='PINN: Data + Physics',
    x_colloc_np=x_colloc_np
)
Saved: ../../sciml-book/chapters/04-pinns/figs/poisson-result-nn.png
Saved: ../../sciml-book/chapters/04-pinns/figs/poisson-result-pinn.png
def plot_comparison(model_nn, model_pinn, problem, x_domain_t,
                    x_domain_np, u_exact_np, x_data_np, u_data_np):
    """Side-by-side comparison of NN vs PINN predictions.

    Three panels: the predictions themselves, the pointwise absolute error
    (log scale), and the first derivatives computed via autograd.
    """
    model_nn.eval()
    model_pinn.eval()
    with torch.no_grad():
        u_pred_nn = model_nn(x_domain_t).numpy().ravel()
        u_pred_pinn = model_pinn(x_domain_t).numpy().ravel()
    x = x_domain_np.ravel()
    u_exact = u_exact_np.ravel()
    fig, axes = plt.subplots(1, 3, figsize=(16, 5))
    # Panel 1: Both predictions
    axes[0].plot(x, u_exact, 'k--', linewidth=2, label='Exact')
    axes[0].plot(x, u_pred_nn, 'b-', linewidth=1.5, label='Standard NN', alpha=0.8)
    axes[0].plot(x, u_pred_pinn, 'r-', linewidth=1.5, label='PINN', alpha=0.8)
    axes[0].scatter(x_data_np, u_data_np, color='red', s=60, zorder=5, label='Data')
    axes[0].set_xlabel('x')
    axes[0].set_ylabel('u(x)')
    axes[0].set_title('Predictions')
    axes[0].legend()
    # Panel 2: Pointwise error
    error_nn = np.abs(u_pred_nn - u_exact)
    error_pinn = np.abs(u_pred_pinn - u_exact)
    axes[1].semilogy(x, error_nn, 'b-', linewidth=1.5, label='Standard NN')
    # 1e-16 offset keeps semilogy well-defined where the error is exactly 0.
    axes[1].semilogy(x, error_pinn + 1e-16, 'r-', linewidth=1.5, label='PINN')
    axes[1].set_xlabel('x')
    axes[1].set_ylabel('|u_pred - u_exact|')
    axes[1].set_title('Pointwise Error')
    axes[1].legend()
    # Panel 3: Derivatives (each model gets its own requires_grad leaf so
    # the two autograd passes cannot interfere with each other).
    x_grad = x_domain_t.clone().detach().requires_grad_(True)
    u_nn = model_nn(x_grad)
    du_nn = torch.autograd.grad(u_nn, x_grad,
                                grad_outputs=torch.ones_like(u_nn),
                                create_graph=False)[0]
    x_grad2 = x_domain_t.clone().detach().requires_grad_(True)
    u_pinn = model_pinn(x_grad2)
    du_pinn = torch.autograd.grad(u_pinn, x_grad2,
                                  grad_outputs=torch.ones_like(u_pinn),
                                  create_graph=False)[0]
    du_exact = problem.exact_derivative(x_domain_np).ravel()
    axes[2].plot(x, du_exact, 'k--', linewidth=2, label='Exact du/dx')
    axes[2].plot(x, du_nn.detach().numpy().ravel(), 'b-', linewidth=1.5,
                 label='Standard NN', alpha=0.8)
    axes[2].plot(x, du_pinn.detach().numpy().ravel(), 'r-', linewidth=1.5,
                 label='PINN', alpha=0.8)
    axes[2].set_xlabel('x')
    axes[2].set_ylabel("u'(x)")
    axes[2].set_title('First Derivative')
    axes[2].legend()
    fig.tight_layout()
    plt.show()
# Compare the trained models: predictions, pointwise error, derivatives.
plot_comparison(model_nn, model_pinn, problem, x_domain_t,
                x_domain_np, u_exact_np, x_data_np, u_data_np)
def plot_loss_histories(loss_nn, loss_pinn):
    """Plot the training loss curves of both models on a shared log axis."""
    fig, ax = plt.subplots(figsize=(10, 6))
    curves = [
        (loss_nn, 'blue', 'Standard NN (data only)'),
        (loss_pinn, 'red', 'PINN (data + physics + BC)'),
    ]
    for history, color, label in curves:
        ax.plot(history, color=color, label=label, linewidth=1.5)
    ax.set_yscale('log')
    ax.set_xlabel('Training Step')
    ax.set_ylabel('Loss')
    ax.set_title('Training Loss Comparison')
    ax.legend()
    ax.grid(True, linestyle=':', alpha=0.7)
    fig.tight_layout()
    plt.show()
# Compare the two training-loss curves on a log scale.
plot_loss_histories(loss_nn, loss_pinn)
Exploring the Effect of Physics¶
What happens when we vary the physics loss weight $\lambda_{\text{phys}}$?
def sweep_lambda_phys(problem, lambdas=(0.0, 0.01, 0.1, 1.0, 10.0)):
    """Train PINNs with different physics weights and compare.

    Fix: the default was a mutable list; an immutable tuple is used instead
    (identical iteration behavior, safe as a default argument).

    Args:
        problem: PoissonProblem supplying data and source frequency.
        lambdas: sequence of physics-loss weights, one training run each.

    Side effects: shows one matplotlib panel per weight, each annotated
    with the L2 error of the trained model against the exact solution.
    """
    x_domain_np, u_exact_np, x_data_np, u_data_np = problem.generate_data()
    # Collocation grid slightly inset from the endpoints (matches train_pinn).
    x_colloc_np = np.linspace(0.01, 0.99, 50).reshape(-1, 1)
    x_data_t = torch.tensor(x_data_np, dtype=torch.float32)
    u_data_t = torch.tensor(u_data_np, dtype=torch.float32)
    x_domain_t = torch.tensor(x_domain_np, dtype=torch.float32)
    x_colloc_t = torch.tensor(x_colloc_np, dtype=torch.float32)
    x_bc_t = torch.tensor([[0.0], [1.0]], dtype=torch.float32)
    freq = problem.freq
    source_fn = lambda x: (freq * np.pi)**2 * torch.sin(freq * np.pi * x)
    fig, axes = plt.subplots(1, len(lambdas), figsize=(4 * len(lambdas), 4),
                             sharey=True)
    for i, lam in enumerate(lambdas):
        # Re-seed before each run so every panel starts from identical
        # weights; only the physics weight differs between panels.
        torch.manual_seed(42)
        model = NeuralNetwork([1, 32, 32, 32, 1])
        optimizer = optim.Adam(model.parameters(), lr=1e-3)
        for _ in range(15000):
            optimizer.zero_grad()
            loss, _, _, _ = pinn_loss(
                model, x_data_t, u_data_t, x_colloc_t, x_bc_t, source_fn,
                lambda_phys=lam, lambda_bc=10.0
            )
            loss.backward()
            optimizer.step()
        model.eval()
        with torch.no_grad():
            u_pred = model(x_domain_t).numpy()
        l2_err = np.sqrt(np.mean((u_pred.ravel() - u_exact_np.ravel())**2))
        axes[i].plot(x_domain_np, u_exact_np, 'g--', linewidth=2, label='Exact')
        axes[i].plot(x_domain_np, u_pred, 'b-', linewidth=2, label='Prediction')
        axes[i].scatter(x_data_np, u_data_np, color='red', s=60, zorder=5)
        axes[i].set_title(f'$\\lambda_{{\\mathrm{{phys}}}}$ = {lam}\nL2 err = {l2_err:.4f}')
        axes[i].set_xlabel('x')
        if i == 0:
            axes[i].set_ylabel('u(x)')
        axes[i].set_xlim([0, 1])
        axes[i].set_ylim([-1.3, 1.3])
    fig.suptitle('Effect of Physics Loss Weight', fontsize=14, y=1.02)
    fig.tight_layout()
    plt.show()
# Sweep the physics-loss weight lambda_phys over several orders of magnitude.
sweep_lambda_phys(problem)
Effect of Collocation Points¶
How does the number of collocation points $N_c$ affect accuracy?
def sweep_collocation_points(problem, n_colloc_list=(5, 10, 25, 50, 100)):
    """Train PINNs with different numbers of collocation points.

    Fix: the default was a mutable list; an immutable tuple is used instead
    (identical iteration behavior, safe as a default argument).

    Args:
        problem: PoissonProblem supplying data and source frequency.
        n_colloc_list: collocation counts to sweep, one training run each.

    Side effects: shows one matplotlib panel per count, each annotated with
    the L2 error of the trained model against the exact solution.
    """
    x_domain_np, u_exact_np, x_data_np, u_data_np = problem.generate_data()
    x_data_t = torch.tensor(x_data_np, dtype=torch.float32)
    u_data_t = torch.tensor(u_data_np, dtype=torch.float32)
    x_domain_t = torch.tensor(x_domain_np, dtype=torch.float32)
    x_bc_t = torch.tensor([[0.0], [1.0]], dtype=torch.float32)
    freq = problem.freq
    source_fn = lambda x: (freq * np.pi)**2 * torch.sin(freq * np.pi * x)
    fig, axes = plt.subplots(1, len(n_colloc_list),
                             figsize=(4 * len(n_colloc_list), 4), sharey=True)
    for i, nc in enumerate(n_colloc_list):
        # Re-seed before each run so every panel starts from identical
        # weights; only the collocation count differs between panels.
        torch.manual_seed(42)
        x_colloc_np = np.linspace(0.01, 0.99, nc).reshape(-1, 1)
        x_colloc_t = torch.tensor(x_colloc_np, dtype=torch.float32)
        model = NeuralNetwork([1, 32, 32, 32, 1])
        optimizer = optim.Adam(model.parameters(), lr=1e-3)
        for _ in range(15000):
            optimizer.zero_grad()
            loss, _, _, _ = pinn_loss(
                model, x_data_t, u_data_t, x_colloc_t, x_bc_t, source_fn,
                lambda_phys=1.0, lambda_bc=10.0
            )
            loss.backward()
            optimizer.step()
        model.eval()
        with torch.no_grad():
            u_pred = model(x_domain_t).numpy()
        l2_err = np.sqrt(np.mean((u_pred.ravel() - u_exact_np.ravel())**2))
        axes[i].plot(x_domain_np, u_exact_np, 'g--', linewidth=2, label='Exact')
        axes[i].plot(x_domain_np, u_pred, 'b-', linewidth=2, label='Prediction')
        axes[i].scatter(x_data_np, u_data_np, color='red', s=60, zorder=5)
        axes[i].scatter(x_colloc_np, np.zeros_like(x_colloc_np) - 0.05,
                        color='green', s=20, marker='^', alpha=0.5)
        axes[i].set_title(f'$N_c$ = {nc}\nL2 err = {l2_err:.4f}')
        axes[i].set_xlabel('x')
        if i == 0:
            axes[i].set_ylabel('u(x)')
        axes[i].set_xlim([0, 1])
        axes[i].set_ylim([-1.3, 1.3])
    fig.suptitle('Effect of Number of Collocation Points', fontsize=14, y=1.02)
    fig.tight_layout()
    plt.show()
# Sweep the number of collocation points N_c.
sweep_collocation_points(problem)
Extrapolation: What Happens Beyond the Domain?¶
A natural question: if PINNs enforce the PDE inside $[0, 1]$, do they generalize outside it?
The answer is no. Outside the training domain, the physics loss provides no constraint. The network reverts to the behavior of any unconstrained function approximator. This is not a bug but a fundamental limitation. The PDE residual is only minimized where collocation points exist. Beyond those points, the network has no guidance.
This experiment evaluates both the standard NN and the PINN on the extended domain $[-0.5, 1.5]$. The exact solution $u(x) = \sin(2\pi x)$ is well-defined everywhere, so we can measure how both models extrapolate.
def plot_extrapolation(model_nn, model_pinn, problem, save_path=None):
    """Compare NN and PINN predictions on an extended domain beyond [0,1].

    Evaluates both trained models on [-0.5, 1.5], shades the training
    domain and data region, and reports L2 errors split into inside [0,1]
    vs outside.

    Args:
        model_nn, model_pinn: the two trained models.
        problem: PoissonProblem (exact solution is defined everywhere).
        save_path: optional PNG path; figure is saved when provided.
    """
    x_ext = np.linspace(-0.5, 1.5, 1000).reshape(-1, 1)
    u_ext_exact = problem.exact_solution(x_ext).reshape(-1, 1)
    x_ext_t = torch.tensor(x_ext, dtype=torch.float32)
    # Data points used for training (for reference)
    _, _, x_data_np, u_data_np = problem.generate_data()
    model_nn.eval()
    model_pinn.eval()
    with torch.no_grad():
        u_nn_ext = model_nn(x_ext_t).numpy()
        u_pinn_ext = model_pinn(x_ext_t).numpy()
    fig, axes = plt.subplots(1, 2, figsize=(14, 5))
    for i, (u_pred, label, color) in enumerate([
        (u_nn_ext, 'Standard NN', 'blue'),
        (u_pinn_ext, 'PINN', 'red')
    ]):
        ax = axes[i]
        # Shade the training domain
        ax.axvspan(0, 1, alpha=0.08, color='green', label='Training domain [0, 1]')
        # Shade the data region
        ax.axvspan(0.25, 0.75, alpha=0.12, color='orange', label='Data region [0.25, 0.75]')
        ax.plot(x_ext, u_ext_exact, 'k--', linewidth=2, label='Exact')
        ax.plot(x_ext, u_pred, color=color, linewidth=2, label=label)
        ax.scatter(x_data_np, u_data_np, color='red', s=60, zorder=5, label='Data')
        # L2 errors split by inside/outside the training domain.
        mask_in = (x_ext.ravel() >= 0) & (x_ext.ravel() <= 1)
        mask_out = ~mask_in
        l2_in = np.sqrt(np.mean((u_pred[mask_in].ravel() - u_ext_exact[mask_in].ravel())**2))
        l2_out = np.sqrt(np.mean((u_pred[mask_out].ravel() - u_ext_exact[mask_out].ravel())**2))
        ax.set_title(f'{label}\nL2 inside [0,1]: {l2_in:.4f} | L2 outside: {l2_out:.4f}')
        ax.set_xlabel('x')
        ax.set_ylabel('u(x)')
        ax.set_xlim([-0.5, 1.5])
        ax.set_ylim([-1.5, 2.0])
        ax.legend(fontsize=9)
    fig.suptitle('Extrapolation Beyond the Training Domain', fontsize=14)
    fig.tight_layout()
    if save_path:
        fig.savefig(save_path, dpi=150, bbox_inches='tight')
        print(f'Saved: {save_path}')
    plt.show()
    # Print summary
    mask_in = (x_ext.ravel() >= 0) & (x_ext.ravel() <= 1)
    mask_out = ~mask_in
    print(f'\nSummary:')
    print(f'  Standard NN - L2 inside [0,1]: {np.sqrt(np.mean((u_nn_ext[mask_in].ravel() - u_ext_exact[mask_in].ravel())**2)):.4f}'
          f' | L2 outside: {np.sqrt(np.mean((u_nn_ext[mask_out].ravel() - u_ext_exact[mask_out].ravel())**2)):.4f}')
    print(f'  PINN - L2 inside [0,1]: {np.sqrt(np.mean((u_pinn_ext[mask_in].ravel() - u_ext_exact[mask_in].ravel())**2)):.4f}'
          f' | L2 outside: {np.sqrt(np.mean((u_pinn_ext[mask_out].ravel() - u_ext_exact[mask_out].ravel())**2)):.4f}')
# Evaluate both models on the extended domain [-0.5, 1.5] and save the figure.
plot_extrapolation(model_nn, model_pinn, problem,
                   save_path=os.path.join(FIGS_DIR, 'poisson-extrapolation.png'))
Saved: ../../sciml-book/chapters/04-pinns/figs/poisson-extrapolation.png
Summary: Standard NN - L2 inside [0,1]: 0.4077 | L2 outside: 1.8508 PINN - L2 inside [0,1]: 0.0000 | L2 outside: 0.6063
Takeaway: Inside $[0, 1]$, the PINN dramatically outperforms the standard NN because the physics loss constrains the solution everywhere the collocation points sample. Outside that domain, neither model has any constraint, and both degrade. The PINN's physics-based regularization is local to the collocation region. This is an important practical limitation: PINNs interpolate with physics, they do not extrapolate with physics.
Spectral Bias: When PINNs Fail¶
The PINN above solves the Poisson equation with $u(x) = \sin(2\pi x)$ nearly perfectly. But what happens if we increase the frequency of the target solution?
Neural networks with smooth activations like $\tanh$ learn low-frequency components of the target function faster than high-frequency components. This phenomenon, called spectral bias (also known as the frequency principle), is an optimization failure, not a representation failure. The network has enough parameters to represent the high-frequency solution, but gradient descent converges to it extremely slowly.
We demonstrate this by training the same PINN architecture on $u(x) = \sin(10\pi x)$. Nothing changes except the frequency of the target.
def train_pinn_for_spectral_bias(source_freq, num_adam_steps=15000, num_lbfgs_steps=200):
    """Train a PINN for a given source frequency and return the model and data."""
    problem = PoissonProblem(source_freq=source_freq)
    x_dom_np, u_exact_np, x_obs_np, u_obs_np = problem.generate_data()
    x_colloc_np = np.linspace(0.01, 0.99, 50).reshape(-1, 1)

    # Reseed so every frequency starts from the identical initialization.
    torch.manual_seed(42)
    model = NeuralNetwork([1, 32, 32, 32, 1])

    # Tensors: sparse observations, dense evaluation grid, collocation
    # points for the PDE residual, and the two Dirichlet boundary points.
    t_obs_x = torch.tensor(x_obs_np, dtype=torch.float32)
    t_obs_u = torch.tensor(u_obs_np, dtype=torch.float32)
    t_dom = torch.tensor(x_dom_np, dtype=torch.float32)
    t_colloc = torch.tensor(x_colloc_np, dtype=torch.float32)
    t_bc = torch.tensor([[0.0], [1.0]], dtype=torch.float32)

    omega = problem.freq
    source_fn = lambda x: (omega * np.pi)**2 * torch.sin(omega * np.pi * x)

    def total_loss():
        # Shared composite loss for both optimization phases.
        value, _, _, _ = pinn_loss(
            model, t_obs_x, t_obs_u, t_colloc, t_bc, source_fn,
            lambda_phys=1.0, lambda_bc=10.0
        )
        return value

    # Phase 1: Adam warm-up.
    adam = optim.Adam(model.parameters(), lr=1e-3)
    for _ in tqdm(range(num_adam_steps), desc=f'PINN Adam (freq={source_freq})'):
        model.train()
        adam.zero_grad()
        loss = total_loss()
        loss.backward()
        adam.step()

    # Phase 2: L-BFGS refinement via the standard closure interface.
    lbfgs = optim.LBFGS(
        model.parameters(), lr=0.5,
        max_iter=20, history_size=50,
        tolerance_grad=1e-9, tolerance_change=1e-11
    )

    def closure():
        lbfgs.zero_grad()
        loss = total_loss()
        loss.backward()
        return loss

    for _ in tqdm(range(num_lbfgs_steps), desc=f'PINN L-BFGS (freq={source_freq})'):
        lbfgs.step(closure)

    # Evaluate on the dense grid and report the RMS (L2) error.
    model.eval()
    with torch.no_grad():
        u_pred = model(t_dom).numpy()
    l2_err = np.sqrt(np.mean((u_pred.ravel() - u_exact_np.ravel())**2))
    return model, x_dom_np, u_exact_np, u_pred, x_obs_np, u_obs_np, l2_err
def plot_spectral_bias(save_path=None):
    """Side-by-side comparison of PINN at freq=2 vs freq=10.

    Trains the same architecture/procedure on u(x) = sin(2*pi*x) (expected to
    succeed) and u(x) = sin(10*pi*x) (expected to fail due to spectral bias),
    then plots both fits against the exact solutions.

    Parameters
    ----------
    save_path : str or None
        If given, the figure is also saved there at 150 dpi.
    """
    frequencies = [2.0, 10.0]
    # Each entry: (x, u_exact, u_pred, x_data, u_data, l2_err).
    # The trained model itself is not needed for plotting, so it is dropped.
    results = []
    for freq in frequencies:
        _, x, u_exact, u_pred, xd, ud, l2 = \
            train_pinn_for_spectral_bias(source_freq=freq)
        results.append((x, u_exact, u_pred, xd, ud, l2))

    fig, axes = plt.subplots(1, 2, figsize=(14, 5))
    for ax, freq, (x, u_exact, u_pred, xd, ud, l2) in zip(axes, frequencies, results):
        ax.plot(x, u_exact, 'g--', linewidth=2, label='Exact')
        ax.plot(x, u_pred, 'b-', linewidth=2, label='PINN')
        ax.scatter(xd, ud, color='red', s=60, zorder=5, label='Data')
        # {freq:g} renders 2.0 -> "2" and 10.0 -> "10", matching the math text.
        ax.set_title(f'$u(x) = \\sin({freq:g}\\pi x)$\nL2 error = {l2:.2e}', fontsize=13)
        ax.set_xlabel('x', fontsize=12)
        ax.set_ylabel('u(x)', fontsize=12)
        ax.set_xlim([0, 1])
        ax.set_ylim([-1.3, 1.3])
        ax.legend(fontsize=11)

    fig.suptitle('Spectral Bias: Same Architecture, Different Frequency', fontsize=14)
    fig.tight_layout()
    if save_path:
        fig.savefig(save_path, dpi=150, bbox_inches='tight')
        print(f'Saved: {save_path}')
    plt.show()

    print(f'\nFreq=2 L2 error: {results[0][-1]:.2e}')
    print(f'Freq=10 L2 error: {results[1][-1]:.2e}')
plot_spectral_bias(save_path=os.path.join(FIGS_DIR, 'poisson-spectral-bias.png'))
PINN Adam (freq=2.0): 0%| | 0/15000 [00:00<?, ?it/s]
PINN Adam (freq=2.0): 1%| | 151/15000 [00:00<00:09, 1504.78it/s]
PINN Adam (freq=2.0): 2%|▏ | 303/15000 [00:00<00:09, 1508.28it/s]
PINN Adam (freq=2.0): 3%|▎ | 454/15000 [00:00<00:09, 1502.16it/s]
PINN Adam (freq=2.0): 4%|▍ | 605/15000 [00:00<00:09, 1490.19it/s]
PINN Adam (freq=2.0): 5%|▌ | 755/15000 [00:00<00:09, 1492.40it/s]
PINN Adam (freq=2.0): 6%|▌ | 905/15000 [00:00<00:09, 1484.13it/s]
PINN Adam (freq=2.0): 7%|▋ | 1054/15000 [00:00<00:09, 1476.98it/s]
PINN Adam (freq=2.0): 8%|▊ | 1203/15000 [00:00<00:09, 1479.39it/s]
PINN Adam (freq=2.0): 9%|▉ | 1352/15000 [00:00<00:09, 1480.63it/s]
PINN Adam (freq=2.0): 10%|█ | 1501/15000 [00:01<00:09, 1481.72it/s]
PINN Adam (freq=2.0): 11%|█ | 1651/15000 [00:01<00:08, 1485.06it/s]
PINN Adam (freq=2.0): 12%|█▏ | 1800/15000 [00:01<00:08, 1472.25it/s]
PINN Adam (freq=2.0): 13%|█▎ | 1948/15000 [00:01<00:08, 1473.81it/s]
PINN Adam (freq=2.0): 14%|█▍ | 2098/15000 [00:01<00:08, 1479.11it/s]
PINN Adam (freq=2.0): 15%|█▍ | 2247/15000 [00:01<00:08, 1480.24it/s]
PINN Adam (freq=2.0): 16%|█▌ | 2396/15000 [00:01<00:08, 1482.32it/s]
PINN Adam (freq=2.0): 17%|█▋ | 2548/15000 [00:01<00:08, 1493.53it/s]
PINN Adam (freq=2.0): 18%|█▊ | 2698/15000 [00:01<00:08, 1485.91it/s]
PINN Adam (freq=2.0): 19%|█▉ | 2849/15000 [00:01<00:08, 1490.60it/s]
PINN Adam (freq=2.0): 20%|██ | 3000/15000 [00:02<00:08, 1495.00it/s]
PINN Adam (freq=2.0): 21%|██ | 3150/15000 [00:02<00:07, 1491.76it/s]
PINN Adam (freq=2.0): 22%|██▏ | 3300/15000 [00:02<00:07, 1487.06it/s]
PINN Adam (freq=2.0): 23%|██▎ | 3449/15000 [00:02<00:07, 1485.01it/s]
PINN Adam (freq=2.0): 24%|██▍ | 3599/15000 [00:02<00:07, 1488.35it/s]
PINN Adam (freq=2.0): 25%|██▍ | 3748/15000 [00:02<00:07, 1485.01it/s]
PINN Adam (freq=2.0): 26%|██▌ | 3897/15000 [00:02<00:07, 1483.87it/s]
PINN Adam (freq=2.0): 27%|██▋ | 4047/15000 [00:02<00:07, 1486.67it/s]
PINN Adam (freq=2.0): 28%|██▊ | 4196/15000 [00:02<00:07, 1480.38it/s]
PINN Adam (freq=2.0): 29%|██▉ | 4345/15000 [00:02<00:07, 1480.52it/s]
PINN Adam (freq=2.0): 30%|██▉ | 4494/15000 [00:03<00:07, 1480.71it/s]
PINN Adam (freq=2.0): 31%|███ | 4643/15000 [00:03<00:07, 1477.20it/s]
PINN Adam (freq=2.0): 32%|███▏ | 4793/15000 [00:03<00:06, 1480.77it/s]
PINN Adam (freq=2.0): 33%|███▎ | 4944/15000 [00:03<00:06, 1488.82it/s]
PINN Adam (freq=2.0): 34%|███▍ | 5093/15000 [00:03<00:06, 1487.96it/s]
PINN Adam (freq=2.0): 35%|███▍ | 5244/15000 [00:03<00:06, 1493.56it/s]
PINN Adam (freq=2.0): 36%|███▌ | 5394/15000 [00:03<00:06, 1486.00it/s]
PINN Adam (freq=2.0): 37%|███▋ | 5545/15000 [00:03<00:06, 1492.53it/s]
PINN Adam (freq=2.0): 38%|███▊ | 5695/15000 [00:03<00:06, 1489.34it/s]
PINN Adam (freq=2.0): 39%|███▉ | 5845/15000 [00:03<00:06, 1490.26it/s]
PINN Adam (freq=2.0): 40%|███▉ | 5995/15000 [00:04<00:06, 1490.83it/s]
PINN Adam (freq=2.0): 41%|████ | 6147/15000 [00:04<00:05, 1499.32it/s]
PINN Adam (freq=2.0): 42%|████▏ | 6300/15000 [00:04<00:05, 1507.46it/s]
PINN Adam (freq=2.0): 43%|████▎ | 6453/15000 [00:04<00:05, 1511.19it/s]
PINN Adam (freq=2.0): 44%|████▍ | 6605/15000 [00:04<00:05, 1504.96it/s]
PINN Adam (freq=2.0): 45%|████▌ | 6757/15000 [00:04<00:05, 1508.23it/s]
PINN Adam (freq=2.0): 46%|████▌ | 6908/15000 [00:04<00:05, 1499.25it/s]
PINN Adam (freq=2.0): 47%|████▋ | 7059/15000 [00:04<00:05, 1499.56it/s]
PINN Adam (freq=2.0): 48%|████▊ | 7209/15000 [00:04<00:05, 1486.46it/s]
PINN Adam (freq=2.0): 49%|████▉ | 7359/15000 [00:04<00:05, 1489.85it/s]
PINN Adam (freq=2.0): 50%|█████ | 7509/15000 [00:05<00:05, 1492.38it/s]
PINN Adam (freq=2.0): 51%|█████ | 7660/15000 [00:05<00:04, 1497.13it/s]
PINN Adam (freq=2.0): 52%|█████▏ | 7811/15000 [00:05<00:04, 1498.11it/s]
PINN Adam (freq=2.0): 53%|█████▎ | 7963/15000 [00:05<00:04, 1504.13it/s]
PINN Adam (freq=2.0): 54%|█████▍ | 8114/15000 [00:05<00:04, 1505.49it/s]
PINN Adam (freq=2.0): 55%|█████▌ | 8265/15000 [00:05<00:04, 1494.13it/s]
PINN Adam (freq=2.0): 56%|█████▌ | 8415/15000 [00:05<00:04, 1492.97it/s]
PINN Adam (freq=2.0): 57%|█████▋ | 8565/15000 [00:05<00:04, 1493.75it/s]
PINN Adam (freq=2.0): 58%|█████▊ | 8716/15000 [00:05<00:04, 1495.73it/s]
PINN Adam (freq=2.0): 59%|█████▉ | 8866/15000 [00:05<00:04, 1478.14it/s]
PINN Adam (freq=2.0): 60%|██████ | 9014/15000 [00:06<00:04, 1455.19it/s]
PINN Adam (freq=2.0): 61%|██████ | 9160/15000 [00:06<00:04, 1446.79it/s]
PINN Adam (freq=2.0): 62%|██████▏ | 9310/15000 [00:06<00:03, 1461.08it/s]
PINN Adam (freq=2.0): 63%|██████▎ | 9462/15000 [00:06<00:03, 1478.10it/s]
PINN Adam (freq=2.0): 64%|██████▍ | 9614/15000 [00:06<00:03, 1490.44it/s]
PINN Adam (freq=2.0): 65%|██████▌ | 9765/15000 [00:06<00:03, 1496.05it/s]
PINN Adam (freq=2.0): 66%|██████▌ | 9918/15000 [00:06<00:03, 1504.53it/s]
PINN Adam (freq=2.0): 67%|██████▋ | 10069/15000 [00:06<00:03, 1505.21it/s]
PINN Adam (freq=2.0): 68%|██████▊ | 10221/15000 [00:06<00:03, 1508.07it/s]
PINN Adam (freq=2.0): 69%|██████▉ | 10375/15000 [00:06<00:03, 1515.94it/s]
PINN Adam (freq=2.0): 70%|███████ | 10528/15000 [00:07<00:02, 1519.52it/s]
PINN Adam (freq=2.0): 71%|███████ | 10681/15000 [00:07<00:02, 1520.34it/s]
PINN Adam (freq=2.0): 72%|███████▏ | 10834/15000 [00:07<00:02, 1520.67it/s]
PINN Adam (freq=2.0): 73%|███████▎ | 10987/15000 [00:07<00:02, 1520.50it/s]
PINN Adam (freq=2.0): 74%|███████▍ | 11140/15000 [00:07<00:02, 1519.54it/s]
PINN Adam (freq=2.0): 75%|███████▌ | 11292/15000 [00:07<00:02, 1517.86it/s]
PINN Adam (freq=2.0): 76%|███████▋ | 11445/15000 [00:07<00:02, 1521.40it/s]
PINN Adam (freq=2.0): 77%|███████▋ | 11598/15000 [00:07<00:02, 1521.88it/s]
PINN Adam (freq=2.0): 78%|███████▊ | 11751/15000 [00:07<00:02, 1523.90it/s]
PINN Adam (freq=2.0): 79%|███████▉ | 11904/15000 [00:07<00:02, 1522.96it/s]
PINN Adam (freq=2.0): 80%|████████ | 12057/15000 [00:08<00:01, 1522.40it/s]
PINN Adam (freq=2.0): 81%|████████▏ | 12210/15000 [00:08<00:01, 1524.09it/s]
PINN Adam (freq=2.0): 82%|████████▏ | 12364/15000 [00:08<00:01, 1526.43it/s]
PINN Adam (freq=2.0): 83%|████████▎ | 12518/15000 [00:08<00:01, 1528.57it/s]
PINN Adam (freq=2.0): 84%|████████▍ | 12672/15000 [00:08<00:01, 1529.90it/s]
PINN Adam (freq=2.0): 86%|████████▌ | 12825/15000 [00:08<00:01, 1528.77it/s]
PINN Adam (freq=2.0): 87%|████████▋ | 12978/15000 [00:08<00:01, 1526.63it/s]
PINN Adam (freq=2.0): 88%|████████▊ | 13131/15000 [00:08<00:01, 1522.43it/s]
PINN Adam (freq=2.0): 89%|████████▊ | 13284/15000 [00:08<00:01, 1518.49it/s]
PINN Adam (freq=2.0): 90%|████████▉ | 13436/15000 [00:08<00:01, 1518.58it/s]
PINN Adam (freq=2.0): 91%|█████████ | 13589/15000 [00:09<00:00, 1521.78it/s]
PINN Adam (freq=2.0): 92%|█████████▏| 13743/15000 [00:09<00:00, 1524.58it/s]
PINN Adam (freq=2.0): 93%|█████████▎| 13896/15000 [00:09<00:00, 1522.11it/s]
PINN Adam (freq=2.0): 94%|█████████▎| 14049/15000 [00:09<00:00, 1518.72it/s]
PINN Adam (freq=2.0): 95%|█████████▍| 14202/15000 [00:09<00:00, 1519.40it/s]
PINN Adam (freq=2.0): 96%|█████████▌| 14354/15000 [00:09<00:00, 1518.93it/s]
PINN Adam (freq=2.0): 97%|█████████▋| 14506/15000 [00:09<00:00, 1518.38it/s]
PINN Adam (freq=2.0): 98%|█████████▊| 14658/15000 [00:09<00:00, 1518.67it/s]
PINN Adam (freq=2.0): 99%|█████████▊| 14810/15000 [00:09<00:00, 1517.91it/s]
PINN Adam (freq=2.0): 100%|█████████▉| 14962/15000 [00:09<00:00, 1517.93it/s]
PINN Adam (freq=2.0): 100%|██████████| 15000/15000 [00:10<00:00, 1499.60it/s]
PINN L-BFGS (freq=2.0): 0%| | 0/200 [00:00<?, ?it/s]
PINN L-BFGS (freq=2.0): 3%|▎ | 6/200 [00:00<00:03, 56.55it/s]
PINN L-BFGS (freq=2.0): 6%|▌ | 12/200 [00:00<00:03, 53.88it/s]
PINN L-BFGS (freq=2.0): 9%|▉ | 18/200 [00:00<00:03, 53.12it/s]
PINN L-BFGS (freq=2.0): 12%|█▏ | 24/200 [00:00<00:03, 53.28it/s]
PINN L-BFGS (freq=2.0): 15%|█▌ | 30/200 [00:00<00:03, 54.87it/s]
PINN L-BFGS (freq=2.0): 18%|█▊ | 36/200 [00:00<00:03, 54.20it/s]
PINN L-BFGS (freq=2.0): 21%|██ | 42/200 [00:00<00:02, 55.12it/s]
PINN L-BFGS (freq=2.0): 24%|██▍ | 48/200 [00:00<00:02, 54.42it/s]
PINN L-BFGS (freq=2.0): 27%|██▋ | 54/200 [00:00<00:02, 53.97it/s]
PINN L-BFGS (freq=2.0): 30%|███ | 60/200 [00:01<00:02, 53.67it/s]
PINN L-BFGS (freq=2.0): 33%|███▎ | 66/200 [00:01<00:02, 55.22it/s]
PINN L-BFGS (freq=2.0): 36%|███▌ | 72/200 [00:01<00:02, 54.71it/s]
PINN L-BFGS (freq=2.0): 39%|███▉ | 78/200 [00:01<00:02, 54.32it/s]
PINN L-BFGS (freq=2.0): 42%|████▏ | 84/200 [00:01<00:02, 53.77it/s]
PINN L-BFGS (freq=2.0): 45%|████▌ | 90/200 [00:01<00:02, 53.48it/s]
PINN L-BFGS (freq=2.0): 48%|████▊ | 96/200 [00:01<00:01, 53.21it/s]
PINN L-BFGS (freq=2.0): 51%|█████ | 102/200 [00:01<00:01, 54.23it/s]
PINN L-BFGS (freq=2.0): 54%|█████▍ | 108/200 [00:01<00:01, 53.70it/s]
PINN L-BFGS (freq=2.0): 57%|█████▋ | 114/200 [00:02<00:01, 53.23it/s]
PINN L-BFGS (freq=2.0): 60%|██████ | 120/200 [00:02<00:01, 52.25it/s]
PINN L-BFGS (freq=2.0): 63%|██████▎ | 126/200 [00:02<00:01, 52.95it/s]
PINN L-BFGS (freq=2.0): 66%|██████▌ | 132/200 [00:02<00:01, 53.49it/s]
PINN L-BFGS (freq=2.0): 69%|██████▉ | 138/200 [00:02<00:01, 53.13it/s]
PINN L-BFGS (freq=2.0): 72%|███████▏ | 144/200 [00:02<00:01, 52.26it/s]
PINN L-BFGS (freq=2.0): 75%|███████▌ | 150/200 [00:02<00:00, 53.38it/s]
PINN L-BFGS (freq=2.0): 78%|███████▊ | 157/200 [00:02<00:00, 56.94it/s]
PINN L-BFGS (freq=2.0): 82%|████████▏ | 164/200 [00:03<00:00, 57.87it/s]
PINN L-BFGS (freq=2.0): 85%|████████▌ | 170/200 [00:03<00:00, 57.89it/s]
PINN L-BFGS (freq=2.0): 88%|████████▊ | 176/200 [00:03<00:00, 55.88it/s]
PINN L-BFGS (freq=2.0): 91%|█████████ | 182/200 [00:03<00:00, 56.03it/s]
PINN L-BFGS (freq=2.0): 94%|█████████▍| 188/200 [00:03<00:00, 55.84it/s]
PINN L-BFGS (freq=2.0): 97%|█████████▋| 194/200 [00:03<00:00, 54.47it/s]
PINN L-BFGS (freq=2.0): 100%|██████████| 200/200 [00:03<00:00, 54.57it/s]
PINN L-BFGS (freq=2.0): 100%|██████████| 200/200 [00:03<00:00, 54.40it/s]
PINN Adam (freq=10.0): 0%| | 0/15000 [00:00<?, ?it/s]
PINN Adam (freq=10.0): 1%| | 149/15000 [00:00<00:09, 1488.19it/s]
PINN Adam (freq=10.0): 2%|▏ | 298/15000 [00:00<00:09, 1477.55it/s]
PINN Adam (freq=10.0): 3%|▎ | 447/15000 [00:00<00:09, 1480.74it/s]
PINN Adam (freq=10.0): 4%|▍ | 596/15000 [00:00<00:09, 1481.12it/s]
PINN Adam (freq=10.0): 5%|▍ | 745/15000 [00:00<00:09, 1483.03it/s]
PINN Adam (freq=10.0): 6%|▌ | 897/15000 [00:00<00:09, 1494.81it/s]
PINN Adam (freq=10.0): 7%|▋ | 1047/15000 [00:00<00:09, 1492.13it/s]
PINN Adam (freq=10.0): 8%|▊ | 1200/15000 [00:00<00:09, 1501.47it/s]
PINN Adam (freq=10.0): 9%|▉ | 1352/15000 [00:00<00:09, 1505.64it/s]
PINN Adam (freq=10.0): 10%|█ | 1505/15000 [00:01<00:08, 1511.84it/s]
PINN Adam (freq=10.0): 11%|█ | 1657/15000 [00:01<00:08, 1502.59it/s]
PINN Adam (freq=10.0): 12%|█▏ | 1808/15000 [00:01<00:08, 1480.75it/s]
PINN Adam (freq=10.0): 13%|█▎ | 1957/15000 [00:01<00:08, 1478.77it/s]
PINN Adam (freq=10.0): 14%|█▍ | 2106/15000 [00:01<00:08, 1482.02it/s]
PINN Adam (freq=10.0): 15%|█▌ | 2257/15000 [00:01<00:08, 1489.38it/s]
PINN Adam (freq=10.0): 16%|█▌ | 2410/15000 [00:01<00:08, 1499.71it/s]
PINN Adam (freq=10.0): 17%|█▋ | 2562/15000 [00:01<00:08, 1503.31it/s]
PINN Adam (freq=10.0): 18%|█▊ | 2714/15000 [00:01<00:08, 1508.05it/s]
PINN Adam (freq=10.0): 19%|█▉ | 2867/15000 [00:01<00:08, 1511.78it/s]
PINN Adam (freq=10.0): 20%|██ | 3020/15000 [00:02<00:07, 1514.58it/s]
PINN Adam (freq=10.0): 21%|██ | 3173/15000 [00:02<00:07, 1517.08it/s]
PINN Adam (freq=10.0): 22%|██▏ | 3325/15000 [00:02<00:07, 1516.90it/s]
PINN Adam (freq=10.0): 23%|██▎ | 3478/15000 [00:02<00:07, 1518.98it/s]
PINN Adam (freq=10.0): 24%|██▍ | 3631/15000 [00:02<00:07, 1520.62it/s]
PINN Adam (freq=10.0): 25%|██▌ | 3784/15000 [00:02<00:07, 1521.51it/s]
PINN Adam (freq=10.0): 26%|██▋ | 3938/15000 [00:02<00:07, 1526.83it/s]
PINN Adam (freq=10.0): 27%|██▋ | 4092/15000 [00:02<00:07, 1528.62it/s]
PINN Adam (freq=10.0): 28%|██▊ | 4245/15000 [00:02<00:07, 1525.86it/s]
PINN Adam (freq=10.0): 29%|██▉ | 4399/15000 [00:02<00:06, 1527.99it/s]
PINN Adam (freq=10.0): 30%|███ | 4552/15000 [00:03<00:06, 1521.08it/s]
PINN Adam (freq=10.0): 31%|███▏ | 4705/15000 [00:03<00:06, 1518.19it/s]
PINN Adam (freq=10.0): 32%|███▏ | 4858/15000 [00:03<00:06, 1520.03it/s]
PINN Adam (freq=10.0): 33%|███▎ | 5011/15000 [00:03<00:06, 1517.94it/s]
PINN Adam (freq=10.0): 34%|███▍ | 5164/15000 [00:03<00:06, 1518.83it/s]
PINN Adam (freq=10.0): 35%|███▌ | 5317/15000 [00:03<00:06, 1521.20it/s]
PINN Adam (freq=10.0): 36%|███▋ | 5470/15000 [00:03<00:06, 1520.57it/s]
PINN Adam (freq=10.0): 37%|███▋ | 5624/15000 [00:03<00:06, 1526.29it/s]
PINN Adam (freq=10.0): 39%|███▊ | 5778/15000 [00:03<00:06, 1527.60it/s]
PINN Adam (freq=10.0): 40%|███▉ | 5933/15000 [00:03<00:05, 1531.71it/s]
PINN Adam (freq=10.0): 41%|████ | 6087/15000 [00:04<00:05, 1526.86it/s]
PINN Adam (freq=10.0): 42%|████▏ | 6241/15000 [00:04<00:05, 1530.09it/s]
PINN Adam (freq=10.0): 43%|████▎ | 6395/15000 [00:04<00:05, 1528.66it/s]
PINN Adam (freq=10.0): 44%|████▎ | 6550/15000 [00:04<00:05, 1532.17it/s]
PINN Adam (freq=10.0): 45%|████▍ | 6704/15000 [00:04<00:05, 1532.02it/s]
PINN Adam (freq=10.0): 46%|████▌ | 6858/15000 [00:04<00:05, 1530.19it/s]
PINN Adam (freq=10.0): 47%|████▋ | 7012/15000 [00:04<00:05, 1532.11it/s]
PINN Adam (freq=10.0): 48%|████▊ | 7166/15000 [00:04<00:05, 1529.35it/s]
PINN Adam (freq=10.0): 49%|████▉ | 7320/15000 [00:04<00:05, 1530.38it/s]
PINN Adam (freq=10.0): 50%|████▉ | 7474/15000 [00:04<00:04, 1530.66it/s]
PINN Adam (freq=10.0): 51%|█████ | 7628/15000 [00:05<00:04, 1530.23it/s]
PINN Adam (freq=10.0): 52%|█████▏ | 7782/15000 [00:05<00:04, 1528.41it/s]
PINN Adam (freq=10.0): 53%|█████▎ | 7935/15000 [00:05<00:04, 1528.38it/s]
PINN Adam (freq=10.0): 54%|█████▍ | 8088/15000 [00:05<00:04, 1523.01it/s]
PINN Adam (freq=10.0): 55%|█████▍ | 8241/15000 [00:05<00:04, 1524.51it/s]
PINN Adam (freq=10.0): 56%|█████▌ | 8395/15000 [00:05<00:04, 1528.18it/s]
PINN Adam (freq=10.0): 57%|█████▋ | 8549/15000 [00:05<00:04, 1529.87it/s]
PINN Adam (freq=10.0): 58%|█████▊ | 8703/15000 [00:05<00:04, 1531.52it/s]
PINN Adam (freq=10.0): 59%|█████▉ | 8857/15000 [00:05<00:04, 1530.29it/s]
PINN Adam (freq=10.0): 60%|██████ | 9011/15000 [00:05<00:03, 1530.02it/s]
PINN Adam (freq=10.0): 61%|██████ | 9165/15000 [00:06<00:03, 1529.53it/s]
PINN Adam (freq=10.0): 62%|██████▏ | 9318/15000 [00:06<00:03, 1526.22it/s]
PINN Adam (freq=10.0): 63%|██████▎ | 9472/15000 [00:06<00:03, 1530.02it/s]
PINN Adam (freq=10.0): 64%|██████▍ | 9626/15000 [00:06<00:03, 1528.67it/s]
PINN Adam (freq=10.0): 65%|██████▌ | 9779/15000 [00:06<00:03, 1528.67it/s]
PINN Adam (freq=10.0): 66%|██████▌ | 9932/15000 [00:06<00:03, 1528.83it/s]
PINN Adam (freq=10.0): 67%|██████▋ | 10085/15000 [00:06<00:03, 1529.01it/s]
PINN Adam (freq=10.0): 68%|██████▊ | 10239/15000 [00:06<00:03, 1530.09it/s]
PINN Adam (freq=10.0): 69%|██████▉ | 10393/15000 [00:06<00:03, 1530.68it/s]
PINN Adam (freq=10.0): 70%|███████ | 10547/15000 [00:06<00:02, 1530.86it/s]
PINN Adam (freq=10.0): 71%|███████▏ | 10701/15000 [00:07<00:02, 1533.47it/s]
PINN Adam (freq=10.0): 72%|███████▏ | 10855/15000 [00:07<00:02, 1529.18it/s]
PINN Adam (freq=10.0): 73%|███████▎ | 11009/15000 [00:07<00:02, 1530.31it/s]
PINN Adam (freq=10.0): 74%|███████▍ | 11163/15000 [00:07<00:02, 1530.88it/s]
PINN Adam (freq=10.0): 75%|███████▌ | 11317/15000 [00:07<00:02, 1529.86it/s]
PINN Adam (freq=10.0): 76%|███████▋ | 11471/15000 [00:07<00:02, 1532.48it/s]
PINN Adam (freq=10.0): 78%|███████▊ | 11625/15000 [00:07<00:02, 1524.44it/s]
PINN Adam (freq=10.0): 79%|███████▊ | 11778/15000 [00:07<00:02, 1520.51it/s]
PINN Adam (freq=10.0): 80%|███████▉ | 11931/15000 [00:07<00:02, 1510.40it/s]
PINN Adam (freq=10.0): 81%|████████ | 12083/15000 [00:07<00:01, 1500.31it/s]
PINN Adam (freq=10.0): 82%|████████▏ | 12234/15000 [00:08<00:01, 1491.09it/s]
PINN Adam (freq=10.0): 83%|████████▎ | 12384/15000 [00:08<00:01, 1482.14it/s]
PINN Adam (freq=10.0): 84%|████████▎ | 12533/15000 [00:08<00:01, 1477.04it/s]
PINN Adam (freq=10.0): 85%|████████▍ | 12686/15000 [00:08<00:01, 1489.77it/s]
PINN Adam (freq=10.0): 86%|████████▌ | 12838/15000 [00:08<00:01, 1497.03it/s]
PINN Adam (freq=10.0): 87%|████████▋ | 12990/15000 [00:08<00:01, 1501.99it/s]
PINN Adam (freq=10.0): 88%|████████▊ | 13142/15000 [00:08<00:01, 1506.39it/s]
PINN Adam (freq=10.0): 89%|████████▊ | 13295/15000 [00:08<00:01, 1513.13it/s]
PINN Adam (freq=10.0): 90%|████████▉ | 13449/15000 [00:08<00:01, 1518.88it/s]
PINN Adam (freq=10.0): 91%|█████████ | 13602/15000 [00:08<00:00, 1519.85it/s]
PINN Adam (freq=10.0): 92%|█████████▏| 13755/15000 [00:09<00:00, 1520.39it/s]
PINN Adam (freq=10.0): 93%|█████████▎| 13908/15000 [00:09<00:00, 1515.83it/s]
PINN Adam (freq=10.0): 94%|█████████▎| 14061/15000 [00:09<00:00, 1517.69it/s]
PINN Adam (freq=10.0): 95%|█████████▍| 14214/15000 [00:09<00:00, 1519.83it/s]
PINN Adam (freq=10.0): 96%|█████████▌| 14367/15000 [00:09<00:00, 1522.63it/s]
PINN Adam (freq=10.0): 97%|█████████▋| 14521/15000 [00:09<00:00, 1526.18it/s]
PINN Adam (freq=10.0): 98%|█████████▊| 14674/15000 [00:09<00:00, 1525.39it/s]
PINN Adam (freq=10.0): 99%|█████████▉| 14827/15000 [00:09<00:00, 1522.68it/s]
PINN Adam (freq=10.0): 100%|█████████▉| 14981/15000 [00:09<00:00, 1525.06it/s]
PINN Adam (freq=10.0): 100%|██████████| 15000/15000 [00:09<00:00, 1517.42it/s]
PINN L-BFGS (freq=10.0): 0%| | 0/200 [00:00<?, ?it/s]
PINN L-BFGS (freq=10.0): 3%|▎ | 6/200 [00:00<00:03, 57.00it/s]
PINN L-BFGS (freq=10.0): 6%|▌ | 12/200 [00:00<00:03, 54.67it/s]
PINN L-BFGS (freq=10.0): 9%|▉ | 18/200 [00:00<00:03, 53.70it/s]
PINN L-BFGS (freq=10.0): 12%|█▏ | 24/200 [00:00<00:03, 53.43it/s]
PINN L-BFGS (freq=10.0): 15%|█▌ | 30/200 [00:00<00:03, 53.26it/s]
PINN L-BFGS (freq=10.0): 18%|█▊ | 36/200 [00:00<00:03, 53.18it/s]
PINN L-BFGS (freq=10.0): 21%|██ | 42/200 [00:00<00:02, 52.88it/s]
PINN L-BFGS (freq=10.0): 24%|██▍ | 48/200 [00:00<00:02, 52.99it/s]
PINN L-BFGS (freq=10.0): 27%|██▋ | 54/200 [00:01<00:02, 52.99it/s]
PINN L-BFGS (freq=10.0): 30%|███ | 60/200 [00:01<00:02, 52.96it/s]
PINN L-BFGS (freq=10.0): 33%|███▎ | 66/200 [00:01<00:02, 52.89it/s]
PINN L-BFGS (freq=10.0): 36%|███▌ | 72/200 [00:01<00:02, 52.89it/s]
PINN L-BFGS (freq=10.0): 39%|███▉ | 78/200 [00:01<00:02, 52.88it/s]
PINN L-BFGS (freq=10.0): 42%|████▏ | 84/200 [00:01<00:02, 52.92it/s]
PINN L-BFGS (freq=10.0): 45%|████▌ | 90/200 [00:01<00:02, 52.89it/s]
PINN L-BFGS (freq=10.0): 48%|████▊ | 96/200 [00:01<00:01, 53.04it/s]
PINN L-BFGS (freq=10.0): 51%|█████ | 102/200 [00:01<00:01, 52.98it/s]
PINN L-BFGS (freq=10.0): 54%|█████▍ | 108/200 [00:02<00:01, 52.52it/s]
PINN L-BFGS (freq=10.0): 57%|█████▋ | 114/200 [00:02<00:01, 52.37it/s]
PINN L-BFGS (freq=10.0): 60%|██████ | 120/200 [00:02<00:01, 52.26it/s]
PINN L-BFGS (freq=10.0): 63%|██████▎ | 126/200 [00:02<00:01, 51.99it/s]
PINN L-BFGS (freq=10.0): 66%|██████▌ | 132/200 [00:02<00:01, 51.85it/s]
PINN L-BFGS (freq=10.0): 69%|██████▉ | 138/200 [00:02<00:01, 51.94it/s]
PINN L-BFGS (freq=10.0): 72%|███████▏ | 144/200 [00:02<00:01, 52.22it/s]
PINN L-BFGS (freq=10.0): 75%|███████▌ | 150/200 [00:02<00:00, 52.34it/s]
PINN L-BFGS (freq=10.0): 78%|███████▊ | 156/200 [00:02<00:00, 52.52it/s]
PINN L-BFGS (freq=10.0): 81%|████████ | 162/200 [00:03<00:00, 52.69it/s]
PINN L-BFGS (freq=10.0): 84%|████████▍ | 168/200 [00:03<00:00, 52.70it/s]
PINN L-BFGS (freq=10.0): 87%|████████▋ | 174/200 [00:03<00:00, 52.65it/s]
PINN L-BFGS (freq=10.0): 90%|█████████ | 180/200 [00:03<00:00, 52.66it/s]
PINN L-BFGS (freq=10.0): 93%|█████████▎| 186/200 [00:03<00:00, 52.64it/s]
PINN L-BFGS (freq=10.0): 96%|█████████▌| 192/200 [00:03<00:00, 52.79it/s]
PINN L-BFGS (freq=10.0): 99%|█████████▉| 198/200 [00:03<00:00, 52.97it/s]
PINN L-BFGS (freq=10.0): 100%|██████████| 200/200 [00:03<00:00, 52.79it/s]
Saved: ../../sciml-book/chapters/04-pinns/figs/poisson-spectral-bias.png
Freq=2 L2 error: 2.03e-06 Freq=10 L2 error: 4.75e+00
Takeaway: The same architecture and training procedure that produces a near-perfect solution for $\sin(2\pi x)$ fails completely for $\sin(10\pi x)$. Nothing has changed except the frequency of the target. The network has enough parameters to represent the high-frequency solution. The failure is entirely in optimization: gradient descent with smooth activations converges to low-frequency components first, and 15,000 Adam steps plus L-BFGS refinement are not enough to reach the high-frequency target. This is spectral bias, and it is one of the most important practical limitations of PINNs. Strategies to mitigate it include Fourier feature embeddings, multiscale architectures, and curriculum training.