Three Principles, One Representation¶
One PDE. One network. Three losses.
We solve the Poisson equation $-u'' = f$ on $[0,1]$ with $u(0) = u(1) = 0$ using the same neural network architecture but three different optimality principles:
- Strong form (PINN) — minimize the squared $L^2$ residual $\|-\hat{u}'' - f\|^2_{L^2}$
- Weak form (VPINN) — minimize the Galerkin residual against test functions in $H^1_0$
- Energy minimization (Deep Ritz) — minimize the energy functional $E[\hat{u}] = \frac{1}{2}\int(\hat{u}')^2\,dx - \int f\hat{u}\,dx$
The representation is fixed: a 3-layer $\tanh$ network with 32 neurons per layer, wrapped in a strong BC layer $\hat{u}(x) = x(1-x) \cdot N(x;\theta)$ so that $\hat{u}(0) = \hat{u}(1) = 0$ by construction. This eliminates BC penalties entirely, making the comparison purely about the principle.
Only the loss function changes. The experiment reveals which function space each loss lives in and what that means for accuracy, derivative quality, and robustness.
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from tqdm import tqdm
torch.manual_seed(42)
np.random.seed(42)
# --- Problem definition ---
# -u'' = pi^2 sin(pi x) on [0,1], u(0) = u(1) = 0
# Exact solution: u(x) = sin(pi x)
def exact_solution(x):
    """Closed-form solution of the test problem: u(x) = sin(pi x)."""
    return np.sin(x * np.pi)
def exact_derivative(x):
    """First derivative of the exact solution: u'(x) = pi cos(pi x)."""
    return np.cos(x * np.pi) * np.pi
def source_term_torch(x):
    """Right-hand side f(x) = pi^2 sin(pi x), evaluated on a torch tensor."""
    pi = np.pi
    return (pi * pi) * torch.sin(pi * x)
# --- Network architecture ---
class RawNetwork(nn.Module):
    """Fully connected network: 3 hidden layers of 32 units, scalar in/out."""

    def __init__(self, activation=nn.Tanh):
        super().__init__()
        widths = [1, 32, 32, 32]
        layers = []
        for w_in, w_out in zip(widths[:-1], widths[1:]):
            layers.append(nn.Linear(w_in, w_out))
            layers.append(activation())
        layers.append(nn.Linear(32, 1))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)


class StrongBCModel(nn.Module):
    """u_hat(x) = x(1-x) * N(x; theta), so u_hat(0) = u_hat(1) = 0 exactly."""

    def __init__(self, activation=nn.Tanh):
        super().__init__()
        self.nn = RawNetwork(activation)

    def forward(self, x):
        bump = x - x * x  # x(1-x): vanishes at both endpoints by construction
        return bump * self.nn(x)
# --- Quadrature (Gauss-Legendre on [0,1]) ---
def gauss_legendre(n=64, a=0.0, b=1.0):
    """Gauss-Legendre quadrature rule mapped from [-1, 1] to [a, b].

    Generalized from the original hard-coded [0, 1] interval; the defaults
    reproduce the previous behavior exactly.

    Parameters
    ----------
    n : int
        Number of quadrature nodes (exact for polynomials up to degree 2n-1).
    a, b : float
        Endpoints of the integration interval.

    Returns
    -------
    (nodes, weights) : pair of float32 torch tensors, each shaped (n, 1).
    """
    nodes, weights = np.polynomial.legendre.leggauss(n)
    # Affine map x -> a + (b-a)(x+1)/2; weights pick up the Jacobian (b-a)/2.
    half_width = 0.5 * (b - a)
    nodes = a + half_width * (nodes + 1.0)
    weights = half_width * weights
    return (torch.tensor(nodes, dtype=torch.float32).reshape(-1, 1),
            torch.tensor(weights, dtype=torch.float32).reshape(-1, 1))
# Shared 64-point Gauss-Legendre rule used by the weak-form and energy losses.
x_quad, w_quad = gauss_legendre(64)
# Collocation points for strong form
# (strictly interior: the endpoints are avoided, and the BC wrapper makes
# u_hat vanish there anyway)
N_colloc = 64
x_colloc = torch.linspace(0.01, 0.99, N_colloc, dtype=torch.float32).reshape(-1, 1)
# Dense evaluation grid
x_eval_np = np.linspace(0, 1, 500)
# Training config
N_ADAM = 10000
N_LBFGS = 200
# Banner: problem statement and the setup shared by all three methods.
print("Problem: -u'' = π² sin(πx), u(0) = u(1) = 0")
print(f"Exact solution: u(x) = sin(πx)")
print(f"Representation: u_hat(x) = x(1-x) · N(x; θ), 3×32 tanh network")
print(f"Training: {N_ADAM} Adam + {N_LBFGS} L-BFGS steps")
Helper: compute derivatives and evaluate¶
All three methods need first and/or second derivatives of the network output. We also need a shared evaluation routine for fair comparison.
def compute_derivatives(model, x, order=2):
    """Differentiate model output at x via autograd.

    Returns (u, u') when order < 2, otherwise (u, u', u'').  The graph is
    retained (create_graph=True) so the results can be differentiated again
    by a training loss.
    """
    pts = x.clone().detach().requires_grad_(True)
    u = model(pts)
    (du,) = torch.autograd.grad(u, pts, grad_outputs=torch.ones_like(u),
                                create_graph=True)
    if order < 2:
        return u, du
    (d2u,) = torch.autograd.grad(du, pts, grad_outputs=torch.ones_like(du),
                                 create_graph=True)
    return u, du, d2u
def evaluate_model(model, x_np):
    """Return (u, u') as flat numpy arrays for model evaluated on grid x_np."""
    pts = torch.tensor(x_np, dtype=torch.float32).reshape(-1, 1)
    pts.requires_grad_(True)
    u = model(pts)
    # No create_graph: this is inference only, one backward pass suffices.
    (du,) = torch.autograd.grad(u, pts, grad_outputs=torch.ones_like(u),
                                create_graph=False)
    return u.detach().numpy().ravel(), du.detach().numpy().ravel()
def l2_error(u_pred, u_exact):
    """Relative L2 (root-mean-square) error of u_pred against u_exact."""
    diff = np.asarray(u_pred) - np.asarray(u_exact)
    ref = np.asarray(u_exact)
    return np.sqrt(np.mean(diff * diff) / np.mean(ref * ref))
Principle 1: Strong Form (PINN)¶
Substitute $\hat{u}$ into the PDE and minimize the pointwise residual at collocation points.
$$\mathcal{L}_{\text{strong}}(\theta) = \frac{1}{N}\sum_{i=1}^{N} \bigl| -\hat{u}''(x_i) - f(x_i) \bigr|^2 \approx \|-\hat{u}'' - f\|^2_{L^2(\Omega)}$$
This requires computing $\hat{u}''$ via AD, so the network must live in $H^2$.
def train_pinn(num_epochs=N_ADAM, lr=1e-3):
    """Train using strong-form (PINN) loss. No BC penalty needed."""
    torch.manual_seed(42)
    model = StrongBCModel()
    history = []

    def residual_loss():
        # Mean squared pointwise PDE residual -u'' - f at collocation points.
        _, _, d2u = compute_derivatives(model, x_colloc)
        res = -d2u - source_term_torch(x_colloc)
        return (res * res).mean()

    adam = optim.Adam(model.parameters(), lr=lr)
    for _ in tqdm(range(num_epochs), desc="PINN (strong form)"):
        adam.zero_grad()
        loss = residual_loss()
        loss.backward()
        adam.step()
        history.append(loss.item())

    # L-BFGS refinement
    lbfgs = optim.LBFGS(model.parameters(), lr=0.5, max_iter=20, history_size=50)

    def closure():
        lbfgs.zero_grad()
        loss = residual_loss()
        loss.backward()
        history.append(loss.item())
        return loss

    for _ in tqdm(range(N_LBFGS), desc="PINN L-BFGS"):
        lbfgs.step(closure)
    return model, history
# Run the strong-form training and report the final residual loss.
model_pinn, loss_pinn = train_pinn()
print(f"Final loss: {loss_pinn[-1]:.2e}")
Principle 2: Weak Form (VPINN)¶
Multiply the PDE by test functions $v_k \in H^1_0$ and integrate by parts. This moves one derivative from $\hat{u}$ onto $v_k$.
$$\int_0^1 \hat{u}'v_k'\,dx = \int_0^1 f v_k\,dx \quad \text{for each test function } v_k$$
$$\mathcal{L}_{\text{weak}}(\theta) = \sum_{k=1}^{K}\left|\int_0^1 \hat{u}'v_k'\,dx - \int_0^1 fv_k\,dx\right|^2$$
We use $v_k(x) = \sin(k\pi x)$ as test functions. These form a basis for $H^1_0([0,1])$ and satisfy the boundary conditions automatically. The integrals are evaluated with Gauss-Legendre quadrature.
Only first derivatives of $\hat{u}$ appear. The network needs only $H^1$, not $H^2$.
def train_vpinn(num_epochs=N_ADAM, lr=1e-3, K=10):
    """Train using weak-form (VPINN) loss with K sine test functions.

    Test functions v_k(x) = sin(k pi x) vanish at both endpoints, so no
    boundary terms survive the integration by parts; only first derivatives
    of the network appear in the loss.

    Parameters
    ----------
    num_epochs : int
        Adam steps before the L-BFGS refinement.
    lr : float
        Adam learning rate.
    K : int
        Number of sine test functions.

    Returns
    -------
    (model, loss_history)
    """
    torch.manual_seed(42)
    model = StrongBCModel()
    optimizer = optim.Adam(model.parameters(), lr=lr)
    loss_history = []
    # Test functions: v_k(x) = sin(k*pi*x), v_k'(x) = k*pi*cos(k*pi*x)
    ks = torch.arange(1, K + 1, dtype=torch.float32).reshape(1, -1)
    # Precompute RHS: ∫ f * v_k dx (constants)
    f_at_quad = source_term_torch(x_quad)
    v_at_quad = torch.sin(ks * np.pi * x_quad)
    rhs = (w_quad * f_at_quad * v_at_quad).sum(dim=0)
    # Fix: v_k' does not depend on theta, so compute it once instead of
    # rebuilding it inside every Adam step and every L-BFGS closure call.
    dv = ks * np.pi * torch.cos(ks * np.pi * x_quad)

    def weak_residual_loss():
        # Galerkin residual: sum_k | ∫ u' v_k' dx - ∫ f v_k dx |^2
        _, du = compute_derivatives(model, x_quad, order=1)
        lhs = (w_quad * du * dv).sum(dim=0)
        return torch.sum((lhs - rhs) ** 2)

    for epoch in tqdm(range(num_epochs), desc="VPINN (weak form)"):
        optimizer.zero_grad()
        loss = weak_residual_loss()
        loss.backward()
        optimizer.step()
        loss_history.append(loss.item())

    # L-BFGS refinement
    opt_lbfgs = optim.LBFGS(model.parameters(), lr=0.5, max_iter=20, history_size=50)

    def closure():
        opt_lbfgs.zero_grad()
        loss = weak_residual_loss()
        loss.backward()
        loss_history.append(loss.item())
        return loss

    for _ in tqdm(range(N_LBFGS), desc="VPINN L-BFGS"):
        opt_lbfgs.step(closure)
    return model, loss_history
# Run the weak-form training and report the final Galerkin residual loss.
model_vpinn, loss_vpinn = train_vpinn()
print(f"Final loss: {loss_vpinn[-1]:.2e}")
Principle 3: Energy Minimization (Deep Ritz)¶
For the Poisson operator (self-adjoint, positive definite), the solution minimizes the energy functional.
$$E[\hat{u}] = \frac{1}{2}\int_0^1 (\hat{u}')^2\,dx - \int_0^1 f\,\hat{u}\,dx$$
$$\mathcal{L}_{\text{energy}}(\theta) = E[\hat{u}(\cdot;\theta)]$$
Like the weak form, only first derivatives appear. The network needs $H^1$, not $H^2$. The loss is a single scalar (not a sum of squared residuals), and for convex energy functionals the landscape has a unique global minimum.
def train_deep_ritz(num_epochs=N_ADAM, lr=1e-3):
    """Train using energy minimization (Deep Ritz) loss."""
    torch.manual_seed(42)
    model = StrongBCModel()
    history = []
    f_at_quad = source_term_torch(x_quad)

    def energy_functional():
        # E[u] = (1/2) ∫ (u')^2 dx - ∫ f u dx, via Gauss-Legendre quadrature.
        u, du = compute_derivatives(model, x_quad, order=1)
        dirichlet = 0.5 * (w_quad * du ** 2).sum()
        load = (w_quad * f_at_quad * u).sum()
        return dirichlet - load

    adam = optim.Adam(model.parameters(), lr=lr)
    for _ in tqdm(range(num_epochs), desc="Deep Ritz (energy)"):
        adam.zero_grad()
        energy = energy_functional()
        energy.backward()
        adam.step()
        history.append(energy.item())

    # L-BFGS refinement
    lbfgs = optim.LBFGS(model.parameters(), lr=0.5, max_iter=20, history_size=50)

    def closure():
        lbfgs.zero_grad()
        energy = energy_functional()
        energy.backward()
        history.append(energy.item())
        return energy

    for _ in tqdm(range(N_LBFGS), desc="Deep Ritz L-BFGS"):
        lbfgs.step(closure)
    return model, history
# Run the energy-minimization training.  For this problem the exact minimum
# of E[u] is -pi^2/4, attained at u(x) = sin(pi x).
model_ritz, loss_ritz = train_deep_ritz()
E_min = -np.pi**2 / 4
print(f"Final energy: {loss_ritz[-1]:.6f} (exact minimum: {E_min:.6f})")
Comparison: Solutions, Derivatives, and Errors¶
Same network, same problem, three different losses. The differences reveal what each principle measures.
# Evaluate all three models
# Exact solution/derivative on the dense grid, then each trained model.
u_exact_eval = exact_solution(x_eval_np)
du_exact_eval = exact_derivative(x_eval_np)
u_pinn, du_pinn = evaluate_model(model_pinn, x_eval_np)
u_vpinn, du_vpinn = evaluate_model(model_vpinn, x_eval_np)
u_ritz, du_ritz = evaluate_model(model_ritz, x_eval_np)
# --- Figure 1: Solutions side by side ---
# One panel per method: prediction overlaid on the exact solution, with the
# relative L2 error reported in each title.
fig, axes = plt.subplots(1, 3, figsize=(15, 4.5), sharey=True)
methods = [
    ("PINN (strong form)", u_pinn, "tab:blue"),
    ("VPINN (weak form)", u_vpinn, "tab:orange"),
    ("Deep Ritz (energy)", u_ritz, "tab:green"),
]
for ax, (name, u_pred, color) in zip(axes, methods):
    err = l2_error(u_pred, u_exact_eval)
    ax.plot(x_eval_np, u_pred, color=color, linewidth=2.5, label="Prediction")
    ax.plot(x_eval_np, u_exact_eval, color="#333333", linestyle="--",
            linewidth=2, label="Exact", zorder=5)
    ax.set_xlabel("x", fontsize=12)
    ax.set_title(f"{name}\nRelative $L^2$ error: {err:.2e}", fontsize=12)
    ax.legend(fontsize=10)
    ax.set_xlim([0, 1])
axes[0].set_ylabel("u(x)", fontsize=12)
fig.suptitle("Three Principles, Same Network, Same Problem", fontsize=14, y=1.02)
fig.tight_layout()
plt.show()
# --- Figure 2: Derivative comparison ---
# Same layout as Figure 1, but comparing the first derivative u'(x).
fig, axes = plt.subplots(1, 3, figsize=(15, 4.5), sharey=True)
derivs = [
    ("PINN (strong form)", du_pinn, "tab:blue"),
    ("VPINN (weak form)", du_vpinn, "tab:orange"),
    ("Deep Ritz (energy)", du_ritz, "tab:green"),
]
for ax, (name, du_pred, color) in zip(axes, derivs):
    du_err = l2_error(du_pred, du_exact_eval)
    ax.plot(x_eval_np, du_pred, color=color, linewidth=2.5, label="Prediction")
    ax.plot(x_eval_np, du_exact_eval, color="#333333", linestyle="--",
            linewidth=2, label="Exact", zorder=5)
    ax.set_xlabel("x", fontsize=12)
    ax.set_title(f"{name}\nDerivative $L^2$ error: {du_err:.2e}", fontsize=12)
    ax.legend(fontsize=10)
    ax.set_xlim([0, 1])
axes[0].set_ylabel("u'(x)", fontsize=12)
fig.suptitle("First Derivative Comparison", fontsize=14, y=1.02)
fig.tight_layout()
plt.show()
# --- Figure 3: Pointwise error (all on one plot) ---
# Log-scale pointwise errors; 1e-16 is added so exact zeros remain plottable.
fig, axes = plt.subplots(1, 2, figsize=(14, 5))
# Left: solution error
for name, u_pred, color in methods:
    axes[0].semilogy(x_eval_np, np.abs(u_pred - u_exact_eval) + 1e-16,
                     color=color, linewidth=2, label=name)
axes[0].set_xlabel("x", fontsize=12)
axes[0].set_ylabel("|u_pred - u_exact|", fontsize=12)
axes[0].set_title("Pointwise Solution Error", fontsize=13)
axes[0].legend(fontsize=10)
axes[0].set_xlim([0, 1])
# Right: derivative error
for (name, du_pred, color) in derivs:
    axes[1].semilogy(x_eval_np, np.abs(du_pred - du_exact_eval) + 1e-16,
                     color=color, linewidth=2, label=name)
axes[1].set_xlabel("x", fontsize=12)
axes[1].set_ylabel("|u'_pred - u'_exact|", fontsize=12)
axes[1].set_title("Pointwise Derivative Error", fontsize=13)
axes[1].legend(fontsize=10)
axes[1].set_xlim([0, 1])
fig.tight_layout()
plt.show()
# --- Figure 4: Training convergence ---
fig, ax = plt.subplots(figsize=(10, 5))
ax.semilogy(loss_pinn, color="tab:blue", linewidth=1.5, alpha=0.8,
            label="PINN (strong form)")
ax.semilogy(loss_vpinn, color="tab:orange", linewidth=1.5, alpha=0.8,
            label="VPINN (weak form)")
# Deep Ritz energy is negative at convergence; plot |E - E_min|
E_min = -np.pi**2 / 4
ritz_gap = [abs(e - E_min) + 1e-16 for e in loss_ritz]
ax.semilogy(ritz_gap, color="tab:green", linewidth=1.5, alpha=0.8,
            label="Deep Ritz |E − E_min|")
# NOTE(review): during the L-BFGS phase the closures append one entry per
# function evaluation (up to max_iter=20 per step), so indices past N_ADAM
# count evaluations rather than optimizer steps — the marker is approximate.
ax.axvline(x=N_ADAM, color="gray", linestyle=":", alpha=0.5, label="Adam → L-BFGS")
ax.set_xlabel("Training Step", fontsize=12)
ax.set_ylabel("Loss / Energy gap", fontsize=12)
ax.set_title("Training Convergence", fontsize=13)
ax.legend(fontsize=11)
fig.tight_layout()
plt.show()
# --- Summary table ---
# Relative L2 errors for u and u', plus the boundary values u_hat(0) and
# u_hat(1), which the x(1-x) wrapper forces to exactly zero.
print("=" * 70)
print(f"{'Method':<25} {'u L2 error':>15} {'du/dx L2 error':>15} {'BC u(0)':>10} {'u(1)':>10}")
print("-" * 70)
for name, u_pred, du_pred, model in [
    ("PINN (strong)", u_pinn, du_pinn, model_pinn),
    ("VPINN (weak)", u_vpinn, du_vpinn, model_vpinn),
    ("Deep Ritz (energy)", u_ritz, du_ritz, model_ritz),
]:
    u_err = l2_error(u_pred, u_exact_eval)
    du_err = l2_error(du_pred, du_exact_eval)
    # Boundary values evaluated directly from each model at x=0 and x=1.
    bc0 = model(torch.tensor([[0.0]])).item()
    bc1 = model(torch.tensor([[1.0]])).item()
    print(f"{name:<25} {u_err:>15.2e} {du_err:>15.2e} {bc0:>10.2e} {bc1:>10.2e}")
print("=" * 70)
Three Principles at a Glance¶
The table below summarizes the structural differences. Every row uses the same object ($u$), the same representation (neural network), and the same problem ($-u''=f$). Only the principle varies.
| | PINN (strong) | VPINN (weak) | Deep Ritz (energy) |
|---|---|---|---|
| What you learn | $u$ | $u$ | $u$ |
| Representation | Neural network | Neural network | Neural network |
| What changes | Loss function | Loss function | Loss function |
| Loss | $\frac{1}{N}\sum\lvert -\hat{u}'' - f \rvert^2$ | $\sum_k\lvert\int \hat{u}'v_k' - \int fv_k\rvert^2$ | $\frac{1}{2}\int(\hat{u}')^2 - \int f\hat{u}$ |
| Loss measures | $L^2$ PDE residual | Galerkin residual in $(H^1_0)^*$ | Energy functional |
| Derivatives needed | $\hat{u}''$ (second order) | $\hat{u}'$ (first order) | $\hat{u}'$ (first order) |
| Regularity required | $\hat{u} \in H^2$ | $\hat{u} \in H^1$ | $\hat{u} \in H^1$ |
| Test functions? | No | Yes ($v_k \in H^1_0$) | No |
| Applicability | Any well-posed PDE | Any well-posed PDE | Self-adjoint + positive definite only |
| Classical analog | Finite differences / spectral collocation | Finite elements (FEM) | Classical Ritz method |
The principle determines the function space. The function space determines which activations are admissible. This is not a tuning choice — it is a structural constraint.
What happens with ReLU?¶
The strong form (PINN) requires $\hat{u} \in H^2$, meaning the second derivative must be meaningful. ReLU is piecewise linear: its second derivative is zero almost everywhere, so automatic differentiation reports $N'' = 0$ and the computed residual silently drops every contribution from the network's kinks. The optimizer then minimizes a residual that no longer represents the PDE — the strong-form loss can decrease without the network actually solving the problem.
The weak form and energy only need first derivatives ($H^1$). ReLU networks are in $H^1$ (the first derivative is piecewise constant). These principles might still work, though with reduced accuracy due to the limited smoothness.
# Train all three with ReLU activation (strong BC enforcement)
def train_pinn_relu(num_epochs=10000, lr=1e-3):
    """Strong-form (PINN) training with a ReLU network, same BC wrapper."""
    torch.manual_seed(42)
    model = StrongBCModel(activation=nn.ReLU)
    adam = optim.Adam(model.parameters(), lr=lr)
    for _ in range(num_epochs):
        adam.zero_grad()
        _, _, d2u = compute_derivatives(model, x_colloc)
        res = -d2u - source_term_torch(x_colloc)
        loss = (res ** 2).mean()
        loss.backward()
        adam.step()
    return model
def train_vpinn_relu(num_epochs=10000, lr=1e-3, K=10):
    """Weak-form (VPINN) training with a ReLU network.

    Only first derivatives of the network enter the loss, so H^1 regularity
    suffices and ReLU is admissible.  Same seed/optimizer/budget as the
    tanh version; only the activation differs.
    """
    torch.manual_seed(42)
    model = StrongBCModel(activation=nn.ReLU)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    ks = torch.arange(1, K + 1, dtype=torch.float32).reshape(1, -1)
    # RHS ∫ f v_k dx is constant in theta: precompute it.
    f_q = source_term_torch(x_quad)
    v_q = torch.sin(ks * np.pi * x_quad)
    rhs = (w_quad * f_q * v_q).sum(dim=0)
    # Fix: v_k' is also parameter-independent — compute it once instead of
    # once per epoch as before.
    dv = ks * np.pi * torch.cos(ks * np.pi * x_quad)
    for epoch in range(num_epochs):
        optimizer.zero_grad()
        u, du = compute_derivatives(model, x_quad, order=1)
        lhs = (w_quad * du * dv).sum(dim=0)
        loss = torch.sum((lhs - rhs)**2)
        loss.backward()
        optimizer.step()
    return model
def train_ritz_relu(num_epochs=10000, lr=1e-3):
    """Energy-minimization (Deep Ritz) training with a ReLU network."""
    torch.manual_seed(42)
    model = StrongBCModel(activation=nn.ReLU)
    adam = optim.Adam(model.parameters(), lr=lr)
    f_q = source_term_torch(x_quad)
    for _ in range(num_epochs):
        adam.zero_grad()
        u, du = compute_derivatives(model, x_quad, order=1)
        # E[u] = (1/2) ∫ (u')^2 dx - ∫ f u dx
        loss = 0.5 * (w_quad * du ** 2).sum() - (w_quad * f_q * u).sum()
        loss.backward()
        adam.step()
    return model
# Each ReLU run reuses the seed, optimizer, and epoch budget of the
# corresponding tanh experiment; only the activation changes.
print("Training all three with ReLU activation (10k epochs each)...")
model_pinn_relu = train_pinn_relu()
print(" PINN (ReLU) done")
model_vpinn_relu = train_vpinn_relu()
print(" VPINN (ReLU) done")
model_ritz_relu = train_ritz_relu()
print(" Deep Ritz (ReLU) done")
# --- Figure 5: ReLU comparison ---
# Evaluate the three ReLU-trained models on the dense grid.
u_pinn_r, du_pinn_r = evaluate_model(model_pinn_relu, x_eval_np)
u_vpinn_r, du_vpinn_r = evaluate_model(model_vpinn_relu, x_eval_np)
u_ritz_r, du_ritz_r = evaluate_model(model_ritz_relu, x_eval_np)
fig, axes = plt.subplots(2, 3, figsize=(15, 9))
relu_methods = [
    ("PINN + ReLU", u_pinn_r, du_pinn_r, "tab:blue"),
    ("VPINN + ReLU", u_vpinn_r, du_vpinn_r, "tab:orange"),
    ("Deep Ritz + ReLU", u_ritz_r, du_ritz_r, "tab:green"),
]
# Top row: solutions
for ax, (name, u_pred, _, color) in zip(axes[0], relu_methods):
    err = l2_error(u_pred, u_exact_eval)
    ax.plot(x_eval_np, u_pred, color=color, linewidth=2.5, label="Prediction")
    ax.plot(x_eval_np, u_exact_eval, color="#333333", linestyle="--",
            linewidth=2, label="Exact", zorder=5)
    ax.set_title(f"{name}\nRelative $L^2$ error: {err:.2e}", fontsize=12)
    ax.legend(fontsize=10)
    ax.set_xlim([0, 1])
axes[0][0].set_ylabel("u(x)", fontsize=12)
# Bottom row: derivatives
for ax, (name, _, du_pred, color) in zip(axes[1], relu_methods):
    du_err = l2_error(du_pred, du_exact_eval)
    ax.plot(x_eval_np, du_pred, color=color, linewidth=2.5, label="Prediction")
    ax.plot(x_eval_np, du_exact_eval, color="#333333", linestyle="--",
            linewidth=2, label="Exact", zorder=5)
    ax.set_xlabel("x", fontsize=12)
    ax.set_title(f"Derivative error: {du_err:.2e}", fontsize=12)
    ax.legend(fontsize=10)
    ax.set_xlim([0, 1])
axes[1][0].set_ylabel("u'(x)", fontsize=12)
fig.suptitle("ReLU Activation: Strong Form Fails, Weak Form and Energy Survive",
             fontsize=14, y=1.02)
fig.tight_layout()
plt.show()
# Summary
# Relative L2 errors for the ReLU experiment, for quick comparison with the
# tanh results above.
print("\nReLU Summary:")
print(f" PINN (strong): u error = {l2_error(u_pinn_r, u_exact_eval):.2e}, "
      f"du error = {l2_error(du_pinn_r, du_exact_eval):.2e}")
print(f" VPINN (weak): u error = {l2_error(u_vpinn_r, u_exact_eval):.2e}, "
      f"du error = {l2_error(du_vpinn_r, du_exact_eval):.2e}")
print(f" Deep Ritz: u error = {l2_error(u_ritz_r, u_exact_eval):.2e}, "
      f"du error = {l2_error(du_ritz_r, du_exact_eval):.2e}")
Summary¶
| Principle | Loss measures | Requires | Method | tanh works? | ReLU works? |
|---|---|---|---|---|---|
| Strong form | $\|-\hat{u}'' - f\|^2_{L^2}$ | $\hat{u} \in H^2$ | PINN | Yes | No (u'' = 0 a.e.) |
| Weak form | Galerkin residual in $(H^1_0)^*$ | $\hat{u} \in H^1$ | VPINN | Yes | Yes (piecewise linear) |
| Energy | $E[\hat{u}] = \frac{1}{2}\|\hat{u}'\|^2 - \langle f, \hat{u}\rangle$ | $\hat{u} \in H^1$ | Deep Ritz | Yes | Yes (piecewise linear) |
The principle determines the function space. The function space determines which activations are admissible. This is not a tuning choice. It is a structural constraint.
Changing the representation from a neural network to piecewise polynomials (finite elements) with the same three principles produces finite differences (strong), FEM (weak), and the classical Ritz method (energy). The principle and the representation are independent axes. The method is their combination.