# Setup
# Shared imports for every solution cell below. scipy.linalg and the
# scipy.stats distributions are not used in this chunk — presumably they
# serve the later chapters (5-7) mentioned at the end; TODO confirm before removing.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import linalg
from scipy.stats import multivariate_normal, norm
sns.set_style('whitegrid')  # consistent light-grid styling for all plots
np.random.seed(42)  # fixed seed so any randomized cells reproduce exactly
print("✅ Setup complete!")
Chapter 2: Linear Algebra Solutions¶
Solution 2.1: Gaussian Elimination¶
# System: 2x + 3y - z = 5, 4x + 4y - 3z = 3, -2x + 3y - z = 1
# Augmented matrix [A | b]
Ab = np.array([
    [2, 3, -1, 5],
    [4, 4, -3, 3],
    [-2, 3, -1, 1]
], dtype=float)
print("Original augmented matrix:")
print(Ab)
# Row operations for Gaussian elimination
# R2 = R2 - 2*R1  (eliminate x from row 2)
Ab[1] = Ab[1] - 2 * Ab[0]
# R3 = R3 + R1    (eliminate x from row 3)
Ab[2] = Ab[2] + Ab[0]
print("\nAfter first pivot:")
print(Ab)
# Eliminate y from row 3. At this point row 2 is [0, -2, -1, -7] and row 3 is
# [0, 6, -2, 6], so the multiplier is 6 / (-2) = -3 and the correct update is
# R3 = R3 + 3*R2. (The previous version subtracted 3*R2, which left a nonzero
# y-coefficient in row 3, so "back substitution" read z from a non-triangular
# row and produced a wrong solution that failed its own verification.)
Ab[2] = Ab[2] + 3 * Ab[1]
print("\nRow echelon form:")
print(Ab)
# Back substitution — valid now that Ab is upper triangular
z = Ab[2, 3] / Ab[2, 2]
y = (Ab[1, 3] - Ab[1, 2] * z) / Ab[1, 1]
x = (Ab[0, 3] - Ab[0, 2] * z - Ab[0, 1] * y) / Ab[0, 0]
print(f"\nSolution: x = {x:.2f}, y = {y:.2f}, z = {z:.2f}")
# Verification against the original (unmodified) system
A = np.array([[2, 3, -1], [4, 4, -3], [-2, 3, -1]])
b = np.array([5, 3, 1])
solution = np.array([x, y, z])
print(f"Verification (Ax should equal b): {np.allclose(A @ solution, b)}")
print(f"Ax = {A @ solution}")
print(f"b = {b}")
Solution 2.2: Linear Independence¶
# The three candidate vectors
v1 = np.array([1, 2, 3])
v2 = np.array([2, 4, 6])
v3 = np.array([1, 1, 1])
# Pack the vectors as the columns of a single matrix
V = np.column_stack([v1, v2, v3])
print("Matrix V (vectors as columns):")
print(V)
# Columns are linearly independent exactly when rank(V) equals the column count
rank = np.linalg.matrix_rank(V)
num_vectors = V.shape[1]
print(f"\nRank of V: {rank}")
print(f"Number of vectors: {num_vectors}")
if rank == num_vectors:
    print("\n✅ Vectors are linearly independent")
else:
    print("\n❌ Vectors are LINEARLY DEPENDENT")
    print("Reason: v2 = 2*v1 (second vector is a scalar multiple of the first)")
# Confirm the dependency relation v2 = 2*v1 numerically
print(f"\nv2 = {v2}")
print(f"2*v1 = {2*v1}")
print(f"v2 == 2*v1: {np.allclose(v2, 2*v1)}")
Solution 2.3: Basis and Dimension¶
# Proposed basis for ℝ³
basis_vectors = [np.array([1, 0, 1]), np.array([0, 1, 1]), np.array([1, 1, 0])]
v1, v2, v3 = basis_vectors
B = np.column_stack(basis_vectors)
print("Basis matrix B:")
print(B)
# 1. Three vectors form a basis of ℝ³ iff B has full rank (3)
rank = np.linalg.matrix_rank(B)
print(f"\nRank: {rank}")
print(f"Dimension of ℝ³: 3")
print(f"Forms a basis: {rank == 3}")
# 2. Coordinates of [2, 3, 1] with respect to this basis: solve B @ coords = target
target = np.array([2, 3, 1])
print(f"\nTarget vector in standard basis: {target}")
coords_in_basis = np.linalg.solve(B, target)
print(f"Coordinates in new basis: {coords_in_basis}")
# 3. Map the coordinates back to the standard basis as a check
reconstructed = B @ coords_in_basis
print(f"\nReconstructed vector: {reconstructed}")
print(f"Original target: {target}")
print(f"Verification successful: {np.allclose(reconstructed, target)}")
# Spell the result out as an explicit linear combination
c1, c2, c3 = coords_in_basis
print(f"\n[2, 3, 1] = {c1:.2f}*v1 + {c2:.2f}*v2 + {c3:.2f}*v3")
Solution 2.4: Linear Transformations¶
# 1. Build the transformation matrix
theta = np.pi / 4  # 45° expressed in radians
# Standard 2D rotation by theta
cos_t, sin_t = np.cos(theta), np.sin(theta)
R = np.array([[cos_t, -sin_t],
              [sin_t, cos_t]])
# Anisotropic scaling: double the x-axis, leave y untouched
S = np.array([[2, 0],
              [0, 1]])
# Compose the two maps: T @ p = S @ (R @ p), i.e. rotate first, then scale.
# Matrix products do not commute, so S @ R and R @ S are different maps.
T = S @ R
print("Rotation matrix (45°):")
print(R)
print("\nScaling matrix (x by 2):")
print(S)
print("\nCombined transformation T = S @ R:")
print(T)
# 2. Apply T to the unit square, stored as column vectors (closed polyline)
square = np.array([[0, 1, 1, 0, 0],   # x coordinates
                   [0, 0, 1, 1, 0]])  # y coordinates
transformed_square = T @ square
# 3. Plot the original and transformed squares side by side
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
# Left panel: the untouched unit square
ax1.plot(square[0], square[1], 'b-o', linewidth=2, label='Original')
ax1.grid(True, alpha=0.3)
ax1.set_aspect('equal')
ax1.set_xlim(-1, 3)
ax1.set_ylim(-1, 2)
ax1.set_title('Original Unit Square')
ax1.legend()
ax1.axhline(y=0, color='k', linewidth=0.5)
ax1.axvline(x=0, color='k', linewidth=0.5)
# Right panel: transformed square, with the original ghosted for reference
ax2.plot(square[0], square[1], 'b--', alpha=0.3, label='Original')
ax2.plot(transformed_square[0], transformed_square[1], 'r-o', linewidth=2, label='Transformed')
ax2.grid(True, alpha=0.3)
ax2.set_aspect('equal')
ax2.set_xlim(-1, 3)
ax2.set_ylim(-1, 2)
# T = S @ R applies the rotation first and the scaling second; the previous
# title ("Scale x by 2, Rotate 45°") listed the operations in the wrong order,
# contradicting the construction comment above.
ax2.set_title('After: Rotate 45°, then Scale x by 2')
ax2.legend()
ax2.axhline(y=0, color='k', linewidth=0.5)
ax2.axvline(x=0, color='k', linewidth=0.5)
plt.tight_layout()
plt.show()
# Report how each corner of the square maps under T
print(f"\nTransformed vertices:")
for i in range(4):
    print(f"({square[0,i]:.1f}, {square[1,i]:.1f}) → ({transformed_square[0,i]:.2f}, {transformed_square[1,i]:.2f})")
Chapter 3: Analytic Geometry Solutions¶
Solution 3.1: Norms and Distances¶
x = np.array([3, 4])
y = np.array([1, 2])
# 1. L1 (Manhattan) norm: sum of absolute components
l1_norm = np.abs(x).sum()
print(f"L1 norm of x: {l1_norm}")
print(f" (|3| + |4| = {l1_norm})")
# 2. L2 (Euclidean) norm, by hand and via the library routine
l2_norm = np.sqrt(x @ x)
l2_norm_builtin = np.linalg.norm(x)
print(f"\nL2 norm of x: {l2_norm:.4f}")
print(f" (√(3² + 4²) = √25 = 5)")
print(f" Verification with np.linalg.norm: {l2_norm_builtin}")
# 3. L∞ (maximum) norm: largest absolute component
linf_norm = np.abs(x).max()
print(f"\nL∞ norm of x: {linf_norm}")
print(f" (max(|3|, |4|) = 4)")
# 4. Euclidean distance = norm of the difference vector
distance = np.linalg.norm(x - y)
print(f"\nL2 distance between x and y: {distance:.4f}")
print(f" (||[3,4] - [1,2]|| = ||[2,2]|| = √8 = {np.sqrt(8):.4f})")
# 5. Cosine similarity: dot product normalized by both vector lengths
norm_x = np.linalg.norm(x)
norm_y = np.linalg.norm(y)
cos_sim = np.dot(x, y) / (norm_x * norm_y)
print(f"\nCosine similarity: {cos_sim:.4f}")
print(f" Formula: (x·y) / (||x|| ||y||)")
print(f" = ({x[0]*y[0] + x[1]*y[1]}) / ({norm_x:.2f} * {norm_y:.2f})")
print(f" = {np.dot(x,y)} / {norm_x * norm_y:.2f}")
# Convert the cosine into an angle in degrees
angle_rad = np.arccos(cos_sim)
angle_deg = np.degrees(angle_rad)
print(f"\nAngle between x and y: {angle_deg:.2f}°")
Solution 3.2: Gram-Schmidt Orthogonalization¶
v1 = np.array([3, 1], dtype=float)
v2 = np.array([2, 2], dtype=float)
print("Original vectors:")
print(f"v1 = {v1}")
print(f"v2 = {v2}")
# 1. Gram-Schmidt: keep v1 as-is, then strip v2 of its component along u1
u1 = v1
coeff = np.dot(v2, u1) / np.dot(u1, u1)  # scalar projection coefficient
u2 = v2 - coeff * u1
print(f"\nOrthogonal vectors:")
print(f"u1 = {u1}")
print(f"u2 = {u2}")
# 2. Scale each orthogonal vector down to unit length
e1 = u1 / np.linalg.norm(u1)
e2 = u2 / np.linalg.norm(u2)
print(f"\nOrthonormal vectors:")
print(f"e1 = {e1}")
print(f"e2 = {e2}")
# 3. Orthogonality check: the dot product should vanish
dot_product = np.dot(e1, e2)
print(f"\nVerification:")
print(f"e1 · e2 = {dot_product:.10f} (should be ~0)")
print(f"Orthogonal: {np.abs(dot_product) < 1e-10}")
# 4. Unit-length check on both vectors
print(f"\n||e1|| = {np.linalg.norm(e1):.10f} (should be 1)")
print(f"||e2|| = {np.linalg.norm(e2):.10f} (should be 1)")
print(f"Unit vectors: {np.allclose([np.linalg.norm(e1), np.linalg.norm(e2)], [1, 1])}")
# Visualize the input pair next to the orthonormalized pair
plt.figure(figsize=(10, 5))
# Left panel: the original, non-orthogonal vectors v1 and v2
plt.subplot(1, 2, 1)
plt.arrow(0, 0, v1[0], v1[1], head_width=0.2, head_length=0.2, fc='blue', ec='blue', label='v1')
plt.arrow(0, 0, v2[0], v2[1], head_width=0.2, head_length=0.2, fc='red', ec='red', label='v2')
plt.grid(True, alpha=0.3)
plt.axis('equal')
plt.xlim(-0.5, 4)
plt.ylim(-0.5, 3)
plt.legend()
plt.title('Original Vectors (Not Orthogonal)')
# Right panel: the orthonormal pair produced by Gram-Schmidt
# (tighter axis limits and smaller arrowheads because e1, e2 have length 1)
plt.subplot(1, 2, 2)
plt.arrow(0, 0, e1[0], e1[1], head_width=0.1, head_length=0.1, fc='blue', ec='blue', label='e1')
plt.arrow(0, 0, e2[0], e2[1], head_width=0.1, head_length=0.1, fc='green', ec='green', label='e2')
plt.grid(True, alpha=0.3)
plt.axis('equal')
plt.xlim(-0.5, 1.5)
plt.ylim(-0.5, 1.5)
plt.legend()
plt.title('Orthonormal Vectors (Perpendicular, Unit Length)')
plt.tight_layout()
plt.show()
Solution 3.3: Projection and Distance¶
p = np.array([5, 5])
v = np.array([1, 2])
# 1. Orthogonal projection of p onto span{v}
scale = np.dot(p, v) / np.dot(v, v)  # how far along v the projection lies
proj_p = scale * v
print(f"Point p: {p}")
print(f"Line direction v: {v}")
print(f"\nProjection of p onto line: {proj_p}")
print(f" Formula: proj_v(p) = (p·v / v·v) * v")
print(f" = ({np.dot(p,v)} / {np.dot(v,v)}) * {v}")
# 2. The point-to-line distance is the length of the residual p - proj_p
distance_to_line = np.linalg.norm(p - proj_p)
print(f"\nDistance from p to line: {distance_to_line:.4f}")
# 3. Visualize the point, the line, and the perpendicular drop
plt.figure(figsize=(8, 8))
# Draw the line through the origin in direction v, parameterized by t
t = np.linspace(-1, 4, 100)
line = np.outer(t, v)  # row i is t[i] * v, i.e. one point on the line
plt.plot(line[:, 0], line[:, 1], 'b-', linewidth=2, label='Line (direction v)', alpha=0.5)
# Point p
plt.plot(p[0], p[1], 'ro', markersize=10, label='Point p')
# Foot of the perpendicular (the projection of p onto the line)
plt.plot(proj_p[0], proj_p[1], 'go', markersize=10, label='Projection')
# Dashed segment from p to its projection; its length is the distance
plt.plot([p[0], proj_p[0]], [p[1], proj_p[1]], 'r--', linewidth=2, label=f'Distance = {distance_to_line:.2f}')
plt.grid(True, alpha=0.3)
plt.axis('equal')
plt.xlim(-1, 6)
plt.ylim(-1, 9)
plt.legend()
plt.title('Projection of Point onto Line')
plt.xlabel('x')
plt.ylabel('y')
# Label the point and its projection directly on the figure
plt.annotate('p', xy=p, xytext=(p[0]+0.3, p[1]+0.3), fontsize=12, color='red')
plt.annotate('proj(p)', xy=proj_p, xytext=(proj_p[0]-1, proj_p[1]-0.5), fontsize=12, color='green')
plt.show()
# The residual p - proj_p must be orthogonal to the line direction v
perpendicular_vec = p - proj_p
dot_check = np.dot(perpendicular_vec, v)
print(f"\nVerification: perpendicular vector · v = {dot_check:.10f} (should be ~0)")
print(f"Perpendicular: {np.abs(dot_check) < 1e-10}")
Solution 3.4: Angles and Orthogonality¶
a = np.array([1, 1, 1])
b = np.array([1, -1, 0])
c = np.array([2, 0, -2])
def angle_between(v1, v2):
    """Calculate angle in degrees between two vectors."""
    cosine = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    # Clamp before arccos so floating-point rounding cannot push |cos| past 1
    return np.degrees(np.arccos(np.clip(cosine, -1, 1)))
# 1. Angle between a and b
angle_ab = angle_between(a, b)
print(f"Vectors:")
print(f"a = {a}")
print(f"b = {b}")
print(f"c = {c}")
print(f"\n1. Angle between a and b: {angle_ab:.2f}°")
print(f" Calculation: a·b = {np.dot(a,b)}, ||a|| = {np.linalg.norm(a):.3f}, ||b|| = {np.linalg.norm(b):.3f}")
# 2. Angle between b and c
angle_bc = angle_between(b, c)
print(f"\n2. Angle between b and c: {angle_bc:.2f}°")
# 3. Pairwise orthogonality: a (near-)zero dot product means a 90° angle
print(f"\n3. Orthogonality check:")
for pair_name, u, w in (("a·b", a, b), ("a·c", a, c), ("b·c", b, c)):
    verdict = 'Orthogonal ✓' if np.abs(np.dot(u, w)) < 1e-10 else 'Not orthogonal'
    print(f" {pair_name} = {np.dot(u, w)} → {verdict}")
# 4. The cross product yields a vector orthogonal to both of its inputs
orthogonal = np.cross(a, b)
print(f"\n4. Vector orthogonal to both a and b (cross product):")
print(f" a × b = {orthogonal}")
# Confirm orthogonality of the cross product against both inputs
print(f"\n Verification:")
print(f" (a × b) · a = {np.dot(orthogonal, a):.10f} (should be 0)")
print(f" (a × b) · b = {np.dot(orthogonal, b):.10f} (should be 0)")
# Summary of the computed angles
print(f"\n📊 Summary:")
print(f" ∠(a,b) = {angle_ab:.1f}° (acute angle)")
print(f" ∠(b,c) = {angle_bc:.1f}° ({'right angle!' if np.abs(angle_bc - 90) < 1 else 'oblique'})")
Chapter 4: Matrix Decompositions Solutions¶
Solution 4.1: Eigenvalues and Eigenvectors¶
A = np.array([[4, 1],
              [2, 3]])
# 1 & 2. Eigenvalues and eigenvectors in one call
eigenvalues, eigenvectors = np.linalg.eig(A)
print("Matrix A:")
print(A)
print(f"\nEigenvalues: {eigenvalues}")
print(f"\nEigenvectors (as columns):")
print(eigenvectors)
# 3. Confirm the defining property Av = λv for every eigenpair
print(f"\nVerification (Av = λv):")
for i, lam in enumerate(eigenvalues):
    vec = eigenvectors[:, i]
    Av = A @ vec
    lam_v = lam * vec
    print(f"\nEigenvalue λ{i+1} = {lam:.4f}")
    print(f"Eigenvector v{i+1} = {vec}")
    print(f"Av = {Av}")
    print(f"λv = {lam_v}")
    print(f"Match: {np.allclose(Av, lam_v)}")
# 4. Visualize how A acts on eigenvectors versus arbitrary vectors
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6))
# Left plot: each eigenvector and its image under A
for i in range(len(eigenvalues)):
    v = eigenvectors[:, i]
    Av = A @ v
    # Original eigenvector (solid arrow)
    ax1.arrow(0, 0, v[0], v[1], head_width=0.1, head_length=0.1,
              fc=f'C{i}', ec=f'C{i}', linewidth=2, label=f'v{i+1}')
    # Image under A (dashed arrow) — same direction, scaled by the eigenvalue
    ax1.arrow(0, 0, Av[0], Av[1], head_width=0.1, head_length=0.1,
              fc=f'C{i}', ec=f'C{i}', linewidth=2, linestyle='--', alpha=0.7, label=f'Av{i+1} = λ{i+1}v{i+1}')
ax1.grid(True, alpha=0.3)
ax1.axhline(y=0, color='k', linewidth=0.5)
ax1.axvline(x=0, color='k', linewidth=0.5)
ax1.set_aspect('equal')
ax1.set_xlim(-2, 6)
ax1.set_ylim(-2, 4)
ax1.legend()
ax1.set_title('Eigenvectors: Av = λv (scaled, same direction)')
# Right plot: generic (non-eigen) vectors change direction under A
test_vectors = np.array([[1, 0], [0, 1], [1, 1]]).T  # columns are the test vectors
colors = ['red', 'green', 'purple']
for i in range(test_vectors.shape[1]):
    v = test_vectors[:, i]
    Av = A @ v
    # Original (solid, faded) and transformed (dashed) arrows share one color
    ax2.arrow(0, 0, v[0], v[1], head_width=0.1, head_length=0.1,
              fc=colors[i], ec=colors[i], linewidth=2, alpha=0.5)
    ax2.arrow(0, 0, Av[0], Av[1], head_width=0.1, head_length=0.1,
              fc=colors[i], ec=colors[i], linewidth=2, linestyle='--')
ax2.grid(True, alpha=0.3)
ax2.axhline(y=0, color='k', linewidth=0.5)
ax2.axvline(x=0, color='k', linewidth=0.5)
ax2.set_aspect('equal')
ax2.set_xlim(-1, 6)
ax2.set_ylim(-1, 5)
ax2.set_title('General vectors: Direction changes (solid → dashed)')
plt.tight_layout()
plt.show()
print("\n💡 Key Insight: Eigenvectors maintain their direction under transformation A!")
Solution 4.2: Eigendecomposition¶
S = np.array([[5, 2],
              [2, 5]])
print("Symmetric matrix S:")
print(S)
# 1. Spectral decomposition S = P D P^T. eigh exploits symmetry and
# returns real eigenvalues with orthonormal eigenvectors.
eigenvalues, P = np.linalg.eigh(S)
D = np.diag(eigenvalues)
print(f"\nEigenvalues (diagonal of D): {eigenvalues}")
print(f"\nEigenvector matrix P:")
print(P)
print(f"\nDiagonal matrix D:")
print(D)
# 2. Rebuild S from its factors
S_reconstructed = P @ D @ P.T
print(f"\nReconstructed S = P @ D @ P^T:")
print(S_reconstructed)
print(f"\nReconstruction successful: {np.allclose(S, S_reconstructed)}")
# 3. S² the cheap way: square only the diagonal of D, since
# (P D P^T)² = P D² P^T (P is orthogonal)
S_squared_eigen = P @ np.diag(eigenvalues**2) @ P.T
S_squared_direct = S @ S
print(f"\nS² via eigendecomposition:")
print(S_squared_eigen)
print(f"\nS² via direct multiplication:")
print(S_squared_direct)
print(f"\nMethods match: {np.allclose(S_squared_eigen, S_squared_direct)}")
# 4. Matrix square root: take the square root of the eigenvalues instead
S_sqrt = P @ np.diag(np.sqrt(eigenvalues)) @ P.T
print(f"\nMatrix square root √S:")
print(S_sqrt)
# Squaring √S must recover S
verification = S_sqrt @ S_sqrt
print(f"\nVerification (√S)²:")
print(verification)
print(f"(√S)² = S: {np.allclose(verification, S)}")
print(f"\n💡 Power of Eigendecomposition:")
print(f" • S^n = P D^n P^T (just exponentiate diagonal elements!)")
print(f" • Matrix functions: f(S) = P f(D) P^T")
print(f" • Much faster for large matrices")
Solution 4.3: Singular Value Decomposition¶
M = np.array([[3, 2, 2],
              [2, 3, -2]])
print("Matrix M (2×3):")
print(M)
# 1. Full SVD: M = U Σ V^T with U 2×2, Σ 2×3, V^T 3×3
U, s, Vt = np.linalg.svd(M, full_matrices=True)
print(f"\nU (2×2) - Left singular vectors:")
print(U)
print(f"\nSingular values: {s}")
print(f"\nV^T (3×3) - Right singular vectors (transposed):")
print(Vt)
# Embed the singular values into a rectangular Σ of M's shape
Sigma = np.zeros(M.shape)
Sigma[:len(s), :len(s)] = np.diag(s)
print(f"\nΣ (2×3) matrix:")
print(Sigma)
# 2. Multiply the three factors back together
M_reconstructed = U @ Sigma @ Vt
print(f"\nReconstructed M = U @ Σ @ V^T:")
print(M_reconstructed)
print(f"Reconstruction successful: {np.allclose(M, M_reconstructed)}")
# 3. The rank equals the number of non-zero singular values
rank = np.linalg.matrix_rank(M)
num_nonzero_sv = int(np.sum(s > 1e-10))
print(f"\nRank of M: {rank}")
print(f"Number of non-zero singular values: {num_nonzero_sv}")
print(f"Rank = # of non-zero singular values: {rank == num_nonzero_sv}")
# 4. Best rank-1 approximation: zero out all but the largest singular value
Sigma_rank1 = np.zeros_like(Sigma)
Sigma_rank1[0, 0] = s[0]
M_rank1 = U @ Sigma_rank1 @ Vt
print(f"\nRank-1 approximation:")
print(M_rank1)
# Frobenius-norm error of the truncation, absolute and relative
error = np.linalg.norm(M - M_rank1, 'fro')
relative_error = error / np.linalg.norm(M, 'fro')
print(f"\nReconstruction error (Frobenius norm): {error:.4f}")
print(f"Relative error: {relative_error:.2%}")
# Eckart-Young: the optimal error is the norm of the discarded singular values
optimal_error = np.sqrt(np.sum(s[1:]**2))
print(f"\nTheoretical optimal error: {optimal_error:.4f}")
print(f"Match: {np.allclose(error, optimal_error)}")
print(f"\n💡 SVD Applications:")
print(f" • Image compression (keep top-k singular values)")
print(f" • PCA (principal components are columns of V)")
print(f" • Pseudoinverse (solve least squares)")
print(f" • Matrix rank determination")
Solution 4.4: Cholesky Decomposition¶
A = np.array([[4, 2],
              [2, 3]], dtype=float)
print("Matrix A:")
print(A)
# 1. Cholesky requires a symmetric positive definite matrix; for a symmetric
# matrix, positive definiteness ⇔ all eigenvalues strictly positive.
eigenvalues = np.linalg.eigvalsh(A)
print(f"\nEigenvalues: {eigenvalues}")
is_pos_def = np.all(eigenvalues > 0)
print(f"All eigenvalues > 0: {is_pos_def}")
print(f"✓ Matrix is positive definite" if is_pos_def else "✗ Matrix is NOT positive definite")
# 2. Cholesky decomposition A = L L^T with L lower triangular
L = np.linalg.cholesky(A)
print(f"\nCholesky factor L (lower triangular):")
print(L)
# Multiply the factor back together to verify
A_reconstructed = L @ L.T
print(f"\nReconstructed A = L @ L^T:")
print(A_reconstructed)
print(f"Reconstruction successful: {np.allclose(A, A_reconstructed)}")
# 3. Solve Ax = b through the two triangular systems
b = np.array([1, 2])
print(f"\nSolve Ax = b where b = {b}")
# A = LL^T turns Ax = b into L (L^T x) = b, solved in two stages:
# Step 1: Solve Ly = b (forward substitution — L is lower triangular)
y = np.linalg.solve(L, b)
print(f"\nStep 1: Solve Ly = b")
print(f"y = {y}")
# Step 2: Solve L^T x = y (backward substitution — L^T is upper triangular)
x_cholesky = np.linalg.solve(L.T, y)
print(f"\nStep 2: Solve L^T x = y")
print(f"x = {x_cholesky}")
# Confirm the solution satisfies the original system
print(f"\nVerification Ax:")
print(f"Ax = {A @ x_cholesky}")
print(f"b = {b}")
print(f"Solution correct: {np.allclose(A @ x_cholesky, b)}")
# 4. Cross-check against the general-purpose solver.
# (An unused `import time` sat here — leftover from a timing demo that was
# never written; removed.)
x_direct = np.linalg.solve(A, b)
print(f"\nDirect solve result: {x_direct}")
print(f"Methods match: {np.allclose(x_cholesky, x_direct)}")
# For larger matrices, Cholesky is ~2x faster
print(f"\n💡 Cholesky Benefits:")
print(f" • ~2x faster than LU decomposition for positive definite matrices")
print(f" • More numerically stable")
print(f" • Used in: Gaussian processes, Kalman filtering, optimization")
print(f" • Requirement: Matrix must be symmetric and positive definite")
Solutions continue in subsequent cells for Chapters 5-7 and the bonus challenge.
(Note: this preview covers only the first four chapters; the complete solutions notebook includes all chapters.)