From f4c2b31f9ced1514daaaa6057c826bc53a6c0b8d Mon Sep 17 00:00:00 2001
From: Steve Schmerler <git@elcorto.com>
Date: Thu, 15 May 2025 15:58:30 +0200
Subject: [PATCH] 01_one_dim plotting: add sharex and sharey

---
 BLcourse2.3/01_one_dim.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/BLcourse2.3/01_one_dim.py b/BLcourse2.3/01_one_dim.py
index 52a7628..73c6648 100644
--- a/BLcourse2.3/01_one_dim.py
+++ b/BLcourse2.3/01_one_dim.py
@@ -188,7 +188,7 @@ pprint(extract_model_params(model, raw=False))
 #
 # We sample a number of functions $f_m, m=1,\ldots,M$ from the GP prior and
 # evaluate them at all $\ma X$ = `X_pred` points, of which we have $N=200$. So
-# we effectively generate samples from $p(\predve f|\ma X) = \mathcal N(\ve
+# we effectively generate samples from `pri_f` = $p(\predve f|\ma X) = \mathcal N(\ve
 # c, \ma K)$. Each sampled vector $\predve f\in\mathbb R^{N}$ represents a
 # sampled *function* $f$ evaluated at the $N=200$ points in $\ma X$. The
 # covariance (kernel) matrix is $\ma K\in\mathbb R^{N\times N}$. Its diagonal
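
Note: sampling from the prior `pri_f` described above amounts to drawing from a
multivariate normal. A minimal sketch of that step, assuming `model` and
`X_pred` are defined as in the surrounding notebook, with an illustrative
choice of M=10 samples:

    import torch

    M = 10  # illustrative number of prior function samples f_m
    with torch.no_grad():
        # model(X_pred) is the prior MultivariateNormal N(c, K) over the
        # N=200 evaluation points
        pri_f = model(X_pred)
        # one row per sampled function f_m, evaluated at all X_pred points
        f_samples = pri_f.sample(sample_shape=torch.Size((M,)))
    assert f_samples.shape == (M, len(X_pred))
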
@@ -387,7 +387,7 @@ with torch.no_grad():
     post_pred_f = model(X_pred)
     post_pred_y = likelihood(model(X_pred))
 
-    fig, axs = plt.subplots(ncols=2, figsize=(12, 5))
+    fig, axs = plt.subplots(ncols=2, figsize=(12, 5), sharex=True, sharey=True)
     fig_sigmas, ax_sigmas = plt.subplots()
     for ii, (ax, post_pred, name, title) in enumerate(
         zip(
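
Note on the change itself: `sharex=True` and `sharey=True` make the two panels
share axis limits, so the `post_pred_f` and `post_pred_y` plots are drawn on
identical scales and are directly comparable. A self-contained sketch with
made-up data showing the effect:

    import numpy as np
    import matplotlib.pyplot as plt

    x = np.linspace(0, 1, 50)
    fig, axs = plt.subplots(
        ncols=2, figsize=(12, 5), sharex=True, sharey=True
    )
    axs[0].plot(x, np.sin(2 * np.pi * x))      # narrower spread
    axs[1].plot(x, 2 * np.sin(2 * np.pi * x))  # wider spread
    # with sharey=True, the left panel inherits the larger y-range of the
    # right one instead of autoscaling to its own data
    plt.show()
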
-- 
GitLab