Alina Bazarova / Bayesian Statistical Learning 2

Commit 90eafd04
authored 1 year ago by Steve Schmerler

Move common code to utils

parent cf605278
No related branches found
No related tags found
1 merge request: !1 BLcourse2.3 Add GP part

Showing 3 changed files with 99 additions and 158 deletions:

  BLcourse2.3/01_one_dim.py  +10 −61
  BLcourse2.3/02_two_dim.py  +27 −97
  BLcourse2.3/utils.py       +62 −0
BLcourse2.3/01_one_dim.py  +10 −61  View file @ 90eafd04
@@ -34,8 +34,10 @@
# # Imports, helpers, setup

# +
# %matplotlib inline
# %matplotlib widget
# -
# -

import math
from collections import defaultdict
from pprint import pprint

@@ -45,43 +47,7 @@ import gpytorch
from matplotlib import pyplot as plt
from matplotlib import is_interactive

-def extract_model_params(model, raw=False) -> dict:
-    """
-    Helper to convert model.named_parameters() to dict.
-
-    With raw=True, use
-        foo.bar.raw_param
-    else
-        foo.bar.param
-
-    See https://docs.gpytorch.ai/en/stable/examples/00_Basic_Usage/Hyperparameters.html#Raw-vs-Actual-Parameters
-    """
-    if raw:
-        return dict(
-            (p_name, p_val.item()) for p_name, p_val in model.named_parameters()
-        )
-    else:
-        out = dict()
-        # p_name = 'covar_module.base_kernel.raw_lengthscale'. Access
-        # model.covar_module.base_kernel.lengthscale (w/o the raw_)
-        for p_name, p_val in model.named_parameters():
-            # Yes, eval() hack. Sorry.
-            p_name = p_name.replace(".raw_", ".")
-            p_val = eval(f"model.{p_name}")
-            out[p_name] = p_val.item()
-        return out
-
-
-def plot_samples(ax, X_pred, samples, label=None, **kwds):
-    plot_kwds = dict(color="tab:green", alpha=0.3)
-    plot_kwds.update(kwds)
-
-    if label is None:
-        ax.plot(X_pred, samples.T, **plot_kwds)
-    else:
-        ax.plot(X_pred, samples[0, :], **plot_kwds, label=label)
-        ax.plot(X_pred, samples[1:, :].T, **plot_kwds, label="_")
+from utils import extract_model_params, plot_samples, ExactGPModel

# Default float32 results in slightly noisy prior samples. Less so with
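plot_samples now lives in utils.py. A minimal usage sketch of how the notebook can call it to draw GP sample paths; this is not part of the commit, and the toy X_pred and samples tensors below are placeholders:

import torch
from matplotlib import pyplot as plt
from utils import plot_samples

# Placeholder prediction grid and sample paths, shape (n_samples, n_points).
X_pred = torch.linspace(0, 1, 100)
samples = torch.randn(10, 100).cumsum(dim=1) / 10

fig, ax = plt.subplots()
# The first sample carries the legend label; the rest are hidden via label="_".
plot_samples(ax, X_pred, samples, label="samples")
ax.legend()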
@@ -155,29 +121,6 @@ plt.scatter(X_train, y_train, marker="o", color="tab:blue")
# +
-class ExactGPModel(gpytorch.models.ExactGP):
-    """
-    API:
-        model.forward()             prior                   f_pred
-        model()                     posterior               f_pred
-
-        likelihood(model.forward()) prior with noise        y_pred
-        likelihood(model())         posterior with noise    y_pred
-    """
-
-    def __init__(self, X_train, y_train, likelihood):
-        super().__init__(X_train, y_train, likelihood)
-        self.mean_module = gpytorch.means.ConstantMean()
-        self.covar_module = gpytorch.kernels.ScaleKernel(
-            gpytorch.kernels.RBFKernel()
-        )
-
-    def forward(self, x):
-        mean_x = self.mean_module(x)
-        covar_x = self.covar_module(x)
-        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
-
-
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPModel(X_train, y_train, likelihood)
# -
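The removed class (now imported from utils.py) documents four call patterns in its docstring. A short sketch of what they return, using toy tensors rather than the notebook's data (the tensors below are assumptions for illustration):

import torch
import gpytorch
from utils import ExactGPModel

X_train = torch.linspace(0, 1, 20)
y_train = torch.sin(2 * torch.pi * X_train)
X_pred = torch.linspace(-0.5, 1.5, 50)

likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPModel(X_train, y_train, likelihood)

model.eval()
likelihood.eval()
with torch.no_grad():
    f_prior = model.forward(X_pred)              # prior over f
    f_post = model(X_pred)                       # posterior over f
    y_prior = likelihood(model.forward(X_pred))  # prior with observation noise
    y_post = likelihood(model(X_pred))           # posterior with observation noise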
@@ -291,7 +234,9 @@ for ii in range(n_iter):
    for p_name, p_val in extract_model_params(model).items():
        history[p_name].append(p_val)
    history["loss"].append(loss.item())
# -

# +
# Plot hyper params and loss (neg. log marginal likelihood) convergence
ncols = len(history)
fig, axs = plt.subplots(ncols=ncols, nrows=1, figsize=(ncols * 5, 5))
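The hunk above only shows the per-iteration bookkeeping. For context, a sketch of the exact-GP training loop it plugs into; this is a reconstruction under assumptions (the toy data, optimizer choice, learning rate and n_iter are placeholders, not necessarily the notebook's values), with the loss being the negative log marginal likelihood referenced in the plot comment:

from collections import defaultdict

import torch
import gpytorch
from utils import ExactGPModel, extract_model_params

X_train = torch.linspace(0, 1, 20)
y_train = torch.sin(2 * torch.pi * X_train)

likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPModel(X_train, y_train, likelihood)

model.train()
likelihood.train()

optimizer = torch.optim.Adam(model.parameters(), lr=0.1)  # placeholder settings
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)

n_iter = 200
history = defaultdict(list)
for ii in range(n_iter):
    optimizer.zero_grad()
    loss = -mll(model(X_train), y_train)  # neg. log marginal likelihood
    loss.backward()
    optimizer.step()
    # Bookkeeping as in the hunk above.
    for p_name, p_val in extract_model_params(model).items():
        history[p_name].append(p_val)
    history["loss"].append(loss.item())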
@@ -301,6 +246,10 @@ for ax, (p_name, p_lst) in zip(axs, history.items()):
    ax.set_xlabel("iterations")
# -

# +
# Values of optimized hyper params
pprint(extract_model_params(model, raw=False))
# -

# # Run prediction
#
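The "Run prediction" section itself lies outside this hunk. A sketch of the prediction step implied by the ExactGPModel docstring, assuming model, likelihood and X_pred as defined earlier in the notebook:

# Assumes `model`, `likelihood`, `X_pred` from the notebook.
model.eval()
likelihood.eval()

with torch.no_grad():
    post_f = model(X_pred)              # posterior over the noise-free function f
    post_y = likelihood(model(X_pred))  # posterior over noisy observations y
    y_mean = post_y.mean
    y_std = post_y.stddev
    lower, upper = post_y.confidence_region()  # mean +/- 2 * stddev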
BLcourse2.3/02_two_dim.py  +27 −97  View file @ 90eafd04
@@ -13,10 +13,9 @@
# name: python3
# ---

# +
# %matplotlib inline
# %matplotlib notebook
import math

# +
from collections import defaultdict
from pprint import pprint
@@ -24,59 +23,17 @@ import torch
import gpytorch
from matplotlib import pyplot as plt
from matplotlib import is_interactive
##from mpl_toolkits.mplot3d import Axes3D

-def extract_model_params(model, raw=False) -> dict:
-    """
-    Helper to convert model.named_parameters() to dict.
-
-    With raw=True, use
-        foo.bar.raw_param
-    else
-        foo.bar.param
-
-    See https://docs.gpytorch.ai/en/stable/examples/00_Basic_Usage/Hyperparameters.html#Raw-vs-Actual-Parameters
-    """
-    if raw:
-        return dict(
-            (p_name, p_val.item()) for p_name, p_val in model.named_parameters()
-        )
-    else:
-        out = dict()
-        # p_name = 'covar_module.base_kernel.raw_lengthscale'. Access
-        # model.covar_module.base_kernel.lengthscale (w/o the raw_)
-        for p_name, p_val in model.named_parameters():
-            # Yes, eval() hack. Sorry.
-            p_name = p_name.replace(".raw_", ".")
-            p_val = eval(f"model.{p_name}")
-            out[p_name] = p_val.item()
-        return out
+from utils import extract_model_params, plot_samples, ExactGPModel
# -

# Default float32 results in slightly noisy prior samples. Less so with
# float64. We get a warning with both
#   .../lib/python3.11/site-packages/linear_operator/utils/cholesky.py:40:
#   NumericalWarning: A not p.d., added jitter of 1.0e-08 to the diagonal
# but the noise is smaller w/ float64. Reason must be that the `sample()`
# method [1] calls `rsample()` [2] which performs a Cholesky decomposition of
# the covariance matrix. The default in
# np.random.default_rng().multivariate_normal() is method="svd", which is
# slower but seemingly a bit more stable.
#
# [1] https://docs.gpytorch.ai/en/stable/distributions.html#gpytorch.distributions.MultivariateNormal.sample
# [2] https://docs.gpytorch.ai/en/stable/distributions.html#gpytorch.distributions.MultivariateNormal.rsample
torch.set_default_dtype(torch.float64)

torch.manual_seed(123)
# -

# # Generate toy 2D data
#
# Here we generate noisy 2D data `X_train`, `y_train` as well as an extended
# x-axis `X_pred` which we use later for prediction also outside of the data
# range (extrapolation). The data has a constant offset `const` which we use to
# test learning a GP mean function $m(\ve x)$. We create a gap in the data to
# show how the model uncertainty will behave there.

# +
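The comment block above explains the float64 choice via the Cholesky factorization used when sampling from the prior. A small sketch of the call path it refers to (toy tensors, not the notebook's data):

import torch
import gpytorch
from utils import ExactGPModel

torch.set_default_dtype(torch.float64)  # as in the notebook; float32 gives noisier samples

X_train = torch.linspace(0, 1, 20)
y_train = torch.sin(2 * torch.pi * X_train)
X_pred = torch.linspace(-0.5, 1.5, 200)

likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPModel(X_train, y_train, likelihood)

with torch.no_grad():
    prior_f = model.forward(X_pred)  # prior MultivariateNormal
    # sample() delegates to rsample(), which Cholesky-factorizes the covariance;
    # this is where the "added jitter" NumericalWarning can show up.
    samples = prior_f.sample(sample_shape=torch.Size((10,)))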
@@ -140,37 +97,6 @@ class MexicanHat(SurfaceData):
        return y * torch.cos(r) / r**2 - y * torch.sin(r) / r**3.0
# -

# # Define GP model

# +
-class ExactGPModel(gpytorch.models.ExactGP):
-    """
-    API:
-        model.forward()             prior                   f_pred
-        model()                     posterior               f_pred
-
-        likelihood(model.forward()) prior with noise        y_pred
-        likelihood(model())         posterior with noise    y_pred
-    """
-
-    def __init__(self, X_train, y_train, likelihood):
-        super().__init__(X_train, y_train, likelihood)
-        self.mean_module = gpytorch.means.ConstantMean()
-        self.covar_module = gpytorch.kernels.ScaleKernel(
-            gpytorch.kernels.RBFKernel()
-        )
-
-    def forward(self, x):
-        mean_x = self.mean_module(x)
-        covar_x = self.covar_module(x)
-        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
# -

# +
data_train = MexicanHat(
    xlim=[-15, 5], ylim=[-15, 25], nx=20, ny=20, mode="rand"
@@ -179,19 +105,25 @@ data_pred = MexicanHat(
    xlim=[-15, 5], ylim=[-15, 25], nx=100, ny=100, mode="grid"
)

# train inputs
X_train = data_train.X
-f_train = data_train.z
-##y_train = f_train + torch.randn(size=f_train.shape) / 20
-y_train = f_train
-
-mask = (X_train[:, 0] < -5) & (X_train[:, 1] < 0)
-# apply mask
-X_train = X_train[~mask, :]
-y_train = y_train[~mask]

# inputs for prediction and plotting
X_pred = data_pred.X

+# noise-free train data
+##y_train = data_train.z
+
+# noisy train data
+y_train = data_train.z + torch.randn(size=data_train.z.shape) / 20

+# +
+# Cut out part of the train data to create out-of-distribution predictions
+##mask = (X_train[:, 0] < -5) & (X_train[:, 1] < 0)
+##X_train = X_train[~mask, :]
+##y_train = y_train[~mask]
+# -

fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.plot_surface(
@@ -206,12 +138,11 @@ ax.scatter(
)
ax.set_xlabel("X_0")
ax.set_ylabel("X_1")
# -

# +
# # Define GP model
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPModel(X_train, y_train, likelihood)
# -

# Inspect the model
@@ -228,8 +159,6 @@ model.covar_module.outputscale = 1.0
model.likelihood.noise_covar.noise = 0.1

pprint(extract_model_params(model, raw=False))
# -

# +
# Train mode
@@ -251,6 +180,7 @@ for ii in range(n_iter):
    for p_name, p_val in extract_model_params(model).items():
        history[p_name].append(p_val)
    history["loss"].append(loss.item())
# -

ncols = len(history)
fig, axs = plt.subplots(ncols=ncols, nrows=1, figsize=(ncols * 5, 5))
@@ -258,8 +188,9 @@ for ax, (p_name, p_lst) in zip(axs, history.items()):
    ax.plot(p_lst)
    ax.set_title(p_name)
    ax.set_xlabel("iterations")
# -

# Values of optimized hyper params
pprint(extract_model_params(model, raw=False))

# # Run prediction
@@ -316,7 +247,6 @@ for ax, c in zip(axs, [c0, c1]):
    ax.set_ylabel("X_1")
    ax.scatter(x=X_train[:, 0], y=X_train[:, 1], color="white", alpha=0.2)
    fig.colorbar(c, ax=ax)
# -

# When running as script
if not is_interactive():
BLcourse2.3/utils.py  0 → 100644  +62 −0  View file @ 90eafd04
import gpytorch


def extract_model_params(model, raw=False) -> dict:
    """
    Helper to convert model.named_parameters() to dict.

    With raw=True, use
        foo.bar.raw_param
    else
        foo.bar.param

    See https://docs.gpytorch.ai/en/stable/examples/00_Basic_Usage/Hyperparameters.html#Raw-vs-Actual-Parameters
    """
    if raw:
        return dict(
            (p_name, p_val.item()) for p_name, p_val in model.named_parameters()
        )
    else:
        out = dict()
        # p_name = 'covar_module.base_kernel.raw_lengthscale'. Access
        # model.covar_module.base_kernel.lengthscale (w/o the raw_)
        for p_name, p_val in model.named_parameters():
            # Yes, eval() hack. Sorry.
            p_name = p_name.replace(".raw_", ".")
            p_val = eval(f"model.{p_name}")
            out[p_name] = p_val.item()
        return out


def plot_samples(ax, X_pred, samples, label=None, **kwds):
    plot_kwds = dict(color="tab:green", alpha=0.3)
    plot_kwds.update(kwds)

    if label is None:
        ax.plot(X_pred, samples.T, **plot_kwds)
    else:
        ax.plot(X_pred, samples[0, :], **plot_kwds, label=label)
        ax.plot(X_pred, samples[1:, :].T, **plot_kwds, label="_")


class ExactGPModel(gpytorch.models.ExactGP):
    """
    API:
        model.forward()             prior                   f_pred
        model()                     posterior               f_pred

        likelihood(model.forward()) prior with noise        y_pred
        likelihood(model())         posterior with noise    y_pred
    """

    def __init__(self, X_train, y_train, likelihood):
        super().__init__(X_train, y_train, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(
            gpytorch.kernels.RBFKernel()
        )

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
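To make the raw vs. actual parameter naming from the docstring concrete, a usage sketch (not part of the commit; an untrained model on placeholder toy data):

import torch
import gpytorch
from utils import ExactGPModel, extract_model_params

X_train = torch.linspace(0, 1, 10)
y_train = torch.sin(2 * torch.pi * X_train)

likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = ExactGPModel(X_train, y_train, likelihood)

# raw=True: unconstrained parameters as stored by gpytorch, e.g.
# 'likelihood.noise_covar.raw_noise', 'covar_module.base_kernel.raw_lengthscale'
print(extract_model_params(model, raw=True))

# raw=False: the constrained (actual) values under the names without 'raw_', e.g.
# 'likelihood.noise_covar.noise', 'covar_module.base_kernel.lengthscale'
print(extract_model_params(model, raw=False))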