
Commit af73d78

Added updated objective function for hyperparameter optimization; seems to be working for sigma values, but not for kernel parameters

ianran committed Nov 28, 2023
1 parent ccec230 commit af73d78

Showing 4 changed files with 24 additions and 12 deletions.
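For context on the commit message: in this commit the hyperparameters are packed into a single vector, with the probit sigma first and the kernel parameters after it (see calc_ll_param below, which reads hyperparameters[1:] into the kernel). The following is a minimal sketch of how such an objective could be driven by a generic optimizer; the SciPy wiring, function name, and starting values are assumptions for illustration, not part of the commit:

    import numpy as np
    from scipy.optimize import minimize

    # Sketch only: score a packed hyperparameter vector [sigma, *kernel_params]
    # with the GP's objective. calc_ll_param appears in this commit; whether its
    # return value should be minimized or maximized depends on the sign of loss_F.
    def objective(theta, gp, X_train, y_train):
        return gp.calc_ll_param(theta, X_train, y_train)

    # Hypothetical call (starting values are placeholders):
    # res = minimize(objective, x0=np.array([0.05, 0.5, 0.2]),
    #                args=(gp, X_train, y_train), method='Nelder-Mead')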
examples/GaussianProcess/User2DGP.py (4 changes: 2 additions & 2 deletions)
@@ -57,7 +57,7 @@ def f_sq(x, data=None):
 #gp = gr.PreferenceGP(gr.linear_kern(0.3, 0.1, 0.0))
 gp = gr.PreferenceGP(gr.RBF_kern(1.0, 1.0), pareto_pairs=True, \
                 use_hyper_optimization=False, normalize_positive=True)
-gp.add_prior(bounds=np.array(bounds))
+#gp.add_prior(bounds=np.array(bounds), num_pts=100)



@@ -78,7 +78,7 @@ def f_sq(x, data=None):

 gp.add(train_X[selected_idx], pairs)

-gp.optimize(optimize_hyperparameter=False)
+gp.optimize(optimize_hyperparameter=True)

 x = np.linspace(bounds[0][0], bounds[0][1], num_side)
 y = np.linspace(bounds[1][0], bounds[1][1], num_side)
examples/GaussianProcess/UserGP.py (6 changes: 4 additions & 2 deletions)
@@ -39,18 +39,20 @@ def f_sin(x, data=None):
 gr.generate_fake_pairs(X_train, f_sin, 4)


-gp = gr.PreferenceGP(gr.RBF_kern(0.5, 0.7))
+#gp = gr.PreferenceGP(gr.RBF_kern(0.5, 0.7))
+gp = gr.PreferenceGP(gr.RBF_kern(0.5, 0.2))
 #gp = gr.PreferenceGP(gr.periodic_kern(1.2,0.3,5))
 #gp = gr.PreferenceGP(gr.linear_kern(0.2, 0.2, 0.2))
 #gp = gr.PreferenceGP(gr.RBF_kern(0.2,1)+gr.periodic_kern(1,0.2,0)+gr.linear_kern(0.2,0.1,0.3))
 #gp = gr.PreferenceGP(gr.RBF_kern(0.1,1)*gr.linear_kern(0.3,0.2,0.3))

 gp.add(X_train, pairs)

-gp.optimize(optimize_hyperparameter=False)
+gp.optimize(optimize_hyperparameter=True)
 print('gp.calc_ll()')
 print(gp.calc_ll())

+print('Sigma = '+str(gp.probits[0].sigma))

 X = np.arange(-0.5, 8, 0.1)
 mu, sigma = gp.predict(X)
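The example ends by querying the fitted model over a grid. Below is a minimal sketch of how those predictions might be plotted, continuing from the gp above; matplotlib and the shape handling of the returned sigma are assumptions, not part of the commit:

    import numpy as np
    import matplotlib.pyplot as plt

    # Sketch only: visualize the latent preference mean with an uncertainty band.
    X = np.arange(-0.5, 8, 0.1)
    mu, sigma = gp.predict(X)
    # sigma may be a full covariance matrix or a variance vector; handle both.
    var = np.diagonal(sigma) if np.ndim(sigma) == 2 else sigma
    std = np.sqrt(var)

    plt.plot(X, mu, label='predicted mean')
    plt.fill_between(X, mu - 2*std, mu + 2*std, alpha=0.3, label='2-sigma band')
    plt.legend()
    plt.show()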
src/rdml_graph/gaussian_process/PreferenceGP.py (23 changes: 15 additions & 8 deletions)
@@ -67,7 +67,7 @@ def __init__(self, cov_func, normalize_gp=True, pareto_pairs=False, \
         super(PreferenceGP, self).__init__(cov_func, mat_inv, active_learner=active_learner)
         PreferenceModel.__init__(self, pareto_pairs, other_probits)

-        self.lambda_gp = 0.1
+        self.lambda_gp = 0.9

         self.normalize_gp = normalize_gp

@@ -76,7 +76,7 @@ def __init__(self, cov_func, normalize_gp=True, pareto_pairs=False, \

         # sigma on the likelihood function.
         #self.sigma_L = 1.0
-        self.probits = [PreferenceProbit(sigma = 1.0)]
+        self.probits = [PreferenceProbit(sigma = 0.05)]

@@ -88,7 +88,7 @@ def __init__(self, cov_func, normalize_gp=True, pareto_pairs=False, \

-        self.delta_f = 0.002 # set the convergence to stop
+        self.delta_f = 0.0002 # set the convergence to stop
         self.maxloops = 100
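For quick reference, the three default changes above amount to:

    # Defaults changed in this commit (old -> new):
    #   self.lambda_gp : 0.1   -> 0.9
    #   probit sigma   : 1.0   -> 0.05
    #   self.delta_f   : 0.002 -> 0.0002  (findMode convergence threshold)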


@@ -144,14 +144,15 @@ def calc_ll_param(self, hyperparameters, X_train, y_train):

         self.probits[self.probit_idxs['relative_discrete']].set_sigma(sigma_L)
         self.cov_func.set_param(hyperparameters[1:])
-        K = self.cov_func.cov(X_train, X_train)
+        # K = self.cov_func.cov(X_train, X_train)

-        W, dpy_df, logpYF = self.derivatives(y_train, self.F)
+        # W, dpy_df, logpYF = self.derivatives(y_train, self.F)

-        Kinv = self.invert_function(K)
-        term2 = 0.5 * np.matmul(np.matmul(np.transpose(self.F), Kinv), self.F)
+        # Kinv = self.invert_function(K)
+        # term2 = 0.5 * np.matmul(np.matmul(np.transpose(self.F), Kinv), self.F)

-        term3 = 0.5 * np.log(np.linalg.det(np.identity(len(K)) + np.matmul(K, W)))
+        # term3 = 0.5 * np.log(np.linalg.det(np.identity(len(K)) + np.matmul(K, W)))
+        return self.loss_F(self.F)

         #return logpYF
         return logpYF - term2 - term3
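For reference, the expression being commented out is the Laplace-approximation log marginal likelihood, log p(y|F) - 0.5 * F^T K^-1 F - 0.5 * log det(I + K W). A minimal numpy sketch of those terms under that reading follows; the use of solve and slogdet in place of an explicit inverse and determinant is my substitution for numerical stability, not the repository's code:

    import numpy as np

    def laplace_log_marginal(logpYF, F, K, W):
        # term2: 0.5 * F^T K^{-1} F, computed with a solve instead of an inverse
        term2 = 0.5 * F @ np.linalg.solve(K, F)
        # term3: 0.5 * log det(I + K W), via slogdet to avoid overflow in det
        sign, logdet = np.linalg.slogdet(np.eye(len(K)) + K @ W)
        term3 = 0.5 * logdet
        return logpYF - term2 - term3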
@@ -254,6 +255,7 @@ def findMode(self, x_train, y_train):

             # check for convergence
             f_err = np.linalg.norm((F_new - F), ord=np.inf)
+            print("\tf_err="+str(f_err))
             F = F_new

@@ -262,11 +264,16 @@ def findMode(self, x_train, y_train):
                 print('WARNING: maximum loops in findMode exceeded. Returning current solution')
                 break

+        print('Optimization ran for: '+str(n_loops))
+
+
         self.F = F
         # calculate W with final F
         self.W, self.grad_ll, self.log_likelihood = \
                         self.derivatives(y_train, self.F)

+        print('Best solution, training data likelihood = '+str(self.log_likelihood))
+

     ## optimize
     # Runs the optimization step required by the user preference GP.
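Taken together, the findMode additions instrument the convergence behavior of the mode-finding iteration. Below is a minimal sketch of the loop these prints live in, under an assumed structure; newton_step is a hypothetical stand-in for the actual update, not the repository's code:

    import numpy as np

    def find_mode_sketch(F, newton_step, delta_f=0.0002, maxloops=100):
        # Sketch only: iterate an update until the change in F is small in the
        # infinity norm, mirroring the f_err check and prints added above.
        n_loops = 0
        for n_loops in range(1, maxloops + 1):
            F_new = newton_step(F)
            f_err = np.linalg.norm(F_new - F, ord=np.inf)
            print("\tf_err=" + str(f_err))
            F = F_new
            if f_err < delta_f:
                break
        else:
            print('WARNING: maximum loops in findMode exceeded. Returning current solution')
        print('Optimization ran for: ' + str(n_loops))
        return F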
src/rdml_graph/gaussian_process/PreferenceModel.py (3 changes: 3 additions & 0 deletions)
@@ -237,6 +237,9 @@ def add(self, X, y, type='relative_discrete', training_sigma=0):
             self.y_train[self.probit_idxs['relative_discrete']] = \
                 np.append(self.y_train[self.probit_idxs['relative_discrete']], \
                 np.array(pairs), axis=0)
+        else:
+            import pdb
+            pdb.set_trace()
         # end if for pareto_pairs
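The added else branch is debugging scaffolding rather than error handling: it pauses execution in the interactive debugger whenever add() reaches a case it does not handle. A more conventional guard might look like this sketch, offered as a suggestion and not the repository's approach:

    # Hypothetical alternative to the pdb.set_trace() branch: fail loudly
    # with a descriptive error instead of pausing in the debugger.
    def unhandled_add_case(type_name):
        raise ValueError(
            'PreferenceModel.add: unhandled input for type=' + str(type_name))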


