# compute kernel stuff
num_data = tf.shape(X)[0]
Kmn = kern.K(X, Xnew)
Kmm = kern.K(X) + eye(num_data)
Lm = tf.cholesky(Kmm)
# Compute the projection matrix A
A = tf.matrix_triangular_solve(Lm, Kmn, lower=True)
# compute the covariance due to the conditioning
if full_cov:
    fvar = kern.K(Xnew) - tf.matmul(tf.transpose(A), A)
    fvar = tf.tile(tf.expand_dims(fvar, 2), [1, 1, num_columns])
else:
    fvar = kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(A), 0)
    fvar = tf.tile(tf.expand_dims(fvar, 1), [1, num_columns])
# another backsubstitution in the unwhitened case
if not whiten:
    A = tf.matrix_triangular_solve(tf.transpose(Lm), A, lower=False)
# construct the conditional mean
fmean = tf.matmul(tf.transpose(A), f)
# add extra projected variance from q(f) if needed
if q_sqrt is not None:
    projected_var = []
    for d in range(num_columns):
        if q_sqrt.get_shape().ndims == 2:
            LTA = A * q_sqrt[:, d:d + 1]
        elif q_sqrt.get_shape().ndims == 3:
            L = tf.user_ops.triangle(q_sqrt[:, :, d], "lower")
            LTA = tf.matmul(tf.transpose(L), A)
        else:  # pragma: no cover
            raise ValueError("Bad dimension for q_sqrt: %s" %
                             str(q_sqrt.get_shape().ndims))
After Change
# compute kernel stuff
num_data = tf.shape(X)[0]
Kmn = kern.K(X, Xnew)
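# a small diagonal jitter keeps Kmm positive definite, so the Cholesky below
# does not fail on (near-)singular kernel matrices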
Kmm = kern.K(X) + eye(num_data) * 1e-6
Lm = tf.cholesky(Kmm)
# Compute the projection matrix A
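# A = Lm^{-1} Kmn; a triangular solve against the Cholesky factor avoids
# forming Kmm^{-1} explicitly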
A = tf.matrix_triangular_solve(Lm, Kmn, lower=True)
# compute the covariance due to the conditioning
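# conditional covariance K(Xnew, Xnew) - Kmn^T Kmm^{-1} Kmn, using
# Kmn^T Kmm^{-1} Kmn = A^T A since A = Lm^{-1} Kmn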
if full_cov:
    fvar = kern.K(Xnew) - tf.matmul(tf.transpose(A), A)
    fvar = tf.tile(tf.expand_dims(fvar, 2), [1, 1, num_columns])
else:
    fvar = kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(A), 0)
    fvar = tf.tile(tf.expand_dims(fvar, 1), [1, num_columns])
# another backsubstitution in the unwhitened case
if not whiten:
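    # A currently holds Lm^{-1} Kmn; solving against Lm^T completes Kmm^{-1} Kmn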
    A = tf.matrix_triangular_solve(tf.transpose(Lm), A, lower=False)
# construct the conditional mean
fmean = tf.matmul(tf.transpose(A), f)
# add extra projected variance from q(f) if needed
if q_sqrt is not None:
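    # q_sqrt holds a square root of the variational covariance: one column of
    # diagonal entries per output when 2-D, one lower-triangular factor per
    # output when 3-D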
    projected_var = []
    for d in range(num_columns):
        if q_sqrt.get_shape().ndims == 2:
            LTA = A * q_sqrt[:, d:d + 1]
        elif q_sqrt.get_shape().ndims == 3:
            L = tf.user_ops.triangle(q_sqrt[:, :, d], "lower")
            LTA = tf.matmul(tf.transpose(L), A)
        else:  # pragma: no cover
            raise ValueError("Bad dimension for q_sqrt: %s" %
                             str(q_sqrt.get_shape().ndims))
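The only functional change above is the `* 1e-6` jitter added to the diagonal of Kmm before the Cholesky factorization. The sketch below is plain NumPy, not part of the code above; the rbf helper and the duplicated-input data are illustrative assumptions. It shows why the jitter matters: a rank-deficient kernel matrix makes the Cholesky factorization fail, while a small diagonal jitter restores positive-definiteness.

# Minimal NumPy sketch of the jitter's effect (illustrative only).
import numpy as np

def rbf(X, lengthscale=1.0):
    # squared-exponential kernel, standing in for kern.K(X)
    sq_dist = np.sum((X[:, None, :] - X[None, :, :]) ** 2, axis=-1)
    return np.exp(-0.5 * sq_dist / lengthscale ** 2)

X = np.array([[0.0], [0.0], [1.0]])  # duplicated input -> K is rank-deficient
K = rbf(X)

try:
    np.linalg.cholesky(K)  # fails: K is not positive definite
except np.linalg.LinAlgError as err:
    print("without jitter:", err)

L = np.linalg.cholesky(K + 1e-6 * np.eye(len(X)))  # succeeds with jitter
print("with jitter, L[0, 0] =", L[0, 0])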