
Commit fa64793

refined doc for ch09

1 parent 71e007d

File tree

14 files changed: +141 −81 lines changed


chapter07/rvmBinFp.m

Lines changed: 9 additions & 2 deletions

@@ -1,6 +1,13 @@
 function [model, llh] = rvmBinFp(X, t, alpha)
-% Relevance Vector Machine (ARD sparse prior) for binary classification
-% training by empirical bayesian (type II ML) using fix point update (Mackay update)
+% Relevance Vector Machine (ARD sparse prior) for binary classification,
+% trained by empirical Bayesian (type II ML) using the Mackay fixed-point update.
+% Input:
+% X: d x n data matrix
+% t: 1 x n label (0/1)
+% alpha: prior parameter
+% Output:
+% model: trained model structure
+% llh: loglikelihood
 % Written by Mo Chen ([email protected]).
 if nargin < 3
     alpha = 1;
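
A minimal usage sketch of the interface documented above, assuming kmeansRnd and rvmBinPred from this toolbox are on the path and that rvmBinPred accepts the model from rvmBinFp as it does from rvmBinEm in the ch09 demo:

% Sketch: train rvmBinFp on synthetic two-class data (assumes toolbox on path).
clear;
d = 2; k = 2; n = 500;
[X, t] = kmeansRnd(d, k, n);      % t is 1 x n with labels in {1,2}
[model, llh] = rvmBinFp(X, t-1);  % shift labels to {0,1} as documented
plot(llh);                        % loglikelihood per iteration
y = rvmBinPred(model, X) + 1;     % predicted labels back in {1,2}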

chapter09/demo.m

Lines changed: 66 additions & 66 deletions

@@ -1,70 +1,70 @@
+% demos for ch09
 
+%% Empirical Bayesian linear regression via EM
+close all; clear;
+d = 5;
+n = 200;
+[x,t] = linRnd(d,n);
+[model,llh] = linRegEm(x,t);
+plot(llh);
 
-%% demo: EM linear regression
-% close all; clear;
-% d = 5;
-% n = 200;
-% [x,t] = linRnd(d,n);
-% [model,llh] = linRegEm(x,t);
-% plot(llh);
+%% RVM classification via EM
+clear; close all
+k = 2;
+d = 2;
+n = 1000;
+[X,t] = kmeansRnd(d,k,n);
+[x1,x2] = meshgrid(linspace(min(X(1,:)),max(X(1,:)),n), linspace(min(X(2,:)),max(X(2,:)),n));
 
-%% classification
-% clear; close all
-% k = 2;
-% d = 2;
-% n = 1000;
-% [X,t] = kmeansRnd(d,k,n);
-% [x1,x2] = meshgrid(linspace(min(X(1,:)),max(X(1,:)),n), linspace(min(X(2,:)),max(X(2,:)),n));
-%
-% [model, llh] = rvmBinEm(X,t-1);
-% plot(llh);
-% y = rvmBinPred(model,X)+1;
-% figure;
-% binPlot(model,X,y);
-%% demo: kmeans
-% close all; clear;
-% d = 2;
-% k = 3;
-% n = 500;
-% [X,label] = kmeansRnd(d,k,n);
-% y = kmeans(X,k);
-% plotClass(X,label);
-% figure;
-% plotClass(X,y);
+[model, llh] = rvmBinEm(X,t-1);
+plot(llh);
+y = rvmBinPred(model,X)+1;
+figure;
+binPlot(model,X,y);
+%% kmeans
+close all; clear;
+d = 2;
+k = 3;
+n = 500;
+[X,label] = kmeansRnd(d,k,n);
+y = kmeans(X,k);
+plotClass(X,label);
+figure;
+plotClass(X,y);
 
-%% demo: Em for Gauss Mixture
-% close all; clear;
-% d = 2;
-% k = 3;
-% n = 1000;
-% [X,label] = mixGaussRnd(d,k,n);
-% plotClass(X,label);
-%
-% m = floor(n/2);
-% X1 = X(:,1:m);
-% X2 = X(:,(m+1):end);
-% % train
-% [z1,model,llh] = mixGaussEm(X1,k);
-% figure;
-% plot(llh);
-% figure;
-% plotClass(X1,z1);
-% % predict
-% z2 = mixGaussPred(X2,model);
-% figure;
-% plotClass(X2,z2);
-%% demo: Em for Gauss mixture initialized with kmeans;
-% close all; clear;
-% d = 2;
-% k = 3;
-% n = 500;
-% [X,label] = mixGaussRnd(d,k,n);
-% init = kmeans(X,k);
-% [z,model,llh] = mixGaussEm(X,init);
-% plotClass(X,label);
-% figure;
-% plotClass(X,init);
-% figure;
-% plotClass(X,z);
-% figure;
-% plot(llh);
+%% Gaussian mixture via EM
+close all; clear;
+d = 2;
+k = 3;
+n = 1000;
+[X,label] = mixGaussRnd(d,k,n);
+plotClass(X,label);
+
+m = floor(n/2);
+X1 = X(:,1:m);
+X2 = X(:,(m+1):end);
+% train
+[z1,model,llh] = mixGaussEm(X1,k);
+figure;
+plot(llh);
+figure;
+plotClass(X1,z1);
+% predict
+z2 = mixGaussPred(X2,model);
+figure;
+plotClass(X2,z2);
+%% Gauss mixture initialized by kmeans
+close all; clear;
+d = 2;
+k = 3;
+n = 500;
+[X,label] = mixGaussRnd(d,k,n);
+init = kmeans(X,k);
+[z,model,llh] = mixGaussEm(X,init);
+plotClass(X,label);
+figure;
+plotClass(X,init);
+figure;
+plotClass(X,z);
+figure;
+plot(llh);

chapter09/kmeans.m

Lines changed: 7 additions & 2 deletions

@@ -1,7 +1,12 @@
 function [label, energy, model] = kmeans(X, init)
-% Perform k-means clustering.
+% Perform k-means clustering.
+% Input:
 % X: d x n data matrix
-% k: number of seeds
+% init: number of clusters k (1 x 1) or initial label (1 x n vector)
+% Output:
+% label: 1 x n cluster label
+% energy: optimization target value
+% model: trained model structure
 % Written by Mo Chen ([email protected]).
 n = size(X,2);
 if numel(init)==1
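
A short sketch of the two init forms the revised header names, assuming kmeansRnd from this toolbox is on the path; the third output is the model structure consumed by kmeansPred:

% Sketch: both init forms of kmeans (assumes toolbox on path).
clear;
[X, z] = kmeansRnd(2, 3, 500);          % synthetic 3-cluster data
[label, energy, model] = kmeans(X, 3);  % init = number of clusters
[label2, energy2] = kmeans(X, z);       % init = 1 x n initial label vector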

chapter09/kmeansPred.m

Lines changed: 4 additions & 0 deletions

@@ -1,7 +1,11 @@
 function [label, energy] = kmeansPred(model, Xt)
 % Prediction for kmeans clustering
+% Input:
 % model: trained model structure
 % Xt: d x n testing data
+% Output:
+% label: 1 x n cluster label
+% energy: optimization target value
 % Written by Mo Chen ([email protected]).
 [val,label] = min(sqdist(model.means, Xt));
 energy = sum(val);
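
A sketch of the train/predict split this interface supports, assuming kmeans and kmeansRnd from this toolbox are on the path:

% Sketch: assign held-out data with kmeansPred (assumes toolbox on path).
clear;
[X, ~] = kmeansRnd(2, 3, 600);
m = 300;
[~, ~, model] = kmeans(X(:,1:m), 3);                % train on first half
[label, energy] = kmeansPred(model, X(:,m+1:end));  % label second half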

chapter09/kmeansRnd.m

Lines changed: 10 additions & 2 deletions

@@ -1,6 +1,14 @@
 function [X, z, center] = kmeansRnd(d, k, n)
-% Sampling from a Gaussian mixture distribution with common variances (kmeans model).
-% Written by Michael Chen ([email protected]).
+% Generate samples from a Gaussian mixture distribution with common variances (kmeans model).
+% Input:
+% d: dimension of data
+% k: number of components
+% n: number of data
+% Output:
+% X: d x n data matrix
+% z: 1 x n response variable
+% center: d x k centers of clusters
+% Written by Mo Chen ([email protected]).
 alpha = 1;
 beta = nthroot(k,d); % in a volume x^d there are k points: x^d = k

chapter09/linRegEm.m

Lines changed: 1 addition & 1 deletion

@@ -4,7 +4,7 @@
 % X: d x n data
 % t: 1 x n response
 % alpha: prior parameter
-% beta? prior parameter
+% beta: prior parameter
 % Output:
 % model: trained model structure
 % llh: loglikelihood

chapter09/mixBernEm.m

Lines changed: 6 additions & 1 deletion

@@ -1,7 +1,12 @@
 function [label, model, llh] = mixBernEm(X, k)
 % Perform EM algorithm for fitting the Bernoulli mixture model.
+% Input:
 % X: d x n data matrix
-% init: k (1 x 1) or label (1 x n, 1<=label(i)<=k) or center (d x k)
+% k: number of clusters (1 x 1), or label (1 x n, 1<=label(i)<=k), or model structure
+% Output:
+% label: 1 x n cluster label
+% model: trained model structure
+% llh: loglikelihood
 % Written by Mo Chen ([email protected]).
 %% initialization
 fprintf('EM for mixture model: running ... \n');
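A minimal sketch of fitting this model; the binary data generator is a hypothetical stand-in, since the toolbox sampler for Bernoulli mixtures is not part of this commit:

% Sketch: fit a Bernoulli mixture to synthetic binary data (assumes toolbox on path).
clear;
d = 10; k = 2; n = 500;
mu = rand(d, k);                    % hypothetical component means
z = randi(k, 1, n);                 % hypothetical true assignments
X = double(rand(d, n) < mu(:, z));  % d x n binary data matrix
[label, model, llh] = mixBernEm(X, k);
plot(llh);                          % loglikelihood should be non-decreasing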

chapter09/mixGaussEm.m

Lines changed: 1 addition & 1 deletion

@@ -2,7 +2,7 @@
 % Perform EM algorithm for fitting the Gaussian mixture model.
 % Input:
 % X: d x n data matrix
-% init: k (1 x 1) or label (1 x n, 1<=label(i)<=k) or model structure
+% init: number of components k (1 x 1), or label (1 x n, 1<=label(i)<=k), or model structure
 % Output:
 % label: 1 x n cluster label
 % model: trained model structure

chapter09/mixGaussPred.m

Lines changed: 4 additions & 0 deletions

@@ -1,7 +1,11 @@
 function [label, R] = mixGaussPred(X, model)
 % Predict label and responsibility for Gaussian mixture model.
+% Input:
 % X: d x n data matrix
 % model: trained model structure output by the EM algorithm
+% Output:
+% label: 1 x n cluster label
+% R: k x n responsibility
 % Written by Mo Chen ([email protected]).
 mu = model.mu;
 Sigma = model.Sigma;

chapter09/mixGaussRnd.m

Lines changed: 9 additions & 1 deletion

@@ -1,6 +1,14 @@
 function [X, z, model] = mixGaussRnd(d, k, n)
 % Sampling from a Gaussian mixture distribution.
-% Written by Michael Chen ([email protected]).
+% Input:
+% d: dimension of data
+% k: number of components
+% n: number of data
+% Output:
+% X: d x n data matrix
+% z: 1 x n response variable
+% model: model structure
+% Written by Mo Chen ([email protected]).
 alpha0 = 1; % hyperparameter of Dirichlet prior
 W0 = eye(d); % hyperparameter of inverse Wishart prior of covariances
 v0 = d+1; % hyperparameter of inverse Wishart prior of covariances

chapter09/mixMnEm.m

Lines changed: 6 additions & 1 deletion

@@ -1,7 +1,12 @@
 function [label, model, llh] = mixMnEm(X, k)
 % Perform EM algorithm for fitting the multinomial mixture model.
+% Input:
 % X: d x n data matrix
-% init: k (1 x 1) or label (1 x n, 1<=label(i)<=k) or center (d x k)
+% k: number of clusters (1 x 1), or label (1 x n, 1<=label(i)<=k), or model structure
+% Output:
+% label: 1 x n cluster label
+% model: trained model structure
+% llh: loglikelihood
 % Written by Mo Chen ([email protected]).
 %% initialization
 fprintf('EM for mixture model: running ... \n');
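A similar sketch for the multinomial mixture; the count-data generator is a hypothetical stand-in built from base MATLAB only:

% Sketch: fit a multinomial mixture to synthetic count data (assumes toolbox on path).
clear;
d = 20; k = 3; n = 400; m = 50;       % m categorical draws per observation
p = rand(d, k);
p = bsxfun(@rdivide, p, sum(p, 1));   % hypothetical component distributions
z = randi(k, 1, n);                   % hypothetical true assignments
X = zeros(d, n);
for i = 1:n                           % tally m draws into a count column
    [~, s] = histc(rand(1, m), [0, cumsum(p(:, z(i)))']);
    X(:, i) = accumarray(s(:), 1, [d, 1]);
end
[label, model, llh] = mixMnEm(X, k);
plot(llh);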

chapter09/rvmBinEm.m

Lines changed: 9 additions & 2 deletions

@@ -1,6 +1,13 @@
 function [model, llh] = rvmBinEm(X, t, alpha)
-% Relevance Vector Machine (ARD sparse prior) for binary classification
-% training by empirical bayesian (type II ML) using fix point update (Mackay update)
+% Relevance Vector Machine (ARD sparse prior) for binary classification,
+% trained by empirical Bayesian (type II ML) using EM.
+% Input:
+% X: d x n data matrix
+% t: 1 x n label (0/1)
+% alpha: prior parameter
+% Output:
+% model: trained model structure
+% llh: loglikelihood
 % Written by Mo Chen ([email protected]).
 if nargin < 3
     alpha = 1;

chapter09/rvmRegEm.m

Lines changed: 9 additions & 1 deletion

@@ -1,6 +1,14 @@
 function [model, llh] = rvmRegEm(X, t, alpha, beta)
 % Relevance Vector Machine (ARD sparse prior) for regression
-% training by empirical bayesian (type II ML) using standard EM update
+% trained by empirical Bayesian (type II ML) using EM
+% Input:
+% X: d x n data
+% t: 1 x n response
+% alpha: prior parameter
+% beta: prior parameter
+% Output:
+% model: trained model structure
+% llh: loglikelihood
 % Written by Mo Chen ([email protected]).
 if nargin < 3
     alpha = 0.02;
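
A minimal sketch mirroring the ch09 demo pattern, assuming linRnd from this toolbox is on the path:

% Sketch: RVM regression via EM on synthetic linear data (assumes toolbox on path).
clear;
d = 5; n = 200;
[x, t] = linRnd(d, n);          % synthetic linear-Gaussian data
[model, llh] = rvmRegEm(x, t);  % default alpha/beta priors
plot(llh);                      % loglikelihood per EM iteration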

chapter14/mixLinRnd.m

Lines changed: 0 additions & 1 deletion

@@ -9,7 +9,6 @@
 % y: 1 x n response variable
 % W: d+1 x k weight matrix
 % Written by Mo Chen ([email protected]).
-% Written by Mo Chen ([email protected]).
 W = randn(d+1,k);
 [X, z] = kmeansRnd(d, k, n);
 y = zeros(1,n);
