function [xminuxbar, sq_grad, time, beta] = Prox_PDA(x_temp, edge_index, iter_num, big_L, WW, min_eig_L_hat, A, B, D, Adj, degree, n, N, gc, lambda, aalpha, features, labels, bs)
% Prox_PDA: proximal primal-dual algorithm (baseline implementation from the xFilter code).
% Outputs:
%   xminuxbar - consensus error sum_k ||x_k - x_bar||^2 at each iteration
%   sq_grad   - squared norm of the full gradient evaluated at the average iterate
%   time      - cumulative wall-clock time per iteration
%   beta      - penalty parameter used in the primal/dual updates
fprintf('Prox_PDA starting... \n');
sq_grad = zeros(iter_num, 1);   % iter_num is the total number of iterations T (e.g., T = 1000)
xminuxbar = zeros(iter_num, 1);
time = zeros(iter_num, 1);
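% The handle gc(x, lambda, aalpha, feature, label, bs, N) returns one sample's
% contribution to a node's local gradient. As a hedged sketch only (gc_example,
% the logistic loss, and the scaling by N and bs are assumptions, not taken from
% this repository), it could look like:
%
%   function g = gc_example(x, lambda, aalpha, feature, label, bs, N)
%   % per-sample gradient: logistic loss plus a smooth nonconvex penalty
%   g = -label * feature / (1 + exp(label * (feature' * x))) / bs;
%   g = g + 2 * lambda * aalpha * x ./ (1 + aalpha * x.^2).^2 / (N * bs);
%   end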
x = x_temp;                                 % stacked iterates, one N*n column per iteration
mu = zeros((edge_index-1)*n, iter_num);     % dual variable, one n-dimensional block per edge
upd = textprogressbar(iter_num);            % console progress bar (third-party helper)
% penalty parameter: scales with the Lipschitz constant big_L and lambda_max(WW)
beta = 80 * big_L * max(max(eig(WW)), 1) / (min(min_eig_L_hat, 1)*N);
% Precompute the factors of the x-update,
%   x^{r} = [beta*(A'A + B'B)]^{-1} * (beta*B'B*x^{r-1} - grad - A'*mu^{r-1}),
% once, since they are reused at every iteration.
f1 = (1/beta) * inv(A'*A + B'*B);
f2 = beta * (B'*B);
f1 = sparse(f1);
f2 = sparse(f2);
A = sparse(A);
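% A hedged sketch (an assumption about the setup, not code from this repository)
% of how A and B are typically built for Prox-PDA: A is the signed edge-incidence
% matrix lifted by kron(., eye(n)), and B = |A|, so that A'*A is the graph
% Laplacian, B'*B is the signless Laplacian, and A'*A + B'*B = 2*diag(degree),
% which keeps the inverse above cheap. With E edges, A has E*n rows, matching
% the size of mu above:
%
%   E = nnz(triu(Adj, 1));             % number of edges
%   Ainc = zeros(E, size(Adj, 1));     % signed incidence matrix
%   e = 1;
%   for ii = 1:size(Adj, 1)
%       for jj = ii+1:size(Adj, 1)
%           if Adj(ii, jj) == 1
%               Ainc(e, ii) = 1; Ainc(e, jj) = -1; e = e + 1;
%           end
%       end
%   end
%   A_example = kron(Ainc, eye(n)); B_example = kron(abs(Ainc), eye(n));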
for iter = 2 : iter_num
    tic;
    upd(iter);
    % compute each node's local gradient over its mini-batch of size bs
    gradient = zeros(N*n, 1);
    for ii = 1 : N
        for jj = (ii-1)*bs+1 : ii*bs
            gradient((ii-1)*n+1:ii*n) = gradient((ii-1)*n+1:ii*n) + gc(x((ii-1)*n+1:ii*n, iter-1), lambda, aalpha, features(:,jj), labels(jj), bs, N);
        end
    end
    % Prox-PDA primal and dual updates
    x(:,iter) = f1 * (f2*x(:,iter-1) - gradient - A'*mu(:,iter-1));
    mu(:,iter) = mu(:,iter-1) + beta * A * x(:,iter);
    % consensus error: sum_k ||x_k - x_bar||^2 at the current iterate
    x_ = reshape(x(:,iter), [n, N]);
    x_avg = sum(x_, 2) / N;
    for k = 1 : N
        xminuxbar(iter) = xminuxbar(iter) + norm(x_(:,k) - x_avg)^2;
    end
    % squared norm of the full gradient evaluated at the average iterate
    temp_grad = zeros(N*n, 1);
    for ii = 1 : N
        for jj = (ii-1)*bs+1 : ii*bs
            temp_grad((ii-1)*n+1:ii*n) = temp_grad((ii-1)*n+1:ii*n) + gc(x_avg, lambda, aalpha, features(:,jj), labels(jj), bs, N);
        end
    end
    g = reshape(temp_grad, [n, N]);
    sq_grad(iter) = sum(sum(g, 2).^2);
    t_temp = toc;
    time(iter) = time(iter-1) + t_temp;
end
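% A hedged usage sketch: all sizes below and gc_example are illustrative
% assumptions, the graph quantities (Adj, degree, A, B, D, WW, big_L,
% min_eig_L_hat) are assumed to come from the surrounding experiment setup,
% and edge_index = num_edges + 1 is inferred from the size of mu above:
%
%   n = 10; N = 5; bs = 4; T = 500;
%   features = randn(n, N*bs); labels = sign(randn(1, N*bs));
%   x0 = zeros(N*n, T);                            % one column per iteration
%   num_edges = nnz(triu(Adj, 1));
%   [consensus_err, grad_sq, wall_time, beta] = Prox_PDA(x0, num_edges+1, T, ...
%       big_L, WW, min_eig_L_hat, A, B, D, Adj, degree, n, N, @gc_example, ...
%       lambda, aalpha, features, labels, bs);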