(Note: the corrected code is posted below the original code.)
As an optimization exercise, I am interested in writing a simple example from scratch: a Tikhonov denoising routine based on gradient descent whose output exactly matches the result of LSQ-based Tikhonov denoising.
For the regularized recovery of a denoised version $\hat{x}$ of a noisy signal $b$, I applied the typical Tikhonov formulation
$$\hat{x} = \arg\min_x \|Ix - b\|_2^2 + \|\lambda D x\|_2^2,$$
with $I$ the identity matrix and $\lambda D$ the weighted gradient matrix.
In the LSQ case, this can be solved using $A$ and $b'$, where
$$A = \begin{bmatrix} I \\ \lambda D \end{bmatrix}, \qquad b' = \begin{bmatrix} b \\ 0 \end{bmatrix}, \qquad \hat{x} = A \backslash b'.$$
The code below shows the result for the smoothed signal.
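For my own bookkeeping, and as I understand it, this stacked system is the standard Tikhonov normal equations in disguise:
$$\min_x \left\| \begin{bmatrix} I \\ \lambda D \end{bmatrix} x - \begin{bmatrix} b \\ 0 \end{bmatrix} \right\|_2^2 = \min_x \left( \|x - b\|_2^2 + \lambda^2 \|Dx\|_2^2 \right) \quad\Longleftrightarrow\quad (I + \lambda^2 D^T D)\,\hat{x} = b,$$
so the backslash call returns the unique minimizer ($A$ has full column rank).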
With the gradient-descent version, I now try to minimize
$$f(\hat{x}) = \tfrac{1}{2}\|\hat{x} - b\|_2^2 + \tfrac{\lambda}{2}\|D\hat{x}\|_2^2,$$
where $D$ is the same gradient matrix as above. I do this by stepping in the negative direction of the gradient of the function, which I believe is
$$\nabla f(\hat{x}) = (\hat{x} - b) - \lambda\,\Delta\hat{x},$$
with $\Delta$ the Laplace operator. But this code produces garbage. Can anyone spot my error?
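To spell out the gradient I have in mind, under the objective above: the data term contributes $\hat{x} - b$, the smoothness term contributes $\lambda D^T D\hat{x}$, and because $D$ is a first-order difference matrix, $D^T D$ acts away from the boundary like the negative discrete Laplacian (a convolution with $-[1\ {-2}\ 1]$), so
$$\nabla f(\hat{x}) = (\hat{x} - b) + \lambda D^T D\hat{x} \approx (\hat{x} - b) - \lambda\,\Delta\hat{x}.$$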
Code:
function tik_1d
signal = sin((1:512)/128*2*pi);             % clean test signal: four periods of a sine
signal = signal + randn(size(signal))*0.05; % add Gaussian noise
lambda = 15;
niter = 50;
signal_lsq = tik_lsq(signal, lambda);
signal_opt = tik_opt(signal, lambda, niter);
figure(1); set(gcf, 'color', 'w');
subplot(3, 1, 1); plot(signal); title('b');
subplot(3, 1, 2); plot(signal_lsq); title('LSQ x');
subplot(3, 1, 3); plot(signal_opt); title('GD x');
end
function signal_lsq = tik_lsq(signal, lambda)
grad = gradient_matrix(signal)*lambda; %subfunction see below
A = [eye(numel(signal)); grad];
s = signal';
b = [s; zeros(size(s))];
signal_lsq = A\b;
end
function m = gradient_matrix(signal)
n = numel(signal);
diag1 = ones(n,1);
diag2 = -diag1;
m = spdiags([diag1 diag2], [0 1], n, n); % sparse first-order (forward) difference matrix
end
function signal_opt = tik_opt(signal, lambda, niter)
e = zeros(niter,1);
signal_opt = signal;
tau = 0.1;
for n = 1:niter
e(n) = eval_f(signal_opt, signal, lambda); %subfunction see below
diff = tau*grad_f(signal_opt, signal, lambda); %subfunction see below
signal_opt = signal_opt - diff;
end
figure(2); plot(e);
end
function e = eval_f(signal_rec, signal, lambda)
signal_grad = conv(signal_rec, [1 -1], 'same'); % first-order difference via convolution
e = 0.5*norm(signal_rec-signal) + norm(lambda*signal_grad);
end
function g = grad_f(signal_rec, signal, lambda)
signal_lap = conv(signal_rec, [1 -2 1], 'same'); % discrete Laplacian via convolution
g = (signal - signal_rec) + lambda*(signal_lap);
end
Output:
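One sanity check I could also run (a sketch, not part of the script above; the helper name check_gradient is mine) is to paste the following as another subfunction into the same file and call it from tik_1d, comparing grad_f against a finite-difference gradient of eval_f:
function check_gradient
b = randn(1, 32);              % small random "measured" signal
x = randn(1, 32);              % random point at which to test the gradient
lambda = 15;
g_analytic = grad_f(x, b, lambda);
g_numeric = zeros(size(x));
h = 1e-6;
for k = 1:numel(x)
    xp = x; xp(k) = xp(k) + h; % perturb one sample up ...
    xm = x; xm(k) = xm(k) - h; % ... and down
    g_numeric(k) = (eval_f(xp, b, lambda) - eval_f(xm, b, lambda)) / (2*h);
end
fprintf('max |analytic - numeric| gradient entry: %g\n', ...
    max(abs(g_analytic - g_numeric)));
end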
Update: here is the code incorporating Christian Clason's suggestion:
function tik_1d
signal = sin((1:512)/128*2*pi);
signal = signal + randn(size(signal))*0.05;
lambda = 15;
niter = 500;
signal_lsq = tik_lsq(signal, lambda);
signal_opt = tik_opt(signal, lambda, niter);
figure(1); set(gcf, 'color', 'w');
subplot(4, 1, 1); plot(signal); title('b'); ylim([-1 1]);
subplot(4, 1, 2); plot(signal_lsq); title('LSQ x'); ylim([-1 1]);
subplot(4, 1, 3); plot(signal_opt); title('GD x'); ylim([-1 1]);
subplot(4, 1, 4); plot(signal_opt - signal_lsq'); title('GD x - LSQ x'); ylim([-1 1]);
end
function signal_lsq = tik_lsq(signal, lambda)
grad = gradient_matrix(signal)*sqrt(lambda); %subfunction see below
A = [eye(numel(signal)); grad];
s = signal';
b = [s; zeros(size(s))];
signal_lsq = A\b;
end
function m = gradient_matrix(signal)
n = numel(signal);
diag1 = ones(n,1);
diag2 = -diag1;
m = spdiags([diag1 diag2], [0 1], n, n);
end
function signal_opt = tik_opt(signal, lambda, niter)
e = zeros(niter,1);
signal_opt = signal;
tau = 0.001;
for n = 1:niter
e(n) = eval_f(signal_opt, signal, lambda); %subfunction see below
diff = tau*grad_f(signal_opt, signal, lambda); %subfunction see below
signal_opt = signal_opt - diff;
end
figure(2); plot(e);
end
function e = eval_f(signal_rec, signal, lambda)
signal_grad = conv(signal_rec, [1 -1], 'same');
e = 0.5*norm(signal_rec-signal) + norm(lambda*signal_grad);
end
function g = grad_f(signal_rec, signal, lambda)
signal_lap = conv(signal_rec, [1 -2 1], 'same');
g = (signal - signal_rec) - lambda*(signal_lap);
end
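A small check I might append at the end of tik_1d (not part of the listing above) to put a number on the remaining gap between the two reconstructions:
fprintf('max |GD x - LSQ x|  : %g\n', max(abs(signal_opt - signal_lsq')));
fprintf('relative difference : %g\n', norm(signal_opt - signal_lsq')/norm(signal_lsq));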
Output:

