LINE Solver
MATLAB API documentation
jacobianest.m
function [jac,err] = jacobianest(fun,x0)
% jacobianest: estimate of the Jacobian matrix of a vector valued function of n variables
% usage: [jac,err] = jacobianest(fun,x0)
%
%
% arguments: (input)
%  fun - (vector valued) analytical function to differentiate.
%        fun must be a function of the vector or array x0.
%
%  x0  - vector location at which to differentiate fun
%        If x0 is an nxm array, then fun is assumed to be
%        a function of n*m variables.
%
%
% arguments: (output)
%  jac - array of first partial derivatives of fun.
%        Assuming that x0 is a vector of length p
%        and fun returns a vector of length n, then
%        jac will be an array of size (n,p)
%
%  err - vector of error estimates corresponding to
%        each partial derivative in jac.
%
%
% Example: (nonlinear least squares)
%  xdata = (0:.1:1)';
%  ydata = 1+2*exp(0.75*xdata);
%  fun = @(c) ((c(1)+c(2)*exp(c(3)*xdata)) - ydata).^2;
%
%  [jac,err] = jacobianest(fun,[1 1 1])
%
%  jac =
%     -2        -2         0
%     -2.1012   -2.3222   -0.23222
%     -2.2045   -2.6926   -0.53852
%     -2.3096   -3.1176   -0.93528
%     -2.4158   -3.6039   -1.4416
%     -2.5225   -4.1589   -2.0795
%     -2.629    -4.7904   -2.8742
%     -2.7343   -5.5063   -3.8544
%     -2.8374   -6.3147   -5.0518
%     -2.9369   -7.2237   -6.5013
%     -3.0314   -8.2403   -8.2403
%
%  err =
%     5.0134e-15   5.0134e-15            0
%     5.0134e-15            0   2.8211e-14
%     5.0134e-15   8.6834e-15   1.5804e-14
%              0     7.09e-15   3.8227e-13
%     5.0134e-15   5.0134e-15   7.5201e-15
%     5.0134e-15   1.0027e-14   2.9233e-14
%     5.0134e-15            0   6.0585e-13
%     5.0134e-15   1.0027e-14   7.2673e-13
%     5.0134e-15   1.0027e-14   3.0495e-13
%     5.0134e-15   1.0027e-14   3.1707e-14
%     5.0134e-15   2.0053e-14   1.4013e-12
%
% (At [1 2 0.75], jac should be numerically zero)
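%
% Example: (sanity check at the true parameters; jac2 and err2 are
% illustrative names, reusing fun from the example above)
%  [jac2,err2] = jacobianest(fun,[1 2 0.75]);
%  max(abs(jac2(:)))  % should be ~0, up to roundoff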
%
%
% See also: derivest, gradient, gradest
%
%
% Author: John D'Errico
% e-mail: woodchips@rochester.rr.com
% Release: 1.0
% Release date: 3/6/2007

% get the length of x0 for the size of jac
nx = numel(x0);

MaxStep = 100;
StepRatio = 2.0000001;

% was a string supplied?
if ischar(fun)
  fun = str2func(fun);
end

% get fun at the center point
f0 = fun(x0);
f0 = f0(:);
n = length(f0);
if n==0
  % empty begets empty
  jac = zeros(0,nx);
  err = jac;
  return
end

% geometric sequence of 26 trial step sizes, decreasing from MaxStep
relativedelta = MaxStep*StepRatio .^(0:-1:-25);
nsteps = length(relativedelta);

% total number of derivatives we will need to take
jac = zeros(n,nx);
err = jac;
for i = 1:nx
  x0_i = x0(i);
  if x0_i ~= 0
    delta = x0_i*relativedelta;
  else
    delta = relativedelta;
  end
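
  % (note: a relative step scales with |x0_i|; when x0_i == 0 an
  %  absolute step of the same magnitudes is used instead, since a
  %  relative step would vanish there)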

  % evaluate at each step, centered around x0_i
  % difference to give a second order estimate
  fdel = zeros(n,nsteps);
  for j = 1:nsteps
    fdif = fun(swapelement(x0,i,x0_i + delta(j))) - ...
      fun(swapelement(x0,i,x0_i - delta(j)));

    fdel(:,j) = fdif(:);
  end

  % these are pure second order estimates of the
  % first derivative, for each trial delta.
  derest = fdel.*repmat(0.5 ./ delta,n,1);
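  % (for reference, column j of derest is the central difference
  %    d(h) = (f(x+h) - f(x-h))/(2*h),  h = delta(j),
  %  whose truncation error contains only even powers of h:
  %    d(h) = f'(x) + c2*h^2 + c4*h^4 + c6*h^6 + ...)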

  % The error term on these estimates has a second order
  % component, but also some 4th and 6th order terms in it.
  % Use Romberg extrapolation to improve the estimates to
  % 6th order, as well as to provide the error estimate.

  % loop here, as rombextrap coupled with the trimming
  % will get complicated otherwise.
  for j = 1:n
    [der_romb,errest] = rombextrap(StepRatio,derest(j,:),[2 4]);

    % trim off 3 estimates at each end of the scale
    nest = length(der_romb);
    trim = [1:3, nest+(-2:0)];
    [der_romb,tags] = sort(der_romb);
    der_romb(trim) = [];
    tags(trim) = [];

    errest = errest(tags);

    % now pick the estimate with the lowest predicted error
    [err(j,i),ind] = min(errest);
    jac(j,i) = der_romb(ind);
  end
end

end % mainline function end

% =======================================
% sub-functions
% =======================================
function vec = swapelement(vec,ind,val)
% swaps val in, as element ind of the vector vec
vec(ind) = val;

end % sub-function end
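
% Example: (for reference)
%  swapelement([1 2 3],2,5)  % returns [1 5 3]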

% ============================================
% subfunction - romberg extrapolation
% ============================================
function [der_romb,errest] = rombextrap(StepRatio,der_init,rombexpon)
% do romberg extrapolation for each estimate
%
%  StepRatio - Ratio decrease in step
%  der_init - initial derivative estimates
%  rombexpon - higher order terms to cancel using the romberg step
%
%  der_romb - derivative estimates returned
%  errest - error estimates
%  amp - noise amplification factor due to the romberg step
%        (not returned here; see the commented-out cond(rromb) below)

srinv = 1/StepRatio;

% this version assumes exactly two romberg terms (rombexpon = [2 4])
nexpon = length(rombexpon);
rmat = ones(nexpon+2,nexpon+1);
rmat(2,2:3) = srinv.^rombexpon;
rmat(3,2:3) = srinv.^(2*rombexpon);
rmat(4,2:3) = srinv.^(3*rombexpon);
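
% (for reference, row i of rmat corresponds to step size h*srinv^(i-1),
%  so the least squares solve below fits
%    d_i = d0 + a*srinv^(rombexpon(1)*(i-1)) + b*srinv^(rombexpon(2)*(i-1))
%  with the powers of h absorbed into a and b; d0 is then the
%  extrapolated, zero step size estimate of the derivative)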

% qr factorization used for the extrapolation as well
% as the uncertainty estimates
[qromb,rromb] = qr(rmat,0);

% the noise in the data is further amplified by the Romberg step.
% amp = cond(rromb);

% this does the extrapolation to a zero step size.
ne = length(der_init);
rhs = vec2mat(der_init,nexpon+2,ne - (nexpon+2));
rombcoefs = rromb\(qromb'*rhs);
der_romb = rombcoefs(1,:)';

% uncertainty estimate of derivative prediction
s = sqrt(sum((rhs - rmat*rombcoefs).^2,1));
rinv = rromb\eye(nexpon+1);
cov1 = sum(rinv.^2,2); % 1 spare dof
errest = s'*12.7062047361747*sqrt(cov1(1));
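% (12.7062047361747 is the 97.5% point of Student's t with 1 degree of
%  freedom, i.e. a ~95% confidence half-width on the 1 spare dof)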

end % rombextrap


% ============================================
% subfunction - vec2mat
% ============================================
function mat = vec2mat(vec,n,m)
% forms the matrix M, such that M(i,j) = vec(i+j-1)
[i,j] = ndgrid(1:n,0:m-1);
ind = i+j;
mat = vec(ind);
if n==1
  mat = mat';
end

end % vec2mat
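
% Example: (for reference)
%  vec2mat(1:5,3,3)
%  ans =
%       1     2     3
%       2     3     4
%       3     4     5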