LINE Solver
MATLAB API documentation
Loading...
Searching...
No Matches
updateMetricsDefault.m
function updateMetricsDefault(self, it)
% UPDATEMETRICSDEFAULT Refresh per-activity/entry/call metrics after a
% fixed-point iteration of the layered solver.
%
% Reads the most recent per-layer results in self.results and updates:
%   self.servt, self.residt, self.tput        - activity-level metrics
%   self.joint                                - AND-join synchronization delays
%   self.callservt, self.callresidt           - call-level metrics
%   self.servtproc, self.tputproc,
%   self.callservtproc                        - fitted exponential processes
% Applies steady-state window averaging, under-relaxation, Inf/NaN
% recovery, async-only-entry and forwarding-call corrections, and the
% phase-2 overtaking adjustment.
%
% Parameters:
%   self - solver object holding the LQN, ensemble, and iteration state
%   it   - current fixed-point iteration number (1-based)
ensemble = self.ensemble;
lqn = self.lqn;

% obtain the activity service times
self.servt = zeros(lqn.nidx,1);
self.residt = zeros(lqn.nidx,1);
for r=1:size(self.servt_classes_updmap,1)
    % updmap row: [layer index, activity index, station index, class index]
    idx = self.servt_classes_updmap(r,1);
    aidx = self.servt_classes_updmap(r,2);
    nodeidx = self.servt_classes_updmap(r,3);
    classidx = self.servt_classes_updmap(r,4);

    % store the residence times and tput at this layer to become
    % the servt / tputs of aidx in another layer, as needed
    % iter_min: minimum iteration before window averaging kicks in
    iter_min = min(30,ceil(self.options.iter_max/4));
    % wnd_size: number of iterations since averaging started (inclusive)
    wnd_size = (it-self.averagingstart+1);

    % Compute residt from QN/TN_ref instead of WN to avoid
    % fork+loop visit distortion (WN uses visits from DTMC solve
    % which are distorted when Fork non-stochastic rows coexist
    % with loop back-edges in the routing matrix)
    layerIdx = self.idxhash(idx);
    layerSn = ensemble{layerIdx}.getStruct();
    % locate the chain containing this class, then its reference class
    % and the reference station of the class, used to read TN_ref below
    c = find(layerSn.chains(:, classidx), 1);
    refclass_c = layerSn.refclass(c);
    refstat_k = layerSn.refstat(classidx);

    if ~isempty(self.averagingstart) && it>=iter_min % assume steady-state
        % average the last wnd_size iterations' results to damp oscillations
        self.servt(aidx) = 0;
        self.residt(aidx) = 0;
        self.tput(aidx) = 0;
        for w=0:(wnd_size-1)
            self.servt(aidx) = self.servt(aidx) + self.results{end-w,layerIdx}.RN(nodeidx,classidx) / wnd_size;
            TN_ref = self.results{end-w,layerIdx}.TN(refstat_k, refclass_c);
            if TN_ref > GlobalConstants.FineTol
                % residence time via Little's law on the reference throughput
                self.residt(aidx) = self.residt(aidx) + self.results{end-w,layerIdx}.QN(nodeidx,classidx) / TN_ref / wnd_size;
            else
                % reference throughput too small: fall back to WN
                self.residt(aidx) = self.residt(aidx) + self.results{end-w,layerIdx}.WN(nodeidx,classidx) / wnd_size;
            end
            self.tput(aidx) = self.tput(aidx) + self.results{end-w,layerIdx}.TN(nodeidx,classidx) / wnd_size;
        end
    else
        % transient phase: use only the latest layer result
        self.servt(aidx) = self.results{end,layerIdx}.RN(nodeidx,classidx);
        TN_ref = self.results{end,layerIdx}.TN(refstat_k, refclass_c);
        QN_val = self.results{end,layerIdx}.QN(nodeidx,classidx);
        if TN_ref > GlobalConstants.FineTol
            self.residt(aidx) = QN_val / TN_ref;
        else
            self.residt(aidx) = self.results{end,layerIdx}.WN(nodeidx,classidx);
        end
        self.tput(aidx) = self.results{end,layerIdx}.TN(nodeidx,classidx);
    end

    % Fix for async-only entry targets: use RN (response time per visit) for residt
    % The host layer closed model incorrectly splits residence time (WN) between
    % activities when an entry only receives async calls (no sync callers).
    % For async-only entries, use RN instead of WN since the async arrivals
    % don't share the closed chain's visit ratio - each async arrival gets
    % the full response time per visit.
    if aidx > lqn.ashift && aidx <= lqn.ashift + lqn.nacts
        % This is an activity - find its bound entry
        for eidx = (lqn.eshift+1):(lqn.eshift+lqn.nentries)
            if full(lqn.graph(eidx, aidx)) > 0
                % Found bound entry - check if async-only
                hasSyncCallers = full(any(lqn.issynccaller(:, eidx)));
                hasAsyncCallers = full(any(lqn.isasynccaller(:, eidx)));
                if hasAsyncCallers && ~hasSyncCallers
                    % Async-only target: use RN (response time per visit)
                    % instead of WN (residence time with visit ratio)
                    self.residt(aidx) = self.servt(aidx); % servt already has RN
                end
                break;
            end
        end
    end

    % Recover from Inf/NaN: snap back to previous iteration's value
    % (keeps the fixed-point iteration alive after a degenerate layer solve)
    if it > 1
        if (isinf(self.servt(aidx)) || isnan(self.servt(aidx))) && ~isnan(self.servt_prev(aidx))
            self.servt(aidx) = self.servt_prev(aidx);
        end
        if (isinf(self.residt(aidx)) || isnan(self.residt(aidx))) && ~isnan(self.residt_prev(aidx))
            self.residt(aidx) = self.residt_prev(aidx);
        end
        if (isinf(self.tput(aidx)) || isnan(self.tput(aidx))) && ~isnan(self.tput_prev(aidx))
            self.tput(aidx) = self.tput_prev(aidx);
        end
    end

    % Apply under-relaxation if enabled and not first iteration:
    % new = omega*new + (1-omega)*previous, omega in (0,1]
    omega = self.relax_omega;
    if omega < 1.0 && it > 1
        if ~isnan(self.servt_prev(aidx))
            self.servt(aidx) = omega * self.servt(aidx) + (1 - omega) * self.servt_prev(aidx);
        end
        if ~isnan(self.residt_prev(aidx))
            self.residt(aidx) = omega * self.residt(aidx) + (1 - omega) * self.residt_prev(aidx);
        end
        if ~isnan(self.tput_prev(aidx))
            self.tput(aidx) = omega * self.tput(aidx) + (1 - omega) * self.tput_prev(aidx);
        end
    end
    % Store current values for next iteration
    self.servt_prev(aidx) = self.servt(aidx);
    self.residt_prev(aidx) = self.residt(aidx);
    self.tput_prev(aidx) = self.tput(aidx);

    % Safeguard against MVA numerical instability producing extreme values
    % (matches Python _update_metrics_default max_servt guard)
    max_servt = 1e10;
    if self.servt(aidx) > 0 && self.servt(aidx) <= max_servt
        self.servtproc{aidx} = Exp.fitMean(self.servt(aidx));
    end
    self.tputproc{aidx} = Exp.fitRate(self.tput(aidx));
end

% Phase-2 support: split activity service times by phase
% Note: Overtaking probability is computed later after entry throughput is available
if self.hasPhase2
    % Reset phase-specific arrays
    self.servt_ph1 = zeros(lqn.nidx, 1);
    self.servt_ph2 = zeros(lqn.nidx, 1);

    % Split activity service times by phase
    for a = 1:lqn.nacts
        aidx = lqn.ashift + a;
        if lqn.actphase(a) == 1
            self.servt_ph1(aidx) = self.servt(aidx);
        else
            self.servt_ph2(aidx) = self.servt(aidx);
        end
    end

    % Aggregate phase service times to entry level
    for e = 1:lqn.nentries
        eidx = lqn.eshift + e;
        acts = lqn.actsof{eidx};
        for aidx = acts
            a = aidx - lqn.ashift;
            if a > 0 && a <= lqn.nacts
                if lqn.actphase(a) == 1
                    self.servt_ph1(eidx) = self.servt_ph1(eidx) + self.servt_ph1(aidx);
                else
                    self.servt_ph2(eidx) = self.servt_ph2(eidx) + self.servt_ph2(aidx);
                end
            end
        end
    end
end

% obtain throughput for activities in thinkt_classes_updmap (needed for async calls)
% this ensures tputproc is set for activities that make async calls from client nodes
for r=1:size(self.thinkt_classes_updmap,1)
    idx = self.thinkt_classes_updmap(r,1);
    aidx = self.thinkt_classes_updmap(r,2);
    nodeidx = self.thinkt_classes_updmap(r,3);
    classidx = self.thinkt_classes_updmap(r,4);

    % only update if not already set by servt_classes_updmap processing
    if isempty(self.tputproc) || length(self.tputproc) < aidx || isempty(self.tputproc{aidx})
        % same window-averaging scheme as for servt above
        iter_min = min(30,ceil(self.options.iter_max/4));
        wnd_size = (it-self.averagingstart+1);
        if ~isempty(self.averagingstart) && it>=iter_min % assume steady-state
            self.tput(aidx) = 0;
            for w=0:(wnd_size-1)
                self.tput(aidx) = self.tput(aidx) + self.results{end-w,self.idxhash(idx)}.TN(nodeidx,classidx) / wnd_size;
            end
        else
            self.tput(aidx) = self.results{end,self.idxhash(idx)}.TN(nodeidx,classidx);
        end
        self.tputproc{aidx} = Exp.fitRate(self.tput(aidx));
    end
end

% Obtain the join times for AND-Join activities
% For each AND-join activity, compute the synchronization delay as
% the maximum of predecessor branch service times.
self.joint = zeros(lqn.nidx,1);
joinedacts = find(lqn.actpretype == ActivityPrecedenceType.PRE_AND)';
for aidx = joinedacts
    % Find predecessor activities in the activity graph
    preds = find(lqn.graph(:, aidx) > 0)';
    pred_servts = [];
    for pidx = preds
        % keep only predecessors that are activities (not entries/tasks)
        if pidx > lqn.ashift && pidx <= lqn.ashift + lqn.nacts
            pred_servts(end+1) = self.servt(pidx); %#ok<AGROW>
        end
    end
    if ~isempty(pred_servts)
        % Join time = maximum of predecessor branch service times
        % This represents the synchronization delay at the AND-join point.
        % For exponentially distributed branch times with means t_1,...,t_k,
        % approximate E[max] using the Clark (1961) two-moment method
        % recursively for k branches.
        if length(pred_servts) == 1
            self.joint(aidx) = pred_servts(1);
        else
            % Recursive two-moment approximation for max of exponentials
            % Start with first two branches, then fold in remaining
            mu_max = pred_servts(1);
            for bi = 2:length(pred_servts)
                t1 = mu_max;
                t2 = pred_servts(bi);
                if t1 > 0 && t2 > 0
                    % For exponentials: E[max(X1,X2)] = E[X1] + E[X2] - E[min(X1,X2)]
                    % E[min(X1,X2)] = 1/(1/t1 + 1/t2) = t1*t2/(t1+t2)
                    mu_max = t1 + t2 - t1*t2/(t1+t2);
                else
                    % degenerate branch (zero mean): max reduces to the larger mean
                    mu_max = max(t1, t2);
                end
            end
            self.joint(aidx) = mu_max;
        end
    end
end

% obtain the call residence time
self.callservt = zeros(lqn.ncalls,1);
self.callresidt = zeros(lqn.ncalls,1);
for r=1:size(self.call_classes_updmap,1)
    idx = self.call_classes_updmap(r,1);
    cidx = self.call_classes_updmap(r,2);
    nodeidx = self.call_classes_updmap(r,3);
    classidx = self.call_classes_updmap(r,4);
    if self.call_classes_updmap(r,3) > 1
        % NOTE(review): nodeidx equals call_classes_updmap(r,3), so the
        % nodeidx == 1 branch below is unreachable under the enclosing
        % guard; callservt then simply keeps its zero initialization.
        if nodeidx == 1
            self.callservt(cidx) = 0;
        else
            % scale per-visit response time by the mean number of calls
            self.callservt(cidx) = self.results{end, self.idxhash(idx)}.RN(nodeidx,classidx) * self.lqn.callproc{cidx}.getMean;
            self.callresidt(cidx) = self.results{end, self.idxhash(idx)}.WN(nodeidx,classidx);
        end
        % Growth rate capping removed - it prevents callservt from converging
        % to the correct value when initial values are near-zero (Immediate)
        % Apply under-relaxation to call service times
        omega = self.relax_omega;
        if omega < 1.0 && it > 1 && ~isnan(self.callservt_prev(cidx))
            self.callservt(cidx) = omega * self.callservt(cidx) + (1 - omega) * self.callservt_prev(cidx);
        end
        self.callservt_prev(cidx) = self.callservt(cidx);
        self.callresidt_prev(cidx) = self.callresidt(cidx);
    end
end

% then resolve the entry servt summing up these contributions
entry_servt = self.servtmatrix*[self.residt;self.callresidt(:)];
entry_servt(1:lqn.eshift) = 0;

% Update forwarding call service times from the target entry's service time.
% Instead of bundling the FWD target's time into the source entry (which
% overestimates task utilization), we set the FWD call's own service time.
% The FWD call class in the task layer models the additional blocking time
% the caller sees while the forwarding target processes the request.
for cidx = 1:lqn.ncalls
    if lqn.calltype(cidx) == CallType.FWD
        target_eidx = lqn.callpair(cidx, 2);
        % for FWD calls, callproc mean is used as the forwarding probability
        fwd_prob = lqn.callproc{cidx}.getMean();

        % Get target entry's service time
        target_servt = entry_servt(target_eidx);

        % If target entry doesn't have computed service time,
        % compute it directly from its activities' host demands
        if target_servt == 0 || isnan(target_servt)
            target_servt = 0;
            for aidx = lqn.actsof{target_eidx}
                if ~isempty(lqn.hostdem{aidx})
                    target_servt = target_servt + lqn.hostdem{aidx}.getMean();
                end
            end
        end

        if target_servt > 0
            self.callservt(cidx) = target_servt * fwd_prob;
            self.callresidt(cidx) = target_servt;
            self.callservtproc{cidx} = Exp.fitMean(self.callservt(cidx));
        end
    end
end

% Add forwarding delay to the SYNC calls that trigger forwarding.
% The caller is blocked until the entire forwarding chain completes,
% so the SYNC call's service time must include the FWD delay.
for cidx = 1:lqn.ncalls
    if lqn.calltype(cidx) == CallType.FWD
        source_eidx = lqn.callpair(cidx, 1);
        % find every SYNC call targeting the entry that forwards
        for sync_cidx = 1:lqn.ncalls
            if lqn.calltype(sync_cidx) == CallType.SYNC && lqn.callpair(sync_cidx, 2) == source_eidx
                self.callservt(sync_cidx) = self.callservt(sync_cidx) + self.callservt(cidx);
                self.callresidt(sync_cidx) = self.callresidt(sync_cidx) + self.callresidt(cidx);
            end
        end
    end
end
% Recompute entry_servt with forwarding-adjusted callresidt
entry_servt = self.servtmatrix*[self.residt;self.callresidt(:)];
entry_servt(1:lqn.eshift) = 0;

% this block fixes the problem that ResidT is scaled so that the
% task has Vtask=1, but in call servt the entries need to have Ventry=1
for eidx=(lqn.eshift+1):(lqn.eshift+lqn.nentries)
    tidx = lqn.parent(eidx); % task of entry
    hidx = lqn.parent(tidx); % host of entry
    if ~self.ignore(tidx) && ~self.ignore(hidx)
        % Check if this entry has sync callers (which create closed classes)
        hasSyncCallers = full(any(lqn.issynccaller(:, eidx)));

        if hasSyncCallers
            % Original logic for entries with sync callers
            % get class in host layer of task and entry
            tidxclass = ensemble{self.idxhash(hidx)}.attribute.tasks(find(ensemble{self.idxhash(hidx)}.attribute.tasks(:,2) == tidx),1);
            eidxclass = ensemble{self.idxhash(hidx)}.attribute.entries(find(ensemble{self.idxhash(hidx)}.attribute.entries(:,2) == eidx),1);
            % rescale by the task-to-entry throughput ratio in the host layer
            task_tput = sum(self.results{end,self.idxhash(hidx)}.TN(ensemble{self.idxhash(hidx)}.attribute.clientIdx,tidxclass));
            entry_tput = sum(self.results{end,self.idxhash(hidx)}.TN(ensemble{self.idxhash(hidx)}.attribute.clientIdx,eidxclass));
            if entry_tput > GlobalConstants.Zero
                self.servt(eidx) = entry_servt(eidx) * task_tput / entry_tput;
                self.residt(eidx) = entry_servt(eidx) * task_tput / entry_tput;
            else
                self.servt(eidx) = entry_servt(eidx);
                self.residt(eidx) = entry_servt(eidx);
            end
        else
            % For async-only targets, use entry_servt directly
            % No throughput ratio scaling needed since there are no closed classes
            self.servt(eidx) = entry_servt(eidx);
            self.residt(eidx) = entry_servt(eidx);
        end
    end
end

% Phase-2 support: compute overtaking probability and apply correction
% This must happen AFTER entry throughput is available (computed above)
if self.hasPhase2
    for e = 1:lqn.nentries
        eidx = lqn.eshift + e;
        tidx = lqn.parent(eidx);

        if self.servt_ph2(eidx) > GlobalConstants.FineTol
            % Get entry throughput (use task throughput as approximation if entry not available)
            if self.tput(eidx) > GlobalConstants.FineTol
                entry_tput = self.tput(eidx);
            elseif self.tput(tidx) > GlobalConstants.FineTol
                entry_tput = self.tput(tidx);
            else
                entry_tput = 0;
            end

            % Compute overtaking probability now that throughput is available
            if entry_tput > GlobalConstants.FineTol
                self.prOvertake(e) = self.overtake_prob(eidx);
            else
                self.prOvertake(e) = 0;
            end

            % Caller's response time = phase-1 only + P(overtake) * phase-2
            % Phase-2 only delays if overtaking occurs
            overtake_delay = self.prOvertake(e) * self.servt_ph2(eidx);

            % The caller sees phase-1 + overtaking correction (not full phase-2)
            % self.servt(eidx) remains unchanged (phase-1 + phase-2) for utilization calculation
            self.residt(eidx) = self.servt_ph1(eidx) + overtake_delay;
        end
    end
end

%self.servt(lqn.eshift+1:lqn.eshift+lqn.nentries) = entry_servt(lqn.eshift+1:lqn.eshift+lqn.nentries);
%entry_servt((lqn.ashift+1):end) = 0;
% refresh the service processes of entries that are call targets
for r=1:size(self.call_classes_updmap,1)
    cidx = self.call_classes_updmap(r,2);
    eidx = lqn.callpair(cidx,2);
    if self.call_classes_updmap(r,3) > 1
        if self.servt(eidx) > 0
            self.servtproc{eidx} = Exp.fitMean(self.servt(eidx));
        end
    end
end

% determine call response times processes
for r=1:size(self.call_classes_updmap,1)
    cidx = self.call_classes_updmap(r,2);
    eidx = lqn.callpair(cidx,2);
    if self.call_classes_updmap(r,3) > 1
        if it==1
            % note that respt is per visit, so number of calls is 1
            % bootstrap: on the first iteration reuse the target entry's metrics
            self.callservt(cidx) = self.servt(eidx);
            self.callservtproc{cidx} = self.servtproc{eidx};
        else
            % note that respt is per visit, so number of calls is 1
            if self.callservt(cidx) > 0
                self.callservtproc{cidx} = Exp.fitMean(self.callservt(cidx));
            end
        end
    end
end

self.ensemble = ensemble;
end