function updateMetricsDefault(self, it)
ensemble = self.ensemble;
lqn = self.lqn;
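
% The update proceeds in stages: pull per-activity response, residence and
% throughput metrics (RN/WN/TN) from the latest layer results; aggregate them
% into entry service times; refit the service and throughput processes exposed
% to the other layers; and recompute the caller probabilities (ptaskcallers).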

% obtain the activity service times
self.servt = zeros(lqn.nidx,1);
self.residt = zeros(lqn.nidx,1);
for r=1:size(self.servt_classes_updmap,1)
    idx = self.servt_classes_updmap(r,1);
    aidx = self.servt_classes_updmap(r,2);
    nodeidx = self.servt_classes_updmap(r,3);
    classidx = self.servt_classes_updmap(r,4);

    % store the residence times and tput at this layer to become
    % the servt / tputs of aidx in another layer, as needed
    iter_min = min(30,ceil(self.options.iter_max/4));
    wnd_size = (it-self.averagingstart+1);
    if ~isempty(self.averagingstart) && it>=iter_min % assume steady-state
        self.servt(aidx) = 0;
        self.residt(aidx) = 0;
        self.tput(aidx) = 0;
        for w = 0:(wnd_size-1) % average the last wnd_size iterates
            self.servt(aidx) = self.servt(aidx) + self.results{end-w,self.idxhash(idx)}.RN(nodeidx,classidx) / wnd_size;
            self.residt(aidx) = self.residt(aidx) + self.results{end-w,self.idxhash(idx)}.WN(nodeidx,classidx) / wnd_size;
            self.tput(aidx) = self.tput(aidx) + self.results{end-w,self.idxhash(idx)}.TN(nodeidx,classidx) / wnd_size;
        end
    else
        self.servt(aidx) = self.results{end,self.idxhash(idx)}.RN(nodeidx,classidx);
        self.residt(aidx) = self.results{end,self.idxhash(idx)}.WN(nodeidx,classidx);
        self.tput(aidx) = self.results{end,self.idxhash(idx)}.TN(nodeidx,classidx);
    end
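
    % From iteration iter_min onwards (with averaging active), the estimates
    % above are moving averages over the last wnd_size solver iterates;
    % beforehand, only the latest iterate is used.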

    % Fix for async-only entry targets: use RN (response time per visit) for residt.
    % The host layer closed model incorrectly splits residence time (WN) between
    % activities when an entry only receives async calls (no sync callers).
    % For async-only entries, use RN instead of WN since the async arrivals
    % don't share the closed chain's visit ratio - each async arrival gets
    % the full response time per visit.
    if aidx > lqn.ashift && aidx <= lqn.ashift + lqn.nacts
        % This is an activity - find its bound entry
        for eidx = (lqn.eshift+1):(lqn.eshift+lqn.nentries)
            if full(lqn.graph(eidx, aidx)) > 0
                % Found the bound entry - check if it is async-only
                hasSyncCallers = full(any(lqn.issynccaller(:, eidx)));
                hasAsyncCallers = full(any(lqn.isasynccaller(:, eidx)));
                if hasAsyncCallers && ~hasSyncCallers
                    % Async-only target: use RN (response time per visit)
                    % instead of WN (residence time with visit ratio)
                    self.residt(aidx) = self.servt(aidx); % servt already holds RN
                end
            end
        end
    end

    % Apply under-relaxation if enabled and not on the first iteration
    omega = self.relax_omega;
    if omega < 1.0 && it > 1
        if ~isnan(self.servt_prev(aidx))
            self.servt(aidx) = omega * self.servt(aidx) + (1 - omega) * self.servt_prev(aidx);
        end
        if ~isnan(self.residt_prev(aidx))
            self.residt(aidx) = omega * self.residt(aidx) + (1 - omega) * self.residt_prev(aidx);
        end
        if ~isnan(self.tput_prev(aidx))
            self.tput(aidx) = omega * self.tput(aidx) + (1 - omega) * self.tput_prev(aidx);
        end
    end

    % Store current values for the next iteration
    self.servt_prev(aidx) = self.servt(aidx);
    self.residt_prev(aidx) = self.residt(aidx);
    self.tput_prev(aidx) = self.tput(aidx);

    self.servtproc{aidx} = Exp.fitMean(self.servt(aidx));
    self.tputproc{aidx} = Exp.fitRate(self.tput(aidx));
end
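
% The under-relaxed update omega*x + (1-omega)*x_prev is a convex combination
% of the new and previous iterates; omega < 1 damps oscillations of the layer
% fixed-point iteration at the cost of slower convergence.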

% Phase-2 support: split activity service times by phase
% Note: the overtaking probability is computed later, after the entry throughput is available

% Reset phase-specific arrays
self.servt_ph1 = zeros(lqn.nidx, 1);
self.servt_ph2 = zeros(lqn.nidx, 1);

% Split activity service times by phase
for a = 1:lqn.nacts
    aidx = lqn.ashift + a;
    if lqn.actphase(a) == 1
        self.servt_ph1(aidx) = self.servt(aidx);
    else
        self.servt_ph2(aidx) = self.servt(aidx);
    end
end

% Aggregate phase service times to entry level
for e = 1:lqn.nentries
    eidx = lqn.eshift + e;
    acts = lqn.actsof{eidx};
    for aidx = acts(:)'
        a = aidx - lqn.ashift;
        if a > 0 && a <= lqn.nacts
            if lqn.actphase(a) == 1
                self.servt_ph1(eidx) = self.servt_ph1(eidx) + self.servt_ph1(aidx);
            else
                self.servt_ph2(eidx) = self.servt_ph2(eidx) + self.servt_ph2(aidx);
            end
        end
    end
end
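
% After the split, servt_ph1 + servt_ph2 partitions each activity's service
% time by its phase, and the entry-level sums give the phase-1 and phase-2
% demand contributed by the activities bound to each entry.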

% obtain the throughput for activities in thinkt_classes_updmap (needed for async calls);
% this ensures tputproc is set for activities that make async calls from client nodes
for r=1:size(self.thinkt_classes_updmap,1)
    idx = self.thinkt_classes_updmap(r,1);
    aidx = self.thinkt_classes_updmap(r,2);
    nodeidx = self.thinkt_classes_updmap(r,3);
    classidx = self.thinkt_classes_updmap(r,4);

    % only update if not already set by the servt_classes_updmap processing
    if isempty(self.tputproc) || length(self.tputproc) < aidx || isempty(self.tputproc{aidx})
        iter_min = min(30,ceil(self.options.iter_max/4));
        wnd_size = (it-self.averagingstart+1);
        if ~isempty(self.averagingstart) && it>=iter_min % assume steady-state
            self.tput(aidx) = 0;
            for w = 0:(wnd_size-1) % average the last wnd_size iterates
                self.tput(aidx) = self.tput(aidx) + self.results{end-w,self.idxhash(idx)}.TN(nodeidx,classidx) / wnd_size;
            end
        else
            self.tput(aidx) = self.results{end,self.idxhash(idx)}.TN(nodeidx,classidx);
        end
        self.tputproc{aidx} = Exp.fitRate(self.tput(aidx));
    end
end

% TODO: obtain the join times
%self.joint = zeros(lqn.nidx,1);
%joinedacts = find(lqn.actpretype == ActivityPrecedenceType.PRE_AND)';

% obtain the call residence times
self.callservt = zeros(lqn.ncalls,1);
self.callresidt = zeros(lqn.ncalls,1);
for r=1:size(self.call_classes_updmap,1)
    idx = self.call_classes_updmap(r,1);
    cidx = self.call_classes_updmap(r,2);
    nodeidx = self.call_classes_updmap(r,3);
    classidx = self.call_classes_updmap(r,4);
    if self.call_classes_updmap(r,3) > 1
        self.callservt(cidx) = 0;
        self.callservt(cidx) = self.results{end, self.idxhash(idx)}.RN(nodeidx,classidx) * self.lqn.callproc{cidx}.getMean;
        self.callresidt(cidx) = self.results{end, self.idxhash(idx)}.WN(nodeidx,classidx);

        % Apply under-relaxation to the call service times
        omega = self.relax_omega;
        if omega < 1.0 && it > 1 && ~isnan(self.callservt_prev(cidx))
            self.callservt(cidx) = omega * self.callservt(cidx) + (1 - omega) * self.callservt_prev(cidx);
        end
        self.callservt_prev(cidx) = self.callservt(cidx);
    end
end

% then resolve the entry servt by summing up these contributions
%entry_servt = zeros(lqn.nidx,1);
entry_servt = self.servtmatrix*[self.residt; self.callresidt(:)]; % sum the residT of all the activities connected to each entry
entry_servt(1:lqn.eshift) = 0;
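
% servtmatrix maps the stacked vector [activity residence times; call
% residence times] linearly onto entries, so entry_servt(eidx) aggregates the
% residence times of the activities and calls connected to entry eidx.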

% Propagate forwarding calls: add the target entry's service time to the source entry.
% When e0 forwards to e1 with probability p, callers of e0 see:
% e0's service + p * e1's service.
% Process in topological order to handle forwarding chains correctly.
for cidx = 1:lqn.ncalls
    if lqn.calltype(cidx) == CallType.FWD
        source_eidx = lqn.callpair(cidx, 1);
        target_eidx = lqn.callpair(cidx, 2);
        fwd_prob = lqn.callproc{cidx}.getMean();

        % Get the target entry's service time
        target_servt = entry_servt(target_eidx);

        % If the target entry has no computed service time (forwarding-only target),
        % compute it directly from its activities' host demands
        if target_servt == 0 || isnan(target_servt)
            target_servt = 0;
            % Sum the host demands of all activities bound to this entry
            for aidx = lqn.actsof{target_eidx}
                if ~isempty(lqn.hostdem{aidx})
                    target_servt = target_servt + lqn.hostdem{aidx}.getMean();
                end
            end
        end

        entry_servt(source_eidx) = entry_servt(source_eidx) + fwd_prob * target_servt;
    end
end
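
% Worked example (illustrative numbers): if e0 forwards to e1 with
% fwd_prob = 0.3 and entry_servt(e1) = 2.0, then callers of e0 see an extra
% 0.3 * 2.0 = 0.6 added to entry_servt(e0).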

% this block fixes the problem that ResidT is scaled so that the
% task has Vtask=1, but in call servt the entries need to have Ventry=1
for eidx=(lqn.eshift+1):(lqn.eshift+lqn.nentries)
    tidx = lqn.parent(eidx); % task of the entry
    hidx = lqn.parent(tidx); % host of the task
    if ~self.ignore(tidx) && ~self.ignore(hidx)
        % Check if this entry has sync callers (which create closed classes)
        hasSyncCallers = full(any(lqn.issynccaller(:, eidx)));
        if hasSyncCallers
            % Original logic for entries with sync callers:
            % get the class in the host layer of the task and of the entry
            tidxclass = ensemble{self.idxhash(hidx)}.attribute.tasks(find(ensemble{self.idxhash(hidx)}.attribute.tasks(:,2) == tidx),1);
            eidxclass = ensemble{self.idxhash(hidx)}.attribute.entries(find(ensemble{self.idxhash(hidx)}.attribute.entries(:,2) == eidx),1);
            task_tput = sum(self.results{end,self.idxhash(hidx)}.TN(ensemble{self.idxhash(hidx)}.attribute.clientIdx,tidxclass));
            entry_tput = sum(self.results{end,self.idxhash(hidx)}.TN(ensemble{self.idxhash(hidx)}.attribute.clientIdx,eidxclass));
            %entry_servt_refstat = self.ensemble{self.idxhash(hidx)}.classes{tidxclass}.refstat;
            %entry_servt_z = entry_servt_refstat.serviceProcess{self.ensemble{self.idxhash(hidx)}.classes{tidxclass}.index}.getMean();
            %entry_servt(eidx) = self.ensemble{self.idxhash(hidx)}.classes{tidxclass}.population / entry_tput - entry_servt_z;
            self.servt(eidx) = entry_servt(eidx) * task_tput / max(GlobalConstants.Zero, entry_tput);
            self.residt(eidx) = entry_servt(eidx) * task_tput / max(GlobalConstants.Zero, entry_tput);
        else
            % For async-only targets, use entry_servt directly:
            % no throughput ratio scaling is needed since there are no closed classes
            self.servt(eidx) = entry_servt(eidx);
            self.residt(eidx) = entry_servt(eidx);
        end
    end
end
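
% The factor task_tput/entry_tput rescales from task visits (Vtask = 1) to
% entry visits (Ventry = 1): e.g., an entry receiving half of the task
% throughput has a per-visit time twice the task-scaled residence time.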

% Phase-2 support: compute the overtaking probability and apply the correction.
% This must happen AFTER the entry throughput is available (computed above).
for e = 1:lqn.nentries
    eidx = lqn.eshift + e;
    tidx = lqn.parent(eidx);

    if self.servt_ph2(eidx) > GlobalConstants.FineTol
        % Get the entry throughput (fall back to the task throughput as an approximation)
        if self.tput(eidx) > GlobalConstants.FineTol
            entry_tput = self.tput(eidx);
        elseif self.tput(tidx) > GlobalConstants.FineTol
            entry_tput = self.tput(tidx);
        else
            entry_tput = 0;
        end

        % Compute the overtaking probability now that throughput is available
        if entry_tput > GlobalConstants.FineTol
            self.prOvertake(e) = self.overtake_prob(eidx);
        else
            self.prOvertake(e) = 0;
        end

        % Caller's response time = phase-1 + P(overtake) * phase-2:
        % phase 2 delays the caller only if overtaking occurs
        overtake_delay = self.prOvertake(e) * self.servt_ph2(eidx);

        % The caller sees phase-1 plus the overtaking correction (not the full phase-2);
        % self.servt(eidx) remains unchanged (phase-1 + phase-2) for the utilization calculation
        self.residt(eidx) = self.servt_ph1(eidx) + overtake_delay;
    end
end
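
% Net effect: callers are delayed by servt_ph1 + prOvertake * servt_ph2, while
% self.servt keeps the full two-phase demand so that server utilization is
% still computed correctly.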

%self.servt(lqn.eshift+1:lqn.eshift+lqn.nentries) = entry_servt(lqn.eshift+1:lqn.eshift+lqn.nentries);
%entry_servt((lqn.ashift+1):end) = 0;
for r=1:size(self.call_classes_updmap,1)
    cidx = self.call_classes_updmap(r,2);
    eidx = lqn.callpair(cidx,2);
    if self.call_classes_updmap(r,3) > 1
        self.servtproc{eidx} = Exp.fitMean(self.servt(eidx));
    end
end

% determine the call response time processes
for r=1:size(self.call_classes_updmap,1)
    cidx = self.call_classes_updmap(r,2);
    eidx = lqn.callpair(cidx,2);
    if self.call_classes_updmap(r,3) > 1
        % note that the respt is per visit, so the number of calls is 1
        self.callservt(cidx) = self.servt(eidx);
        self.callservtproc{cidx} = self.servtproc{eidx};
    else
        % note that the respt is per visit, so the number of calls is 1
        self.callservtproc{cidx} = Exp.fitMean(self.callservt(cidx));
    end
end

self.ptaskcallers = zeros(size(self.ptaskcallers));
% determine ptaskcallers for direct callers to tasks
for t = 1:lqn.ntasks
    tidx = lqn.tshift + t;
    [calling_idx, ~] = find(lqn.iscaller(:, lqn.entriesof{tidx})); %#ok<ASGLU>
    callers = intersect(lqn.tshift+(1:lqn.ntasks), unique(calling_idx)');
    caller_tput = zeros(1,lqn.ntasks);
    for caller_idx=callers(:)'
        caller_idxclass = self.ensemble{self.idxhash(tidx)}.attribute.tasks(1+find(self.ensemble{self.idxhash(tidx)}.attribute.tasks(2:end,2) == caller_idx),1);
        caller_tput(caller_idx-lqn.tshift) = sum(self.results{end,self.idxhash(tidx)}.TN(self.ensemble{self.idxhash(tidx)}.attribute.clientIdx,caller_idxclass));
    end
    self.ptaskcallers(tidx,(lqn.tshift+1):(lqn.tshift+lqn.ntasks)) = caller_tput / max(GlobalConstants.Zero, sum(caller_tput));
end

% determine ptaskcallers for direct callers to hosts
for hidx = 1:lqn.nhosts
    if ~self.ignore(hidx)
        caller_tput = zeros(1,lqn.ntasks);
        callers = lqn.tasksof{hidx};
        for caller_idx=callers
            caller_idxclass = self.ensemble{self.idxhash(hidx)}.attribute.tasks(find(self.ensemble{self.idxhash(hidx)}.attribute.tasks(:,2) == caller_idx),1);
            caller_tput(caller_idx-lqn.tshift) = caller_tput(caller_idx-lqn.tshift) + sum(self.results{end,self.idxhash(hidx)}.TN(self.ensemble{self.idxhash(hidx)}.attribute.clientIdx,caller_idxclass));
        end
        self.ptaskcallers(hidx,(lqn.tshift+1):(lqn.tshift+lqn.ntasks)) = caller_tput / max(GlobalConstants.Zero, sum(caller_tput));
    end
end

% impute call probability using a DTMC random walk on the taskcaller graph
P = self.ptaskcallers;
P = dtmc_makestochastic(P); % hold mass at reference stations when there are no callers to walk back to
self.ptaskcallers_step{1} = P; % configure step 1

for hidx = 1:lqn.nhosts
    for tidx = lqn.tasksof{hidx}
        % initialize the probability mass on tidx
        x0 = zeros(1, length(self.ptaskcallers));
        x0(tidx) = 1;

        % start the walk backward to impute the probability of indirect callers
        x = x0*P; % one step gives the direct callers, already stored in ptaskcallers_step{1}
        for step=2:self.nlayers % upper bound on the maximum dag height
            x = x*P; % take one more step backward along the caller graph
            % here self.ptaskcallers_step{step}(tidx,remidx) is the
            % probability that the request to tidx comes from remidx
            self.ptaskcallers_step{step}(tidx,:) = x(:);
            % here self.ptaskcallers_step{step}(hidx,remidx) is the
            % probability that the request to hidx comes from remidx
            % through the call that tidx puts on hidx, which is
            % weighted by the relative tput of tidx on the tasks running on hidx
            self.ptaskcallers_step{step}(hidx,:) = self.ptaskcallers(hidx,tidx)*x(:)';
            if sum(x(find(lqn.isref)))>1.0-self.options.tol %#ok<FNDSB>
                % if all the probability mass has flowed back to the
                % reference stations, then stop
                break
            end
            self.ptaskcallers(:,tidx) = max([self.ptaskcallers(:,tidx), x(:)],[],2);
        end
    end
end
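
% After k steps of the walk, x equals the tidx-th row of P^k: the probability
% that a request observed at tidx originated k hops up the caller chain. The
% walk stops early once nearly all mass has reached the reference tasks.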

self.ensemble = ensemble;
end