LINE Solver
MATLAB API documentation
updateMetricsDefault.m
function updateMetricsDefault(self, it)
ensemble = self.ensemble;
lqn = self.lqn;

% obtain the activity service times
self.servt = zeros(lqn.nidx,1);
self.residt = zeros(lqn.nidx,1);
for r=1:size(self.servt_classes_updmap,1)
    idx = self.servt_classes_updmap(r,1);
    aidx = self.servt_classes_updmap(r,2);
    nodeidx = self.servt_classes_updmap(r,3);
    classidx = self.servt_classes_updmap(r,4);

    % store the residence times and tput at this layer to become
    % the servt / tputs of aidx in another layer, as needed
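    % Note: the branch below switches, once averaging has started and enough
    % iterations have elapsed, from the latest layer result to a moving
    % average over the last wnd_size-1 stored results, presumably to damp
    % oscillations of the layered fixed-point iteration near steady state.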
    iter_min = min(30,ceil(self.options.iter_max/4));
    wnd_size = (it-self.averagingstart+1);
    if ~isempty(self.averagingstart) && it>=iter_min % assume steady-state
        self.servt(aidx) = 0;
        self.residt(aidx) = 0;
        self.tput(aidx) = 0;
        for w=1:(wnd_size-1)
            self.servt(aidx) = self.servt(aidx) + self.results{end-w,self.idxhash(idx)}.RN(nodeidx,classidx) / wnd_size;
            self.residt(aidx) = self.residt(aidx) + self.results{end-w,self.idxhash(idx)}.WN(nodeidx,classidx) / wnd_size;
            self.tput(aidx) = self.tput(aidx) + self.results{end-w,self.idxhash(idx)}.TN(nodeidx,classidx) / wnd_size;
        end
    else
        self.servt(aidx) = self.results{end,self.idxhash(idx)}.RN(nodeidx,classidx);
        self.residt(aidx) = self.results{end,self.idxhash(idx)}.WN(nodeidx,classidx);
        self.tput(aidx) = self.results{end,self.idxhash(idx)}.TN(nodeidx,classidx);
    end

    % Fix for async-only entry targets: use RN (response time per visit) for residt
    % The host layer closed model incorrectly splits residence time (WN) between
    % activities when an entry only receives async calls (no sync callers).
    % For async-only entries, use RN instead of WN since the async arrivals
    % don't share the closed chain's visit ratio - each async arrival gets
    % the full response time per visit.
    if aidx > lqn.ashift && aidx <= lqn.ashift + lqn.nacts
        % This is an activity - find its bound entry
        for eidx = (lqn.eshift+1):(lqn.eshift+lqn.nentries)
            if full(lqn.graph(eidx, aidx)) > 0
                % Found bound entry - check if async-only
                hasSyncCallers = full(any(lqn.issynccaller(:, eidx)));
                hasAsyncCallers = full(any(lqn.isasynccaller(:, eidx)));
                if hasAsyncCallers && ~hasSyncCallers
                    % Async-only target: use RN (response time per visit)
                    % instead of WN (residence time with visit ratio)
                    self.residt(aidx) = self.servt(aidx); % servt already has RN
                end
                break;
            end
        end
    end

    % Apply under-relaxation if enabled and not first iteration
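    % For a metric x and relaxation factor omega in (0,1], the update applied
    % below is
    %   x <- omega * x_new + (1 - omega) * x_prev
    % i.e. exponential smoothing of successive iterates; smaller omega reacts
    % more slowly but helps convergence when the iteration oscillates.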
    omega = self.relax_omega;
    if omega < 1.0 && it > 1
        if ~isnan(self.servt_prev(aidx))
            self.servt(aidx) = omega * self.servt(aidx) + (1 - omega) * self.servt_prev(aidx);
        end
        if ~isnan(self.residt_prev(aidx))
            self.residt(aidx) = omega * self.residt(aidx) + (1 - omega) * self.residt_prev(aidx);
        end
        if ~isnan(self.tput_prev(aidx))
            self.tput(aidx) = omega * self.tput(aidx) + (1 - omega) * self.tput_prev(aidx);
        end
    end
    % Store current values for next iteration
    self.servt_prev(aidx) = self.servt(aidx);
    self.residt_prev(aidx) = self.residt(aidx);
    self.tput_prev(aidx) = self.tput(aidx);

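    % The per-activity metrics are summarized as exponential surrogates:
    % Exp.fitMean builds an exponential distribution with the given mean and
    % Exp.fitRate one with the given rate, so only the first moments of the
    % measured service time and throughput are propagated to the other layers.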
    self.servtproc{aidx} = Exp.fitMean(self.servt(aidx));
    self.tputproc{aidx} = Exp.fitRate(self.tput(aidx));
end

% Phase-2 support: split activity service times by phase
% Note: Overtaking probability is computed later after entry throughput is available
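% In LQN terms, phase 1 of an entry is the work done before the reply is sent
% back to the caller and phase 2 is the work done afterwards; the split below
% separates their service time contributions so that the overtaking
% correction applied later can charge callers for phase 1 plus only the
% phase-2 work they may actually wait for.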
if self.hasPhase2
    % Reset phase-specific arrays
    self.servt_ph1 = zeros(lqn.nidx, 1);
    self.servt_ph2 = zeros(lqn.nidx, 1);

    % Split activity service times by phase
    for a = 1:lqn.nacts
        aidx = lqn.ashift + a;
        if lqn.actphase(a) == 1
            self.servt_ph1(aidx) = self.servt(aidx);
        else
            self.servt_ph2(aidx) = self.servt(aidx);
        end
    end

    % Aggregate phase service times to entry level
    for e = 1:lqn.nentries
        eidx = lqn.eshift + e;
        acts = lqn.actsof{eidx};
        for aidx = acts
            a = aidx - lqn.ashift;
            if a > 0 && a <= lqn.nacts
                if lqn.actphase(a) == 1
                    self.servt_ph1(eidx) = self.servt_ph1(eidx) + self.servt_ph1(aidx);
                else
                    self.servt_ph2(eidx) = self.servt_ph2(eidx) + self.servt_ph2(aidx);
                end
            end
        end
    end
end

% obtain throughput for activities in thinkt_classes_updmap (needed for async calls)
% this ensures tputproc is set for activities that make async calls from client nodes
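% The throughput process fitted in this loop is presumably what parameterizes
% the arrival process of the corresponding open (asynchronous) class in the
% target layer, so it must be available even for activities that are not
% covered by servt_classes_updmap.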
for r=1:size(self.thinkt_classes_updmap,1)
    idx = self.thinkt_classes_updmap(r,1);
    aidx = self.thinkt_classes_updmap(r,2);
    nodeidx = self.thinkt_classes_updmap(r,3);
    classidx = self.thinkt_classes_updmap(r,4);

    % only update if not already set by servt_classes_updmap processing
    if isempty(self.tputproc) || length(self.tputproc) < aidx || isempty(self.tputproc{aidx})
        iter_min = min(30,ceil(self.options.iter_max/4));
        wnd_size = (it-self.averagingstart+1);
        if ~isempty(self.averagingstart) && it>=iter_min % assume steady-state
            self.tput(aidx) = 0;
            for w=1:(wnd_size-1)
                self.tput(aidx) = self.tput(aidx) + self.results{end-w,self.idxhash(idx)}.TN(nodeidx,classidx) / wnd_size;
            end
        else
            self.tput(aidx) = self.results{end,self.idxhash(idx)}.TN(nodeidx,classidx);
        end
        self.tputproc{aidx} = Exp.fitRate(self.tput(aidx));
    end
end

% TODO: obtain the join times
%self.joint = zeros(lqn.nidx,1);
%joinedacts = find(lqn.actpretype == ActivityPrecedenceType.PRE_AND)';

% obtain the call residence time
self.callservt = zeros(lqn.ncalls,1);
self.callresidt = zeros(lqn.ncalls,1);
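% For each call, RN(nodeidx,classidx) is the response time per visit in the
% layer model, so multiplying by the mean number of calls
% (lqn.callproc{cidx}.getMean) gives the time spent in the call per
% invocation of the caller, while WN is taken as the call residence time.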
for r=1:size(self.call_classes_updmap,1)
    idx = self.call_classes_updmap(r,1);
    cidx = self.call_classes_updmap(r,2);
    nodeidx = self.call_classes_updmap(r,3);
    classidx = self.call_classes_updmap(r,4);
    if self.call_classes_updmap(r,3) > 1
        if nodeidx == 1
            self.callservt(cidx) = 0;
        else
            self.callservt(cidx) = self.results{end, self.idxhash(idx)}.RN(nodeidx,classidx) * self.lqn.callproc{cidx}.getMean;
            self.callresidt(cidx) = self.results{end, self.idxhash(idx)}.WN(nodeidx,classidx);
        end
        % Apply under-relaxation to call service times
        omega = self.relax_omega;
        if omega < 1.0 && it > 1 && ~isnan(self.callservt_prev(cidx))
            self.callservt(cidx) = omega * self.callservt(cidx) + (1 - omega) * self.callservt_prev(cidx);
        end
        self.callservt_prev(cidx) = self.callservt(cidx);
    end
end

% then resolve the entry servt by summing up these contributions
%entry_servt = zeros(lqn.nidx,1);
entry_servt = self.servtmatrix*[self.residt;self.callresidt(:)]; % Sum the residT of all the activities connected to this entry
entry_servt(1:lqn.eshift) = 0;
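% A sketch of the aggregation above: servtmatrix acts as a linear map from
% the stacked vector [residt; callresidt] to per-index totals, so each
% entry's service time is the sum of the residence times of its bound
% activities plus those of the calls they issue; indices up to eshift
% (hosts and tasks) are not entries and are zeroed out.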

% Propagate forwarding calls: add target entry's service time to source entry
% When e0 forwards to e1 with probability p, callers of e0 see:
%   e0's service + p * e1's service
% Process in topological order to handle forwarding chains correctly
for cidx = 1:lqn.ncalls
    if lqn.calltype(cidx) == CallType.FWD
        source_eidx = lqn.callpair(cidx, 1);
        target_eidx = lqn.callpair(cidx, 2);
        fwd_prob = lqn.callproc{cidx}.getMean();

        % Get target entry's service time
        target_servt = entry_servt(target_eidx);

        % If the target entry doesn't have a computed service time (forwarding-only target),
        % compute it directly from its activities' host demands
        if target_servt == 0 || isnan(target_servt)
            target_servt = 0;
            % Sum host demands of all activities bound to this entry
            for aidx = lqn.actsof{target_eidx}
                if ~isempty(lqn.hostdem{aidx})
                    target_servt = target_servt + lqn.hostdem{aidx}.getMean();
                end
            end
        end

        entry_servt(source_eidx) = entry_servt(source_eidx) + fwd_prob * target_servt;
    end
end

% this block fixes the problem that ResidT is scaled so that the task has
% Vtask=1, whereas in the call servt the entries need to have Ventry=1
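% A sketch of the rescaling below: host-layer results are expressed per task
% visit (Vtask=1), so
%   servt(eidx) = entry_servt(eidx) * task_tput / entry_tput
% converts them to a per-entry-visit basis (Ventry=1), with
% GlobalConstants.Zero guarding against a zero entry throughput.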
for eidx=(lqn.eshift+1):(lqn.eshift+lqn.nentries)
    tidx = lqn.parent(eidx); % task of entry
    hidx = lqn.parent(tidx); % host of entry
    if ~self.ignore(tidx) && ~self.ignore(hidx)
        % Check if this entry has sync callers (which create closed classes)
        hasSyncCallers = full(any(lqn.issynccaller(:, eidx)));

        if hasSyncCallers
            % Original logic for entries with sync callers
            % get class in host layer of task and entry
            tidxclass = ensemble{self.idxhash(hidx)}.attribute.tasks(find(ensemble{self.idxhash(hidx)}.attribute.tasks(:,2) == tidx),1);
            eidxclass = ensemble{self.idxhash(hidx)}.attribute.entries(find(ensemble{self.idxhash(hidx)}.attribute.entries(:,2) == eidx),1);
            task_tput = sum(self.results{end,self.idxhash(hidx)}.TN(ensemble{self.idxhash(hidx)}.attribute.clientIdx,tidxclass));
            entry_tput = sum(self.results{end,self.idxhash(hidx)}.TN(ensemble{self.idxhash(hidx)}.attribute.clientIdx,eidxclass));
            %entry_servt_refstat = self.ensemble{self.idxhash(hidx)}.classes{tidxclass}.refstat;
            %entry_servt_z = entry_servt_refstat.serviceProcess{self.ensemble{self.idxhash(hidx)}.classes{tidxclass}.index}.getMean();
            %entry_servt(eidx) = self.ensemble{self.idxhash(hidx)}.classes{tidxclass}.population / entry_tput - entry_servt_z;
            self.servt(eidx) = entry_servt(eidx) * task_tput / max(GlobalConstants.Zero, entry_tput);
            self.residt(eidx) = entry_servt(eidx) * task_tput / max(GlobalConstants.Zero, entry_tput);
        else
            % For async-only targets, use entry_servt directly
            % No throughput ratio scaling needed since there are no closed classes
            self.servt(eidx) = entry_servt(eidx);
            self.residt(eidx) = entry_servt(eidx);
        end
    end
end

% Phase-2 support: compute overtaking probability and apply correction
% This must happen AFTER entry throughput is available (computed above)
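% The overtaking probability used below is, roughly, the probability that a
% caller finds the server still busy with the phase-2 work of a previous
% request; only then does phase 2 delay the caller, which gives the corrected
% residence time servt_ph1 + prOvertake * servt_ph2.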
if self.hasPhase2
    for e = 1:lqn.nentries
        eidx = lqn.eshift + e;
        tidx = lqn.parent(eidx);

        if self.servt_ph2(eidx) > GlobalConstants.FineTol
            % Get entry throughput (use task throughput as an approximation if the entry value is not available)
            if self.tput(eidx) > GlobalConstants.FineTol
                entry_tput = self.tput(eidx);
            elseif self.tput(tidx) > GlobalConstants.FineTol
                entry_tput = self.tput(tidx);
            else
                entry_tput = 0;
            end

            % Compute overtaking probability now that throughput is available
            if entry_tput > GlobalConstants.FineTol
                self.prOvertake(e) = self.overtake_prob(eidx);
            else
                self.prOvertake(e) = 0;
            end

            % Caller's response time = phase-1 only + P(overtake) * phase-2
            % Phase-2 only delays the caller if overtaking occurs
            overtake_delay = self.prOvertake(e) * self.servt_ph2(eidx);

            % The caller sees phase-1 + overtaking correction (not full phase-2)
            % self.servt(eidx) remains unchanged (phase-1 + phase-2) for utilization calculation
            self.residt(eidx) = self.servt_ph1(eidx) + overtake_delay;
        end
    end
end

%self.servt(lqn.eshift+1:lqn.eshift+lqn.nentries) = entry_servt(lqn.eshift+1:lqn.eshift+lqn.nentries);
%entry_servt((lqn.ashift+1):end) = 0;
for r=1:size(self.call_classes_updmap,1)
    cidx = self.call_classes_updmap(r,2);
    eidx = lqn.callpair(cidx,2);
    if self.call_classes_updmap(r,3) > 1
        self.servtproc{eidx} = Exp.fitMean(self.servt(eidx));
    end
end

% determine call response time processes
for r=1:size(self.call_classes_updmap,1)
    cidx = self.call_classes_updmap(r,2);
    eidx = lqn.callpair(cidx,2);
    if self.call_classes_updmap(r,3) > 1
        if it==1
            % note that respt is per visit, so the number of calls is 1
            self.callservt(cidx) = self.servt(eidx);
            self.callservtproc{cidx} = self.servtproc{eidx};
        else
            % note that respt is per visit, so the number of calls is 1
            self.callservtproc{cidx} = Exp.fitMean(self.callservt(cidx));
        end
    end
end

self.ptaskcallers = zeros(size(self.ptaskcallers));
% determine ptaskcallers for direct callers to tasks
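% ptaskcallers(tidx, caller) is estimated below as the fraction of the
% client-side throughput observed in tidx's layer that is generated by each
% calling task, i.e. caller_tput normalized by the total caller throughput
% (with GlobalConstants.Zero guarding the all-zero case).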
for t = 1:lqn.ntasks
    tidx = lqn.tshift + t;
    if ~lqn.isref(tidx)
        [calling_idx, ~] = find(lqn.iscaller(:, lqn.entriesof{tidx})); %#ok<ASGLU>
        callers = intersect(lqn.tshift+(1:lqn.ntasks), unique(calling_idx)');
        caller_tput = zeros(1,lqn.ntasks);
        for caller_idx=callers(:)'
            caller_idxclass = self.ensemble{self.idxhash(tidx)}.attribute.tasks(1+find(self.ensemble{self.idxhash(tidx)}.attribute.tasks(2:end,2) == caller_idx),1);
            caller_tput(caller_idx-lqn.tshift) = sum(self.results{end,self.idxhash(tidx)}.TN(self.ensemble{self.idxhash(tidx)}.attribute.clientIdx,caller_idxclass));
        end
        self.ptaskcallers(tidx,(lqn.tshift+1):(lqn.tshift+lqn.ntasks)) = caller_tput / max(GlobalConstants.Zero, sum(caller_tput));
    end
end

% determine ptaskcallers for direct callers to hosts
for hidx = 1:lqn.nhosts
    if ~self.ignore(tidx) && ~self.ignore(hidx)
        caller_tput = zeros(1,lqn.ntasks);
        callers = lqn.tasksof{hidx};
        for caller_idx=callers
            caller_idxclass = self.ensemble{self.idxhash(hidx)}.attribute.tasks(find(self.ensemble{self.idxhash(hidx)}.attribute.tasks(:,2) == caller_idx),1);
            caller_tput(caller_idx-lqn.tshift) = caller_tput(caller_idx-lqn.tshift) + sum(self.results{end,self.idxhash(hidx)}.TN(self.ensemble{self.idxhash(hidx)}.attribute.clientIdx,caller_idxclass));
        end
        self.ptaskcallers(hidx,(lqn.tshift+1):(lqn.tshift+lqn.ntasks)) = caller_tput / max(GlobalConstants.Zero, sum(caller_tput));
    end
end

% impute call probability using a DTMC random walk on the taskcaller graph
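% A sketch of the walk below: with P the row-stochastic matrix of direct
% caller probabilities and x0 a row vector concentrated on one node, the
% iterates x0*P, x0*P^2, ... give at step k the probability that a request
% observed at that node originated k layers upstream; the walk stops once
% (almost) all of the mass has reached the reference tasks.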
P = self.ptaskcallers;
P = dtmc_makestochastic(P); % hold mass at reference stations when there are no callers
self.ptaskcallers_step{1} = P; % configure step 1
for h = 1:lqn.nhosts
    hidx = h;
    for tidx = lqn.tasksof{hidx}
        % initialize the probability mass on the host node hidx
        x0 = zeros(length(self.ptaskcallers),1);
        x0(hidx) = 1;
        x0 = x0(:)';
        % start the walk backward to impute the probability of indirect callers
        x = x0*P; % step 1 is not stored here since ptaskcallers_step{1} already holds the direct callers
        for step=2:self.nlayers % upper bound on the maximum DAG height
            x = x*P;
            % here self.ptaskcallers_step{step}(tidx,remidx) is the
            % probability that the request to tidx comes from remidx
            self.ptaskcallers_step{step}(tidx,:) = x(:);
            % here self.ptaskcallers_step{step}(hidx,remidx) is the
            % probability that the request to hidx comes from remidx
            % through the call that tidx puts on hidx, which is
            % weighted by the relative tput of tidx among the tasks running
            % on hidx
            self.ptaskcallers_step{step}(hidx,:) = self.ptaskcallers(hidx,tidx)*x(:)';
            if sum(x(find(lqn.isref)))>1.0-self.options.tol %#ok<FNDSB>
                % if all the probability mass has propagated back to the
                % reference stations, then stop
                break;
            end
            self.ptaskcallers(:,tidx) = max([self.ptaskcallers(:,tidx), x(:)],[],2);
        end
    end
end
self.ensemble = ensemble;
end