function updateMetricsDefault(self, it)
% UPDATEMETRICSDEFAULT Refresh per-activity service/residence times and
% throughputs, per-call residence times, entry service times, and the
% task-caller probability matrix from the layer results of iteration it.
%
% NOTE(review): this block was recovered from a garbled paste (the original
% file's line numbers were fused onto the text and several structural lines
% were dropped). Lines marked "(reconstructed)" were re-derived from the
% embedded line-number gaps and from the symmetric second occurrence of the
% same code pattern; confirm them against the upstream source.
ensemble = self.ensemble;
lqn = self.lqn; % (reconstructed) lqn is read throughout but was never assigned in the paste

% obtain the activity service times
self.servt = zeros(lqn.nidx,1);
self.residt = zeros(lqn.nidx,1);
for r=1:size(self.servt_classes_updmap,1)
    idx = self.servt_classes_updmap(r,1);      % ensemble (layer) index
    aidx = self.servt_classes_updmap(r,2);     % LQN activity index
    nodeidx = self.servt_classes_updmap(r,3);  % station index within the layer model
    classidx = self.servt_classes_updmap(r,4); % class index within the layer model
    % store the residence times and tput at this layer to become
    % the servt / tputs of aidx in another layer, as needed
    iter_min = min(30,ceil(self.options.iter_max/4));
    wnd_size = (it-self.averagingstart+1);
    if ~isempty(self.averagingstart) && it>=iter_min % assume steady-state
        % moving-window average over the last wnd_size stored iterates
        self.servt(aidx) = 0; % (reconstructed)
        self.residt(aidx) = 0;
        self.tput(aidx) = 0; % (reconstructed)
        for w=0:(wnd_size-1) % (reconstructed) loop header missing in the paste
            self.servt(aidx) = self.servt(aidx) + self.results{end-w,self.idxhash(idx)}.RN(nodeidx,classidx) / wnd_size;
            self.residt(aidx) = self.residt(aidx) + self.results{end-w,self.idxhash(idx)}.WN(nodeidx,classidx) / wnd_size;
            self.tput(aidx) = self.tput(aidx) + self.results{end-w,self.idxhash(idx)}.TN(nodeidx,classidx) / wnd_size;
        end
    else % (reconstructed) transient phase: use only the latest iterate
        self.servt(aidx) = self.results{end,self.idxhash(idx)}.RN(nodeidx,classidx);
        self.residt(aidx) = self.results{end,self.idxhash(idx)}.WN(nodeidx,classidx);
        self.tput(aidx) = self.results{end,self.idxhash(idx)}.TN(nodeidx,classidx);
    end

    % Apply under-relaxation if enabled and not first iteration
    omega = self.relax_omega;
    if omega < 1.0 && it > 1
        if ~isnan(self.servt_prev(aidx))
            self.servt(aidx) = omega * self.servt(aidx) + (1 - omega) * self.servt_prev(aidx);
        end
        if ~isnan(self.residt_prev(aidx))
            self.residt(aidx) = omega * self.residt(aidx) + (1 - omega) * self.residt_prev(aidx);
        end
        if ~isnan(self.tput_prev(aidx))
            self.tput(aidx) = omega * self.tput(aidx) + (1 - omega) * self.tput_prev(aidx);
        end
    end
    % Store current values for next iteration
    self.servt_prev(aidx) = self.servt(aidx);
    self.residt_prev(aidx) = self.residt(aidx);
    self.tput_prev(aidx) = self.tput(aidx);

    self.servtproc{aidx} = Exp.fitMean(self.servt(aidx));
    self.tputproc{aidx} = Exp.fitRate(self.tput(aidx));
end

% obtain throughput for activities in thinkt_classes_updmap (needed for
% async calls); this ensures tputproc is set for activities that make
% async calls from client nodes
for r=1:size(self.thinkt_classes_updmap,1)
    idx = self.thinkt_classes_updmap(r,1);
    aidx = self.thinkt_classes_updmap(r,2);
    nodeidx = self.thinkt_classes_updmap(r,3);
    classidx = self.thinkt_classes_updmap(r,4);
    % only update if not already set by servt_classes_updmap processing
    if isempty(self.tputproc) || length(self.tputproc) < aidx || isempty(self.tputproc{aidx})
        iter_min = min(30,ceil(self.options.iter_max/4));
        wnd_size = (it-self.averagingstart+1);
        if ~isempty(self.averagingstart) && it>=iter_min % assume steady-state
            self.tput(aidx) = 0; % (reconstructed)
            for w=0:(wnd_size-1) % (reconstructed) moving-window average, as above
                self.tput(aidx) = self.tput(aidx) + self.results{end-w,self.idxhash(idx)}.TN(nodeidx,classidx) / wnd_size;
            end
        else % (reconstructed)
            self.tput(aidx) = self.results{end,self.idxhash(idx)}.TN(nodeidx,classidx);
        end
        self.tputproc{aidx} = Exp.fitRate(self.tput(aidx));
    end
end

% TODO: obtain the join times
%self.joint = zeros(lqn.nidx,1);
%joinedacts = find(lqn.actpretype == ActivityPrecedenceType.PRE_AND)';

% obtain the call residence time
self.callservt = zeros(lqn.ncalls,1);
self.callresidt = zeros(lqn.ncalls,1);
for r=1:size(self.call_classes_updmap,1)
    idx = self.call_classes_updmap(r,1);
    cidx = self.call_classes_updmap(r,2);
    nodeidx = self.call_classes_updmap(r,3);
    classidx = self.call_classes_updmap(r,4);
    if self.call_classes_updmap(r,3) > 1 % presumably skips the client/reference node (index 1) — TODO confirm
        self.callservt(cidx) = 0;
        % per-visit RN scaled by the mean number of calls gives the call service time
        self.callservt(cidx) = self.results{end, self.idxhash(idx)}.RN(nodeidx,classidx) * self.lqn.callproc{cidx}.getMean;
        self.callresidt(cidx) = self.results{end, self.idxhash(idx)}.WN(nodeidx,classidx);

        % Apply under-relaxation to call service times
        omega = self.relax_omega;
        if omega < 1.0 && it > 1 && ~isnan(self.callservt_prev(cidx))
            self.callservt(cidx) = omega * self.callservt(cidx) + (1 - omega) * self.callservt_prev(cidx);
        end
        self.callservt_prev(cidx) = self.callservt(cidx);
    end
end

% then resolve the entry servt summing up these contributions
%entry_servt = zeros(lqn.nidx,1);
entry_servt = self.servtmatrix*[self.residt;self.callresidt(:)]; % Sum the residT of all the activities connected to this entry
entry_servt(1:lqn.eshift) = 0;

% this block fixes the problem that ResidT is scaled so that the
% task has Vtask=1, but in call servt the entries need to have Ventry=1
for eidx=(lqn.eshift+1):(lqn.eshift+lqn.nentries)
    tidx = lqn.parent(eidx); % task of entry
    hidx = lqn.parent(tidx); % host of entry
    if ~self.ignore(tidx) && ~self.ignore(hidx)
        % Check if this entry has sync callers (which create closed classes)
        hasSyncCallers = full(any(lqn.issynccaller(:, eidx)));
        if hasSyncCallers % (reconstructed) branch header missing in the paste
            % Original logic for entries with sync callers
            % get class in host layer of task and entry
            tidxclass = ensemble{self.idxhash(hidx)}.attribute.tasks(find(ensemble{self.idxhash(hidx)}.attribute.tasks(:,2) == tidx),1);
            eidxclass = ensemble{self.idxhash(hidx)}.attribute.entries(find(ensemble{self.idxhash(hidx)}.attribute.entries(:,2) == eidx),1);
            task_tput = sum(self.results{end,self.idxhash(hidx)}.TN(ensemble{self.idxhash(hidx)}.attribute.clientIdx,tidxclass));
            entry_tput = sum(self.results{end,self.idxhash(hidx)}.TN(ensemble{self.idxhash(hidx)}.attribute.clientIdx,eidxclass));
            %entry_servt_refstat = self.ensemble{self.idxhash(hidx)}.classes{tidxclass}.refstat;
            %entry_servt_z = entry_servt_refstat.serviceProcess{self.ensemble{self.idxhash(hidx)}.classes{tidxclass}.index}.getMean();
            %entry_servt(eidx) = self.ensemble{self.idxhash(hidx)}.classes{tidxclass}.population / entry_tput - entry_servt_z;
            % rescale from Vtask=1 to Ventry=1 via the task/entry tput ratio
            self.servt(eidx) = entry_servt(eidx) * task_tput / max(GlobalConstants.Zero, entry_tput);
            self.residt(eidx) = entry_servt(eidx) * task_tput / max(GlobalConstants.Zero, entry_tput);
        else % (reconstructed)
            % For async-only targets, use entry_servt directly
            % No throughput ratio scaling needed since there are no closed classes
            self.servt(eidx) = entry_servt(eidx);
            self.residt(eidx) = entry_servt(eidx);
        end
    end
end

%self.servt(lqn.eshift+1:lqn.eshift+lqn.nentries) = entry_servt(lqn.eshift+1:lqn.eshift+lqn.nentries);
%entry_servt((lqn.ashift+1):end) = 0;
% refresh the service process of each entry that is a call target
for r=1:size(self.call_classes_updmap,1)
    cidx = self.call_classes_updmap(r,2);
    eidx = lqn.callpair(cidx,2);
    if self.call_classes_updmap(r,3) > 1
        self.servtproc{eidx} = Exp.fitMean(self.servt(eidx));
    end
end

% determine call response times processes
for r=1:size(self.call_classes_updmap,1)
    cidx = self.call_classes_updmap(r,2);
    eidx = lqn.callpair(cidx,2);
    if self.call_classes_updmap(r,3) > 1
        % note that respt is per visit, so number of calls is 1
        self.callservt(cidx) = self.servt(eidx);
        self.callservtproc{cidx} = self.servtproc{eidx};
    else % (reconstructed)
        % note that respt is per visit, so number of calls is 1
        self.callservtproc{cidx} = Exp.fitMean(self.callservt(cidx));
    end
end

self.ptaskcallers = zeros(size(self.ptaskcallers));
% determine ptaskcallers for direct callers to tasks
for t = 1:lqn.ntasks % (reconstructed) loop header missing in the paste
    tidx = lqn.tshift + t;
    if ~self.ignore(tidx) % (reconstructed) guard — TODO confirm against upstream
        [calling_idx, ~] = find(lqn.iscaller(:, lqn.entriesof{tidx})); %#ok<ASGLU>
        callers = intersect(lqn.tshift+(1:lqn.ntasks), unique(calling_idx)');
        caller_tput = zeros(1,lqn.ntasks);
        for caller_idx=callers(:)'
            % attribute.tasks row 1 is skipped (2:end), hence the 1+ offset on find
            caller_idxclass = self.ensemble{self.idxhash(tidx)}.attribute.tasks(1+find(self.ensemble{self.idxhash(tidx)}.attribute.tasks(2:end,2) == caller_idx),1);
            caller_tput(caller_idx-lqn.tshift) = sum(self.results{end,self.idxhash(tidx)}.TN(self.ensemble{self.idxhash(tidx)}.attribute.clientIdx,caller_idxclass));
        end
        % normalize caller throughputs into caller probabilities
        self.ptaskcallers(tidx,(lqn.tshift+1):(lqn.tshift+lqn.ntasks)) = caller_tput / max(GlobalConstants.Zero, sum(caller_tput));
    end
end

% determine ptaskcallers for direct callers to hosts
for hidx = 1:lqn.nhosts
    if ~self.ignore(tidx) && ~self.ignore(hidx) % NOTE(review): tidx is stale from the previous loop here — confirm intent
        caller_tput = zeros(1,lqn.ntasks);
        callers = lqn.tasksof{hidx};
        for caller_idx=callers
            caller_idxclass = self.ensemble{self.idxhash(hidx)}.attribute.tasks(find(self.ensemble{self.idxhash(hidx)}.attribute.tasks(:,2) == caller_idx),1);
            caller_tput(caller_idx-lqn.tshift) = caller_tput(caller_idx-lqn.tshift) + sum(self.results{end,self.idxhash(hidx)}.TN(self.ensemble{self.idxhash(hidx)}.attribute.clientIdx,caller_idxclass));
        end
        self.ptaskcallers(hidx,(lqn.tshift+1):(lqn.tshift+lqn.ntasks)) = caller_tput / max(GlobalConstants.Zero, sum(caller_tput));
    end
end

% impute call probability using a DTMC random walk on the taskcaller graph
P = self.ptaskcallers;
P = dtmc_makestochastic(P); % hold mass at reference stations (comment truncated in the paste)
self.ptaskcallers_step{1} = P; % configure step 1
for hidx = 1:lqn.nhosts % (reconstructed) outer loop header missing in the paste
    for tidx = lqn.tasksof{hidx}
        % initialize the probability mass on tidx
        x0 = zeros(length(self.ptaskcallers),1);
        x0(tidx) = 1; % (reconstructed) unit mass on the target task — TODO confirm
        % start the walk backward to impute probability of indirect callers
        x = x0'*P; % skip since pcallers already calculated in this case
        % NOTE(review): the transpose above was lost in the paste ("x = x0*" / "P;");
        % x0 is a column vector, so x0'*P is the only well-formed reading
        for step=2:self.nlayers % upper bound on maximum dag height
            x = x*P; % (reconstructed) advance the walk one step — TODO confirm
            % here self.ptaskcallers_step{step}(tidx,remidx) is the
            % probability that the request to tidx comes from remidx
            self.ptaskcallers_step{step}(tidx,:) = x(:);
            % here self.ptaskcallers_step{step}(hidx,remidx) is the
            % probability that the request to hidx comes from remidx
            % through the call that tidx puts on hidx, which is
            % weighted by the relative tput of tidx on the tasks running on
            % the host (comment truncated in the paste)
            self.ptaskcallers_step{step}(hidx,:) = self.ptaskcallers(hidx,tidx)*x(:)';
            if sum(x(find(lqn.isref)))>1.0-self.options.tol %#ok<FNDSB>
                % if all the probability mass has reached backwards the
                % reference stations, then stop
                break % (reconstructed)
            end
            self.ptaskcallers(:,tidx) = max([self.ptaskcallers(:,tidx), x(:)],[],2);
        end
    end
end
self.ensemble = ensemble;