function updateThinkTimes(self, it)
% UPDATETHINKTIMES Update the think times of all callers at iteration it.
% The method handles differently the case where a caller is a ref task than
% the case where the caller is a queueing station. A coarse heuristic is
% used when one or more callers are themselves infinite servers.
%
% Inputs:
%   self - solver object holding lqn metadata, per-task metrics (tput, util,
%          thinkt, thinkt_prev, njobs, residt), the layer ensemble and results
%   it   - current fixed-point iteration number (1-based)
%
% NOTE(review): this body was reconstructed from a garbled extraction
% (fused line numbers, dropped lines). Every reconstructed statement is
% marked with a NOTE(review) comment; verify against the original source.

% create local variables due to MATLAB's slow access to self properties
lqn = self.lqn;           % NOTE(review): reconstructed - dropped by extraction
idxhash = self.idxhash;   % NOTE(review): reconstructed - body uses bare idxhash below
results = self.results;

% main code starts here
if size(lqn.iscaller,2) > 0 % ignore models without callers
    torder = 1:(lqn.ntasks); % set sequential order to update the tasks
    % solve all task models
    for t = torder % NOTE(review): loop header reconstructed (tidx depends on t)
        tidx = lqn.tshift + t;
        tidx_thinktime = lqn.think{tidx}.getMean; % user specified think time
        %if ~lqn.isref(tidx) && ~isnan(idxhash(tidx)) % update tasks ignore ref tasks and empty tasks
        if ~isnan(self.idxhash(tidx)) % this skips all REF tasks
            % obtain total throughput of task t
            % mean throughput of task t in the model where it is a server, summed across replicas
            njobs = max(self.njobs(tidx,:)); % we use njobs to adapt to interlocking corrections
            self.tput(tidx) = lqn.repl(tidx)*sum(results{end,idxhash(tidx)}.TN(self.ensemble{idxhash(tidx)}.attribute.serverIdx,:),2);
            if lqn.sched(tidx) == SchedStrategy.INF % first we consider the update where t is an infinite server
                % obtain total utilization of task t
                self.util(tidx) = sum(results{end,idxhash(tidx)}.UN(self.ensemble{idxhash(tidx)}.attribute.serverIdx,:),2);
                % key think time update formula for LQNs; this accounts for the fact that in LINE
                % infinite-server utilization is dimensionally a mean number of jobs
                self.thinkt(tidx) = max(GlobalConstants.Zero, (njobs-self.util(tidx)) / self.tput(tidx) - tidx_thinktime);
            else % otherwise we consider the case where t is a regular queueing station (other than an infinite server)
                self.util(tidx) = sum(results{end,idxhash(tidx)}.UN(self.ensemble{idxhash(tidx)}.attribute.serverIdx,:),2); % utilization of t as a server
                % key think time update formula for LQNs; this accounts that in LINE utilization is
                % scaled in [0,1] for all queueing stations irrespectively of the number of servers
                self.thinkt(tidx) = max(GlobalConstants.Zero, njobs*abs(1-self.util(tidx)) / self.tput(tidx) - tidx_thinktime);
            end % NOTE(review): closing end reconstructed
            % Recover from Inf/NaN: snap back to previous iteration's value
            if it > 1 && ~isnan(self.thinkt_prev(tidx))
                if isinf(self.thinkt(tidx)) || isnan(self.thinkt(tidx))
                    self.thinkt(tidx) = self.thinkt_prev(tidx);
                end % NOTE(review): closing end reconstructed
            end % NOTE(review): closing end reconstructed
            % Apply under-relaxation to think time if enabled
            omega = self.relax_omega;
            if omega < 1.0 && it > 1 && ~isnan(self.thinkt_prev(tidx))
                rawT = self.thinkt(tidx);
                prevT = self.thinkt_prev(tidx);
                % If recovering from crash (prev much larger than raw), snap to raw
                if prevT > 10 * rawT && rawT > GlobalConstants.FineTol
                    self.thinkt_prev(tidx) = rawT; % reset prev to allow recovery
                end % NOTE(review): closing end reconstructed
                self.thinkt(tidx) = omega * self.thinkt(tidx) + (1 - omega) * self.thinkt_prev(tidx);
            end % NOTE(review): closing end reconstructed
            self.thinkt_prev(tidx) = self.thinkt(tidx);
            self.thinktproc{tidx} = Exp.fitMean(self.thinkt(tidx) + tidx_thinktime);
        else % ref task or forwarding target (no task layer)
            % Check if this is a forwarding target task
            isFwdTarget = false; % NOTE(review): initialization reconstructed
            for eidx_fwd = lqn.entriesof{tidx}
                for cidx_fwd = 1:lqn.ncalls
                    if lqn.calltype(cidx_fwd) == CallType.FWD && lqn.callpair(cidx_fwd, 2) == eidx_fwd
                        source_eidx = lqn.callpair(cidx_fwd, 1);
                        source_tidx = lqn.parent(source_eidx);
                        fwd_prob = lqn.callproc{cidx_fwd}.getMean();
                        % NOTE(review): flag set + break reconstructed; cidx_fwd is
                        % reused after the loops, so the scan must stop at the match
                        isFwdTarget = true;
                        break;
                    end
                end
                if isFwdTarget; break; end
            end % NOTE(review): closing end reconstructed
            if isFwdTarget % NOTE(review): guard reconstructed (fwd branch vs plain ref task)
                % Forwarding target think time: derived from the source task's
                % throughput, forwarding probability, and the processor response
                % time for the target's activities.
                % Formula: thinkt = njobs / arrival_rate - host_residt
                % where host_residt is the response time at the processor.
                njobs = max(self.njobs(tidx,:));
                tidx_thinktime = lqn.think{tidx}.getMean;
                arrival_rate = self.tput(source_tidx) * fwd_prob;
                if arrival_rate > GlobalConstants.FineTol && njobs > 0
                    self.tput(tidx) = arrival_rate;
                    % Subtract the processor response time for the target's
                    % activities (already computed by updateMetricsDefault)
                    target_eidx = lqn.callpair(cidx_fwd, 2);
                    host_residt = 0; % NOTE(review): initialization reconstructed
                    for aidx_fwd = lqn.actsof{target_eidx}
                        host_residt = host_residt + self.residt(aidx_fwd);
                    end % NOTE(review): closing end reconstructed
                    self.thinkt(tidx) = max(GlobalConstants.Zero, njobs / arrival_rate - host_residt - tidx_thinktime);
                else % NOTE(review): else reconstructed
                    % Source throughput not yet available; use large think time
                    self.thinkt(tidx) = 1000;
                end % NOTE(review): closing end reconstructed
                % Apply under-relaxation
                omega = self.relax_omega;
                if omega < 1.0 && it > 1 && ~isnan(self.thinkt_prev(tidx))
                    self.thinkt(tidx) = omega * self.thinkt(tidx) + (1 - omega) * self.thinkt_prev(tidx);
                end % NOTE(review): closing end reconstructed
                self.thinkt_prev(tidx) = self.thinkt(tidx);
                self.thinktproc{tidx} = Exp.fitMean(self.thinkt(tidx) + tidx_thinktime);
            else % NOTE(review): else reconstructed - plain ref task, negligible think time
                self.thinkt(tidx) = GlobalConstants.FineTol;
                self.thinktproc{tidx} = Immediate();
            end % NOTE(review): closing end reconstructed
        end
    end
end
end