LINE Solver
MATLAB API documentation
Loading...
Searching...
No Matches
updateThinkTimes.m
function updateThinkTimes(self, it)
% Update the think times of all callers at iteration it. The method handles
% differently the case where a caller is a ref task than the case where the
% caller is a queueing station. A coarse heuristic is used when one or more
% callers are themselves infinite servers.
%
% Inputs:
%   self - solver object carrying the LQN model, per-task submodels and results
%   it   - current fixed-point iteration number (1-based); under-relaxation
%          is only applied from the second iteration onwards

% create local variables due to MATLAB's slow access to self properties
lqn = self.lqn;
idxhash = self.idxhash;
results = self.results;

% main code starts here
if size(lqn.iscaller,2) > 0 % ignore models without callers
    torder = 1:(lqn.ntasks); % set sequential order to update the tasks
    % solve all task models
    for t = torder
        tidx = lqn.tshift + t;
        tidx_thinktime = lqn.think{tidx}.getMean; % user specified think time
        hidx = idxhash(tidx); % submodel index for task t (NaN skips all REF tasks)
        if ~isnan(hidx)
            % hoist the per-task lookups used repeatedly below
            res_t = results{end, hidx}; % latest results of the submodel where t is a server
            sidx = self.ensemble{hidx}.attribute.serverIdx; % station index of t in that submodel
            % we use njobs to adapt to interlocking corrections
            njobs = max(self.njobs(tidx,:));
            % mean throughput of task t in the model where it is a server, summed across replicas
            self.tput(tidx) = lqn.repl(tidx)*sum(res_t.TN(sidx,:),2);
            % utilization of t as a server (identical computation in both branches below)
            self.util(tidx) = sum(res_t.UN(sidx,:),2);
            if lqn.sched(tidx) == SchedStrategy.INF % first we consider the update where t is an infinite server
                % key think time update formula for LQNs, this accounts for the fact that in LINE
                % infinite server utilization is dimensionally a mean number of jobs
                self.thinkt(tidx) = max(GlobalConstants.Zero, (njobs-self.util(tidx)) / self.tput(tidx) - tidx_thinktime);
            else % otherwise we consider the case where t is a regular queueing station (other than an infinite server)
                % key think time update formula for LQNs, this accounts that in LINE utilization
                % is scaled in [0,1] for all queueing stations irrespectively of the number of servers
                self.thinkt(tidx) = max(GlobalConstants.Zero, njobs*abs(1-self.util(tidx)) / self.tput(tidx) - tidx_thinktime);
            end
            % Apply under-relaxation to the think time if enabled (omega < 1),
            % skipping the first iteration and tasks without a previous estimate
            omega = self.relax_omega;
            if omega < 1.0 && it > 1 && ~isnan(self.thinkt_prev(tidx))
                self.thinkt(tidx) = omega * self.thinkt(tidx) + (1 - omega) * self.thinkt_prev(tidx);
            end
            self.thinkt_prev(tidx) = self.thinkt(tidx);
            self.thinktproc{tidx} = Exp.fitMean(self.thinkt(tidx) + tidx_thinktime);
        else % ref task: set think time to a negligible positive value
            self.thinkt(tidx) = GlobalConstants.FineTol;
            self.thinktproc{tidx} = Immediate();
        end
    end
end
end