function updatePopulations(self, it)
% Update the populations in each layer to address interlocking
lqn = self.lqn; % LQN model structure (assumed to be cached on the solver object)
ilscaling = ones(lqn.nhosts+lqn.ntasks, lqn.nhosts+lqn.ntasks); % interlock scaling factors

% We now define from where we count the multiplicity of a caller task:
% self.njobsorig(caller_tidx,idx) is the multiplicity in the upper layer
% of the caller caller_tidx to the server in model idx
call_mult_count = self.njobsorig;
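% Illustrative sketch with hypothetical indices: if the caller task in row r
% has multiplicity 3 and calls into the layer of model idx, then
% call_mult_count(r,idx) = 3, so at most 3 of its jobs can be counted among
% the callers of that layer.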
% Host (processor) layers: bound the caller populations of each host by the
% multiplicity of the remote callers that are certain to route requests to it
for hidx = 1:lqn.nhosts
    for hops = 1:self.nlayers
        minremote = Inf;
        ilscaling(hidx) = 1.0;
        % the following are remote (indirect) callers that are certain to be
        % callers of host hidx, hence if they have multiplicity m then the
        % jobs at hidx cannot in fact number more than m
        callers = lqn.tasksof{hidx};
        caller_conn_components = lqn.conntasks(callers-lqn.tshift);
        multcallers = sum(call_mult_count(callers,hidx));
        indirect_callers = find(self.ptaskcallers_step{hops}(hidx,:)); % callers at step hops from the node
        multremote = 0;
        for remidx=indirect_callers(:)'
            % if the remote caller is an infinite server we do not apply the
            % interlock correction, unless it is a reference task, whose
            % finite multiplicity makes it count as a regular caller
            if lqn.sched(remidx) == SchedStrategy.INF && ~lqn.isref(remidx)
                multremote = Inf; % do not apply interlock correction
            else
                % weight the caller multiplicity by the probability that a
                % request to hidx originates from remidx
                multremote = multremote + self.ptaskcallers_step{hops}(hidx,remidx)*call_mult_count(remidx,hidx);
            end
            if multcallers > multremote && multremote > GlobalConstants.CoarseTol && ~isinf(multremote) && multremote < minremote
                minremote = multremote;
                % [multcallers, multremote]
                % we spread the scaling proportionally to the direct
                % caller probabilities
                caller_spreading_ratio = self.ptaskcallers(hidx,callers); % probability vector over the direct callers
                % renormalize within each connected component of caller tasks
                for u=unique(caller_conn_components)
                    caller_spreading_ratio(caller_conn_components==u) = caller_spreading_ratio(caller_conn_components==u)/sum(caller_spreading_ratio(caller_conn_components==u));
                end
                for c=callers(:)'
                    ilscaling(c,hidx) = min(1, multremote / multcallers .* caller_spreading_ratio(c==callers));
                end
            end
        end
    end
end
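% Worked sketch of the scaling with hypothetical numbers: if multcallers=5,
% multremote=2, and there are two direct callers in one connected component
% with ptaskcallers weights 0.75 and 0.25, the spreading ratios remain
% [0.75 0.25] after renormalization and the scalings become
% min(1, 2/5*0.75)=0.30 and min(1, 2/5*0.25)=0.10 for the two callers.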
maxhops = self.nlayers; % max backward hops in interlocking correction
%maxhops = 1; % max backward hops in interlocking correction
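% Here hops counts how far back in the call graph a caller sits relative to
% the layer being corrected: hops=1 are its direct callers, hops=2 the
% callers of those callers, and so on (see the use of ptaskcallers_step).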
% Task layers: repeat the interlocking correction for each task acting as a server
for hops = 1:maxhops
    for t = 1:lqn.ntasks
        tidx = lqn.tshift + t;
        minremote = Inf;
        % the following are remote (indirect) callers that are certain to be
        % callers of task t, hence if they have multiplicity m then task t
        % cannot in fact have multiplicity more than m
        [calling_idx, called_entries] = find(lqn.iscaller(:, lqn.entriesof{tidx})); %#ok<ASGLU>
        callers = intersect(lqn.tshift+(1:lqn.ntasks), unique(calling_idx)');
        caller_conn_components = lqn.conntasks(callers-lqn.tshift);
        multcallers = sum(call_mult_count(callers,tidx));
        indirect_callers = find(self.ptaskcallers_step{hops}(tidx,:)); % caller at step hops from the node
        multremote = 0;
        for remidx=indirect_callers(:)'
            if lqn.sched(remidx) == SchedStrategy.INF % the remote caller is an infinite server
                multremote = Inf; % do not apply interlock correction
            else
                multremote = multremote + self.ptaskcallers_step{hops}(tidx,remidx)*call_mult_count(remidx,tidx);
            end
            if multcallers > multremote && multremote > GlobalConstants.CoarseTol && ~isinf(multremote) && multremote < minremote
                minremote = multremote;
                % [multcallers, multremote]
                % we spread the scaling proportionally to the direct
                % caller probabilities
                caller_spreading_ratio = self.ptaskcallers(tidx,callers); % probability vector over the direct callers
                % renormalize within each connected component of caller tasks
                for u=unique(caller_conn_components)
                    caller_spreading_ratio(caller_conn_components==u) = caller_spreading_ratio(caller_conn_components==u)/sum(caller_spreading_ratio(caller_conn_components==u));
                end
                for c=callers(:)'
                    ilscaling(c,tidx) = min(1, multremote / multcallers .* caller_spreading_ratio(c==callers));
                end
            end
        end
    end
end
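% Renormalization sketch with hypothetical numbers: if the direct callers form
% two connected components with ptaskcallers weights [0.5 0.3] and [0.2], the
% spreading ratios become [0.625 0.375] within the first component and [1.0]
% for the second, so each component's ratios sum to one before the
% remote/direct multiplicity ratio is applied.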
self.ilscaling = ilscaling;
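% Scale the original populations element-wise; for instance (hypothetical
% values) njobsorig(c,idx)=4 with ilscaling(c,idx)=0.30 gives njobs(c,idx)=1.2
% as the corrected population of caller c in the layer of model idx.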
self.njobs = self.njobsorig .* ilscaling;
end