LINE Solver
MATLAB API documentation
Loading...
Searching...
No Matches
buildLayersRecursive.m
function buildLayersRecursive(self, idx, callers, ishostlayer)
% BUILDLAYERSRECURSIVE Build the queueing-network submodel for one LQN layer.
%
% Inputs:
%   self        - layered solver object, updated in place (njobs, *_updmap
%                 tables, ensemble)
%   idx         - LQN index of the server of this layer (a host processor or
%                 a task)
%   callers     - LQN indices of the tasks that act as clients of idx
%   ishostlayer - true if idx is a host processor layer
%
% The constructed Network is stored in self.ensemble{idx}. The method also
% fills the update maps (servt/thinkt/actthinkt/arvproc/call/route_prob) that
% later solver iterations use to refresh service processes, think times,
% arrival processes and routing probabilities without rebuilding the model.
lqn = self.lqn;
jobPosKey = zeros(lqn.nidx,1);   % per-activity saved job position, read back by recurActGraph
curClassKey = cell(lqn.nidx,1);  % per-activity saved chain class, read back by recurActGraph
nreplicas = lqn.repl(idx);
%mult = lqn.mult;
mult = lqn.maxmult; % this removes spare capacity that cannot be used
lqn.mult = mult;
callservtproc = self.callservtproc;
model = Network(lqn.hashnames{idx});
model.setChecks(false); % fast mode
model.attribute = struct('hosts',[],'tasks',[],'entries',[],'activities',[],'calls',[],'serverIdx',0);
% A client Delay node is created only when it can receive jobs: host layers,
% or layers whose entries have at least one sync or async caller
if ishostlayer | any(any(lqn.issynccaller(callers, lqn.entriesof{idx}))) | any(any(lqn.isasynccaller(callers, lqn.entriesof{idx}))) %#ok<OR2>
    clientDelay = Delay(model, 'Clients');
    model.attribute.clientIdx = 1;
    model.attribute.serverIdx = 2;
    model.attribute.sourceIdx = NaN;
else
    model.attribute.serverIdx = 1;
    model.attribute.clientIdx = NaN;
    model.attribute.sourceIdx = NaN;
end
% One queueing station per replica of the server; replicas beyond the first
% get a ".m" suffix on the station name
serverStation = cell(1,nreplicas);
isfunctionlayer = all(lqn.isfunction(callers)) && ishostlayer;
for m=1:nreplicas
    if m == 1
        serverStation{m} = Queue(model,lqn.hashnames{idx}, lqn.sched(idx));
    else
        serverStation{m} = Queue(model,[lqn.hashnames{idx},'.',num2str(m)], lqn.sched(idx));
    end
    serverStation{m}.setNumberOfServers(mult(idx));
    serverStation{m}.attribute.ishost = ishostlayer;
    serverStation{m}.attribute.idx = idx;
end

iscachelayer = all(lqn.iscache(callers)) && ishostlayer;
if iscachelayer
    % NOTE(review): hashnames{callers} with a vector `callers` expands to a
    % comma-separated list - presumably cache layers have a single caller; verify
    cacheNode = Cache(model, lqn.hashnames{callers}, lqn.nitems(callers), lqn.itemcap{callers}, lqn.replacestrat(callers));
end

actsInCaller = lqn.actsof{callers};
isPostAndAct = full(lqn.actposttype)==ActivityPrecedenceType.POST_AND;
isPreAndAct = full(lqn.actpretype)==ActivityPrecedenceType.PRE_AND;
hasfork = any(intersect(find(isPostAndAct),actsInCaller));

maxfanout = 1; % maximum output parallelism level of fork nodes
for aidx = actsInCaller(:)'
    successors = find(lqn.graph(aidx,:));
    if any(isPostAndAct(successors))
        maxfanout = max(maxfanout, sum(isPostAndAct(successors)));
    end
end

if hasfork
    % One fork node shared by all post-AND precedences, with one output Router
    % per parallel branch
    forkNode = Fork(model, 'Fork_PostAnd');
    for f=1:maxfanout
        forkOutputRouter{f} = Router(model, ['Fork_PostAnd_',num2str(f)]);
    end
    forkClassStack = []; % stack with the entry class at the visited forks, the last visited is end of the list.
end

isPreAndAct = full(lqn.actpretype)==ActivityPrecedenceType.PRE_AND;
hasjoin = any(isPreAndAct(actsInCaller));
if hasjoin
    % NOTE(review): assumes hasfork holds whenever hasjoin does, otherwise
    % forkNode is undefined here - confirm pre-AND always pairs with post-AND
    joinNode = Join(model, 'Join_PreAnd', forkNode);
end

aidxClass = cell(1, lqn.nidx);      % closed class per task/entry/activity index
aidxThinkClass = cell(1, lqn.nidx); % auxiliary classes for activity think-time
cidxClass = cell(1,0);              % class per call index
cidxAuxClass = cell(1,0);           % auxiliary class per call index (callmean ~= nreplicas)

self.servt_classes_updmap{idx} = zeros(0,4); % [modelidx, actidx, node, class] % server classes to update
self.thinkt_classes_updmap{idx} = zeros(0,4); % [modelidx, actidx, node, class] % client classes to update
self.actthinkt_classes_updmap{idx} = zeros(0,4); % [modelidx, actidx, node, class] % activity think-time classes to update
self.arvproc_classes_updmap{idx} = zeros(0,4); % [modelidx, actidx, node, class] % classes to update in the next iteration for asynch calls
self.call_classes_updmap{idx} = zeros(0,4); % [modelidx, callidx, node, class] % calls classes to update in the next iteration (includes calls in client classes)
self.route_prob_updmap{idx} = zeros(0,7); % [modelidx, actidxfrom, actidxto, nodefrom, nodeto, classfrom, classto] % routing probabilities to update in the next iteration

if ishostlayer
    model.attribute.hosts(end+1,:) = [NaN, model.attribute.serverIdx ];
else
    model.attribute.tasks(end+1,:) = [NaN, model.attribute.serverIdx ];
end

hasSource = false; % flag whether a source is needed
openClasses = [];  % [class index, call mean, call index] rows for async-call open classes
entryOpenClasses = []; % track entry-level open arrivals
% first pass: create the classes
for tidx_caller = callers
    % For host layers, check if the task has any entries with sync/async callers
    % or has open arrivals, OR if any entry is a forwarding target.
    hasDirectCallers = false;
    isForwardingTarget = false;
    if ishostlayer
        % Check if the task is a reference task (always create closed class)
        if lqn.isref(tidx_caller)
            hasDirectCallers = true;
        else
            % Check if any entry of this task has sync or async callers
            for eidx = lqn.entriesof{tidx_caller}
                if any(full(lqn.issynccaller(:, eidx))) || any(full(lqn.isasynccaller(:, eidx)))
                    hasDirectCallers = true;
                    break;
                end
                % Also check for open arrivals on this entry
                if isfield(lqn, 'arrival') && ~isempty(lqn.arrival) && ...
                        iscell(lqn.arrival) && eidx <= length(lqn.arrival) && ...
                        ~isempty(lqn.arrival{eidx})
                    hasDirectCallers = true;
                    break;
                end
                % Check if this entry is a forwarding target
                for cidx = 1:lqn.ncalls
                    if full(lqn.calltype(cidx)) == CallType.FWD && full(lqn.callpair(cidx, 2)) == eidx
                        isForwardingTarget = true;
                        break;
                    end
                end
            end
        end
    end
    if (ishostlayer && hasDirectCallers) | any(any(lqn.issynccaller(tidx_caller, lqn.entriesof{idx}))) %#ok<OR2> % if it is only an asynch caller the closed classes are not needed
        if self.njobs(tidx_caller,idx) == 0
            % for each entry of the calling task
            % determine job population
            % this block matches the corresponding calculations in
            % updateThinkTimes
            njobs = mult(tidx_caller)*lqn.repl(tidx_caller);
            if isinf(njobs)
                % infinite-server caller: size the population from its own callers
                callers_of_tidx_caller = find(lqn.taskgraph(:,tidx_caller));
                njobs = sum(mult(callers_of_tidx_caller)); %#ok<FNDSB>
                if isinf(njobs)
                    % if also the callers of tidx_caller are inf servers, then use
                    % an heuristic
                    njobs = min(sum(mult(isfinite(mult)) .* lqn.repl(isfinite(mult))),1000); % Python parity: cap at 1000
                end
            end
            self.njobs(tidx_caller,idx) = njobs;
        else
            njobs = self.njobs(tidx_caller,idx);
        end
        % Closed class representing the caller task; it is the reference class
        % of its chain and carries the whole job population
        caller_name = lqn.hashnames{tidx_caller};
        aidxClass{tidx_caller} = ClosedClass(model, caller_name, njobs, clientDelay);
        clientDelay.setService(aidxClass{tidx_caller}, Disabled.getInstance());
        for m=1:nreplicas
            serverStation{m}.setService(aidxClass{tidx_caller}, Disabled.getInstance());
        end
        aidxClass{tidx_caller}.completes = false;
        aidxClass{tidx_caller}.setReferenceClass(true); % renormalize residence times using the visits to the task
        aidxClass{tidx_caller}.attribute = [LayeredNetworkElement.TASK, tidx_caller];
        model.attribute.tasks(end+1,:) = [aidxClass{tidx_caller}.index, tidx_caller];
        clientDelay.setService(aidxClass{tidx_caller}, self.thinkproc{tidx_caller});
        if ~lqn.isref(tidx_caller)
            self.thinkt_classes_updmap{idx}(end+1,:) = [idx, tidx_caller, 1, aidxClass{tidx_caller}.index];
        end
        for eidx = lqn.entriesof{tidx_caller}
            % create a class for the entry (zero population; jobs switch into it)
            aidxClass{eidx} = ClosedClass(model, lqn.hashnames{eidx}, 0, clientDelay);
            clientDelay.setService(aidxClass{eidx}, Disabled.getInstance());
            for m=1:nreplicas
                serverStation{m}.setService(aidxClass{eidx}, Disabled.getInstance());
            end
            aidxClass{eidx}.completes = false;
            aidxClass{eidx}.attribute = [LayeredNetworkElement.ENTRY, eidx];
            model.attribute.entries(end+1,:) = [aidxClass{eidx}.index, eidx];
            % entries have immediate service at the client; pick the Java
            % singleton when the model is backed by a Java object
            [singleton, javasingleton] = Immediate.getInstance();
            if isempty(model.obj)
                clientDelay.setService(aidxClass{eidx}, singleton);
            else
                clientDelay.setService(aidxClass{eidx}, javasingleton);
            end

            % Check for open arrival distribution on this entry
            if isfield(lqn, 'arrival') && ~isempty(lqn.arrival) && ...
                    iscell(lqn.arrival) && eidx <= length(lqn.arrival) && ...
                    ~isempty(lqn.arrival{eidx})

                if ~hasSource
                    hasSource = true;
                    model.attribute.sourceIdx = length(model.nodes)+1;
                    sourceStation = Source(model,'Source');
                    sinkStation = Sink(model,'Sink');
                end

                % Create open class for this entry
                openClassForEntry = OpenClass(model, [lqn.hashnames{eidx}, '_Open'], 0);
                sourceStation.setArrival(openClassForEntry, lqn.arrival{eidx});
                clientDelay.setService(openClassForEntry, Disabled.getInstance());

                % Use bound activity's service time (entries themselves have Immediate service)
                % Find activities bound to this entry via graph
                bound_act_indices = find(lqn.graph(eidx,:) > 0);
                if ~isempty(bound_act_indices)
                    % Use first bound activity's service time
                    bound_aidx = bound_act_indices(1);
                    for m=1:nreplicas
                        serverStation{m}.setService(openClassForEntry, self.servtproc{bound_aidx});
                    end
                else
                    % Fallback to entry service (should not happen in well-formed models)
                    for m=1:nreplicas
                        serverStation{m}.setService(openClassForEntry, self.servtproc{eidx});
                    end
                end

                % Track for routing setup later: [class_index, entry_index]
                entryOpenClasses(end+1,:) = [openClassForEntry.index, eidx];

                % Track: Use negative entry index to distinguish from call arrivals
                self.arvproc_classes_updmap{idx}(end+1,:) = [idx, -eidx, ...
                    model.getNodeIndex(sourceStation), openClassForEntry.index];

                openClassForEntry.completes = false;
                openClassForEntry.attribute = [LayeredNetworkElement.ENTRY, eidx];
            end
        end
    end

    % for each activity of the calling task
    for aidx = lqn.actsof{tidx_caller}
        if ishostlayer | any(any(lqn.issynccaller(tidx_caller, lqn.entriesof{idx}))) %#ok<OR2>
            % create a class for the activity
            aidxClass{aidx} = ClosedClass(model, lqn.hashnames{aidx}, 0, clientDelay);
            clientDelay.setService(aidxClass{aidx}, Disabled.getInstance());
            for m=1:nreplicas
                serverStation{m}.setService(aidxClass{aidx}, Disabled.getInstance());
            end
            aidxClass{aidx}.completes = false;
            aidxClass{aidx}.attribute = [LayeredNetworkElement.ACTIVITY, aidx];
            model.attribute.activities(end+1,:) = [aidxClass{aidx}.index, aidx];
            hidx = lqn.parent(lqn.parent(aidx)); % index of host processor
            if ~(ishostlayer && (hidx == idx))
                % set the host demand for the activity
                clientDelay.setService(aidxClass{aidx}, self.servtproc{aidx});
            end
            if lqn.sched(tidx_caller)~=SchedStrategy.REF % in 'ref' case the service activity is constant
                % updmap(end+1,:) = [idx, aidx, 1, idxClass{aidx}.index];
            end
            % NOTE(review): eidx here carries over its last value from the
            % entries loop above - confirm this is the intended entry index
            if iscachelayer && full(lqn.graph(eidx,aidx))
                clientDelay.setService(aidxClass{aidx}, self.servtproc{aidx});
            end

            % Create auxiliary think-time class if activity has think-time
            if ~isempty(lqn.actthink{aidx}) && lqn.actthink{aidx}.getMean() > GlobalConstants.FineTol
                aidxThinkClass{aidx} = ClosedClass(model, [lqn.hashnames{aidx},'.Think'], 0, clientDelay);
                aidxThinkClass{aidx}.completes = false;
                aidxThinkClass{aidx}.attribute = [LayeredNetworkElement.ACTIVITY, aidx];
                clientDelay.setService(aidxThinkClass{aidx}, lqn.actthink{aidx});
                for m=1:nreplicas
                    serverStation{m}.setService(aidxThinkClass{aidx}, Disabled.getInstance());
                end
                self.actthinkt_classes_updmap{idx}(end+1,:) = [idx, aidx, 1, aidxThinkClass{aidx}.index];
            end
        end
        % add a class for each outgoing call from this activity
        for cidx = lqn.callsof{aidx}
            callmean(cidx) = lqn.callproc{cidx}.getMean;
            switch lqn.calltype(cidx)
                case CallType.ASYNC
                    if lqn.parent(lqn.callpair(cidx,2)) == idx % add only if the target is serverStation
                        if ~hasSource % we need to add source and sink to the model
                            hasSource = true;
                            model.attribute.sourceIdx = length(model.nodes)+1;
                            sourceStation = Source(model,'Source');
                            sinkStation = Sink(model,'Sink');
                        end
                        cidxClass{cidx} = OpenClass(model, lqn.callhashnames{cidx}, 0);
                        sourceStation.setArrival(cidxClass{cidx}, Immediate.getInstance());
                        clientDelay.setService(cidxClass{cidx}, Disabled.getInstance());
                        for m=1:nreplicas
                            serverStation{m}.setService(cidxClass{cidx}, Immediate.getInstance());
                        end
                        openClasses(end+1,:) = [cidxClass{cidx}.index, callmean(cidx), cidx];
                        model.attribute.calls(end+1,:) = [cidxClass{cidx}.index, cidx, lqn.callpair(cidx,1), lqn.callpair(cidx,2)];
                        cidxClass{cidx}.completes = false;
                        cidxClass{cidx}.attribute = [LayeredNetworkElement.CALL, cidx];
                        % initial guess for the call service time, refined later
                        minRespT = 0;
                        for tidx_act = lqn.actsof{idx}
                            minRespT = minRespT + lqn.hostdem{tidx_act}.getMean; % upper bound, uses all activities not just the ones reachable by this entry
                        end
                        for m=1:nreplicas
                            serverStation{m}.setService(cidxClass{cidx}, Exp.fitMean(minRespT));
                        end
                    end
                case CallType.SYNC
                    cidxClass{cidx} = ClosedClass(model, lqn.callhashnames{cidx}, 0, clientDelay);
                    clientDelay.setService(cidxClass{cidx}, Disabled.getInstance());
                    for m=1:nreplicas
                        serverStation{m}.setService(cidxClass{cidx}, Disabled.getInstance());
                    end
                    model.attribute.calls(end+1,:) = [cidxClass{cidx}.index, cidx, lqn.callpair(cidx,1), lqn.callpair(cidx,2)];
                    cidxClass{cidx}.completes = false;
                    cidxClass{cidx}.attribute = [LayeredNetworkElement.CALL, cidx];
                    % initial guess for the call service time, refined later
                    minRespT = 0;
                    for tidx_act = lqn.actsof{idx}
                        minRespT = minRespT + lqn.hostdem{tidx_act}.getMean; % upper bound, uses all activities not just the ones reachable by this entry
                    end
                    for m=1:nreplicas
                        serverStation{m}.setService(cidxClass{cidx}, Exp.fitMean(minRespT));
                    end
            end

            % Auxiliary class needed when the mean number of calls differs
            % from the number of replicas (probabilistic repetition of the call)
            if callmean(cidx) ~= nreplicas
                switch lqn.calltype(cidx)
                    case CallType.SYNC
                        cidxAuxClass{cidx} = ClosedClass(model, [lqn.callhashnames{cidx},'.Aux'], 0, clientDelay);
                        cidxAuxClass{cidx}.completes = false;
                        cidxAuxClass{cidx}.attribute = [LayeredNetworkElement.CALL, cidx];
                        clientDelay.setService(cidxAuxClass{cidx}, Immediate.getInstance());
                        for m=1:nreplicas
                            serverStation{m}.setService(cidxAuxClass{cidx}, Disabled.getInstance());
                        end
                end
            end
        end
    end
end

% Ensure Source's sourceClasses and arrivalProcess arrays are properly sized for all classes
% This is needed because the Source may be created during class iteration
% when only some classes exist, and new closed classes added afterwards
% won't have corresponding entries in sourceClasses/arrivalProcess
if hasSource
    nClasses = model.getNumberOfClasses();
    for k = 1:nClasses
        if k > length(sourceStation.input.sourceClasses) || isempty(sourceStation.input.sourceClasses{k})
            sourceStation.input.sourceClasses{k} = {[], ServiceStrategy.LI, Disabled.getInstance()};
        end
        if k > length(sourceStation.arrivalProcess) || isempty(sourceStation.arrivalProcess{k})
            sourceStation.arrivalProcess{k} = Disabled.getInstance();
        end
    end
end

P = model.initRoutingMatrix;
% Routing for async-call open classes: Source feeds a random replica; at a
% replica the job either leaves to the Sink (prob p) or revisits a replica
if hasSource
    for o = 1:size(openClasses,1)
        oidx = openClasses(o,1);
        p = 1 / openClasses(o,2); % divide by mean number of calls, they go to a server at random
        for m=1:nreplicas
            P{model.classes{oidx}, model.classes{oidx}}(sourceStation,serverStation{m}) = 1/nreplicas;
            for n=1:nreplicas
                P{model.classes{oidx}, model.classes{oidx}}(serverStation{m},serverStation{n}) = (1-p)/nreplicas;
            end
            P{model.classes{oidx}, model.classes{oidx}}(serverStation{m},sinkStation) = p;
        end
        cidx = openClasses(o,3); % 3 = source
        self.arvproc_classes_updmap{idx}(end+1,:) = [idx, cidx, model.getNodeIndex(sourceStation), oidx];
        for m=1:nreplicas
            self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, model.getNodeIndex(serverStation{m}), oidx];
        end
    end
end

%% job positions are encoded as follows: 1=client, 2=any of the nreplicas server stations, 3=cache node, 4=fork node, 5=join node
atClient = 1;
atServer = 2;
atCache = 3;

jobPos = atClient; % start at client
% second pass: setup the routing out of entries
for tidx_caller = callers
    % Use same condition as first pass - only process if closed class was created
    hasDirectCallers = false;
    if ishostlayer
        if lqn.isref(tidx_caller)
            hasDirectCallers = true;
        else
            for eidx_check = lqn.entriesof{tidx_caller}
                if any(full(lqn.issynccaller(:, eidx_check))) || any(full(lqn.isasynccaller(:, eidx_check)))
                    hasDirectCallers = true;
                    break;
                end
                if isfield(lqn, 'arrival') && ~isempty(lqn.arrival) && ...
                        iscell(lqn.arrival) && eidx_check <= length(lqn.arrival) && ...
                        ~isempty(lqn.arrival{eidx_check})
                    hasDirectCallers = true;
                    break;
                end
            end
        end
    end
    if (ishostlayer && hasDirectCallers) | any(any(lqn.issynccaller(tidx_caller, lqn.entriesof{idx}))) %#ok<OR2>
        % for each entry of the calling task
        ncaller_entries = length(lqn.entriesof{tidx_caller});
        for eidx = lqn.entriesof{tidx_caller}
            aidxClass_eidx = aidxClass{eidx};
            aidxClass_tidx_caller = aidxClass{tidx_caller};
            % initialize the probability to select an entry to be identical
            P{aidxClass_tidx_caller, aidxClass_eidx}(clientDelay, clientDelay) = 1 / ncaller_entries;
            if ncaller_entries > 1
                % at successive iterations make sure to replace this with throughput ratio
                self.route_prob_updmap{idx}(end+1,:) = [idx, tidx_caller, eidx, 1, 1, aidxClass_tidx_caller.index, aidxClass_eidx.index];
            end
            P = recurActGraph(P, tidx_caller, eidx, aidxClass_eidx, jobPos);
        end
    end
end

% Setup routing for entry-level open arrivals (AFTER recurActGraph to avoid being overwritten)
if hasSource && ~isempty(entryOpenClasses)
    for e = 1:size(entryOpenClasses,1)
        eoidx = entryOpenClasses(e,1); % class index
        openClass = model.classes{eoidx};

        % Explicitly set routing: ONLY Source → Server → Sink
        % Zero out all routing for this class first
        for node1 = 1:length(model.nodes)
            for node2 = 1:length(model.nodes)
                P{openClass, openClass}(node1, node2) = 0;
            end
        end

        % Now set the correct routing
        for m=1:nreplicas
            % Route: Source → ServerStation → Sink
            P{openClass, openClass}(sourceStation,serverStation{m}) = 1/nreplicas;
            P{openClass, openClass}(serverStation{m},sinkStation) = 1.0;
        end
    end
end

model.link(P);
self.ensemble{idx} = model;
426
    function [P, curClass, jobPos] = recurActGraph(P, tidx_caller, aidx, curClass, jobPos)
        % RECURACTGRAPH Recursively translate the activity graph of tidx_caller,
        % starting from node aidx, into routing-matrix entries in P.
        %
        % Inputs/outputs:
        %   P        - cell routing matrix being built (returned updated)
        %   curClass - chain class the job currently travels in; on return, the
        %              last class of the visited branch
        %   jobPos   - current node position of the job (atClient/atServer/atCache)
        %
        % Nested function: shares the workspace of buildLayersRecursive (lqn,
        % model, aidxClass, serverStation, forkClassStack, updmap tables, ...).
        jobPosKey(aidx) = jobPos;
        curClassKey{aidx} = curClass;
        nextaidxs = find(lqn.graph(aidx,:)); % these include the called entries
        if ~isempty(nextaidxs)
            isNextPrecFork(aidx) = any(isPostAndAct(nextaidxs)); % indexed on aidx to avoid losing it during the recursion
        end

        for nextaidx = nextaidxs % for all successor activities
            if ~isempty(nextaidx)
                isLoop = false;
                % in the activity graph, the following if is entered only
                % by an edge that is the return from a LOOP activity
                if (lqn.graph(aidx,nextaidx) ~= lqn.dag(aidx,nextaidx))
                    isLoop = true;
                end
                if ~(lqn.parent(aidx) == lqn.parent(nextaidx)) % if different parent task
                    % if the successor activity is an entry of another task, this is a call
                    cidx = matchrow(lqn.callpair,[aidx,nextaidx]); % find the call index
                    switch lqn.calltype(cidx)
                        case CallType.ASYNC
                            % Async calls don't modify caller routing - caller continues immediately without blocking.
                            % Arrival rate at destination is handled via arvproc_classes_updmap (lines 170-195, 230-248).
                        case CallType.SYNC
                            % routeSynchCall reads cidx/callmean from this workspace
                            [P, jobPos, curClass] = routeSynchCall(P, jobPos, curClass);
                        case CallType.FWD
                            % Forwarding: after the source entry's activity completes,
                            % route to the forwarding target's bound activity on the same processor
                            if ishostlayer
                                target_entry = nextaidx;
                                target_bound_acts = find(lqn.graph(target_entry, :));
                                if ~isempty(target_bound_acts)
                                    target_aidx = target_bound_acts(1); % First bound activity
                                    fwd_prob = full(lqn.graph(aidx, target_entry)); % Forwarding probability

                                    % Create class for target activity if needed
                                    if isempty(aidxClass{target_aidx})
                                        aidxClass{target_aidx} = ClosedClass(model, lqn.hashnames{target_aidx}, 0, clientDelay);
                                        clientDelay.setService(aidxClass{target_aidx}, Disabled.getInstance());
                                        for m=1:nreplicas
                                            serverStation{m}.setService(aidxClass{target_aidx}, Disabled.getInstance());
                                        end
                                        aidxClass{target_aidx}.completes = false;
                                        aidxClass{target_aidx}.attribute = [LayeredNetworkElement.ACTIVITY, target_aidx];
                                        model.attribute.activities(end+1,:) = [aidxClass{target_aidx}.index, target_aidx];
                                    end

                                    % Route from current position to target activity at server
                                    for m=1:nreplicas
                                        if jobPos == atClient
                                            P{curClass, aidxClass{target_aidx}}(clientDelay, serverStation{m}) = fwd_prob / nreplicas;
                                        else
                                            P{curClass, aidxClass{target_aidx}}(serverStation{m}, serverStation{m}) = fwd_prob;
                                        end
                                        serverStation{m}.setService(aidxClass{target_aidx}, lqn.hostdem{target_aidx});
                                    end
                                    self.servt_classes_updmap{idx}(end+1,:) = [idx, target_aidx, 2, aidxClass{target_aidx}.index];

                                    jobPos = atServer;
                                    curClass = aidxClass{target_aidx};

                                    % Check if target entry also has forwarding (chained forwarding)
                                    target_nextaidxs = find(lqn.graph(target_entry, :));
                                    for target_nextaidx = target_nextaidxs
                                        if target_nextaidx ~= target_aidx % Skip the bound activity we just processed
                                            % Check if this is another forwarding call
                                            target_cidx = matchrow(lqn.callpair, [target_entry, target_nextaidx]);
                                            if ~isempty(target_cidx) && lqn.calltype(target_cidx) == CallType.FWD
                                                % Recursively handle chained forwarding
                                                [P, curClass, jobPos] = recurActGraph(P, tidx_caller, target_entry, curClass, jobPos);
                                            end
                                        end
                                    end
                                end
                            end
                    end
                else
                    % at this point, we have processed all calls, let us do the
                    % activities local to the task next
                    if isempty(intersect(lqn.eshift+(1:lqn.nentries), nextaidxs))
                        % if next activity is not an entry: restore the position
                        % and class saved when aidx was first visited
                        jobPos = jobPosKey(aidx);
                        curClass = curClassKey{aidx};
                    else
                        % NOTE(review): curClassC is only assigned when the previous
                        % successor was an entry; otherwise its earlier value is reused
                        if ismember(nextaidxs(find(nextaidxs==nextaidx)-1), lqn.eshift+(1:lqn.nentries))
                            curClassC = curClass;
                        end
                        jobPos = atClient;
                        curClass = curClassC;
                    end
                    if jobPos == atClient % at client node
                        if ishostlayer
                            if ~iscachelayer
                                for m=1:nreplicas
                                    if isNextPrecFork(aidx)
                                        % if next activity is a post-and
                                        P{curClass, curClass}(clientDelay, forkNode) = 1.0;
                                        f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
                                        forkClassStack(end+1) = curClass.index;
                                        P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
                                        P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, serverStation{m}) = 1.0;
                                    else
                                        if isPreAndAct(aidx)
                                            % before entering the job we go back to the entry class at the last fork
                                            forkClass = model.classes{forkClassStack(end)};
                                            forkClassStack(end) = [];
                                            P{curClass, forkClass}(clientDelay,joinNode) = 1.0;
                                            P{forkClass, aidxClass{nextaidx}}(joinNode,serverStation{m}) = 1.0;
                                        else
                                            P{curClass, aidxClass{nextaidx}}(clientDelay,serverStation{m}) = full(lqn.graph(aidx,nextaidx));
                                        end
                                    end
                                    serverStation{m}.setService(aidxClass{nextaidx}, lqn.hostdem{nextaidx});
                                    if isfunctionlayer
                                        serverStation{m}.setDelayOff(aidxClass{nextaidx}, lqn.setuptime{lqn.parent(nextaidx)}, lqn.delayofftime{lqn.parent(nextaidx)});
                                    end
                                end
                                jobPos = atServer;
                                curClass = aidxClass{nextaidx};
                                self.servt_classes_updmap{idx}(end+1,:) = [idx, nextaidx, 2, aidxClass{nextaidx}.index];
                            else
                                % cache layer: the job visits the cache node, which
                                % switches it into a hit or miss class
                                P{curClass, aidxClass{nextaidx}}(clientDelay,cacheNode) = full(lqn.graph(aidx,nextaidx));

                                cacheNode.setReadItemEntry(aidxClass{nextaidx},lqn.itemproc{aidx},lqn.nitems(aidx));
                                lqn.hitmissaidx = find(lqn.graph(nextaidx,:));
                                lqn.hitaidx = lqn.hitmissaidx(1);
                                lqn.missaidx = lqn.hitmissaidx(2);

                                cacheNode.setHitClass(aidxClass{nextaidx},aidxClass{lqn.hitaidx});
                                cacheNode.setMissClass(aidxClass{nextaidx},aidxClass{lqn.missaidx});

                                jobPos = atCache; % cache
                                curClass = aidxClass{nextaidx};
                                %self.route_prob_updmap{idx}(end+1,:) = [idx, nextaidx, lqn.hitaidx, 3, 3, aidxClass{nextaidx}.index, aidxClass{lqn.hitaidx}.index];
                                %self.route_prob_updmap{idx}(end+1,:) = [idx, nextaidx, lqn.missaidx, 3, 3, aidxClass{nextaidx}.index, aidxClass{lqn.missaidx}.index];
                            end
                        else % not ishostlayer
                            if isNextPrecFork(aidx)
                                % if next activity is a post-and
                                P{curClass, curClass}(clientDelay, forkNode) = 1.0;
                                f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
                                forkClassStack(end+1) = curClass.index;
                                P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
                                P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, clientDelay) = 1.0;
                            else
                                if isPreAndAct(aidx)
                                    % before entering the job we go back to the entry class at the last fork
                                    forkClass = model.classes{forkClassStack(end)};
                                    forkClassStack(end) = [];
                                    P{curClass, forkClass}(clientDelay,joinNode) = 1.0;
                                    P{forkClass, aidxClass{nextaidx}}(joinNode,clientDelay) = 1.0;
                                else
                                    P{curClass, aidxClass{nextaidx}}(clientDelay,clientDelay) = full(lqn.graph(aidx,nextaidx));
                                end
                            end
                            jobPos = atClient;
                            curClass = aidxClass{nextaidx};
                            clientDelay.setService(aidxClass{nextaidx}, self.servtproc{nextaidx});
                            self.thinkt_classes_updmap{idx}(end+1,:) = [idx, nextaidx, 1, aidxClass{nextaidx}.index];
                        end
                    elseif jobPos == atServer || jobPos == atCache % at server station
                        if ishostlayer
                            if iscachelayer
                                curClass = aidxClass{nextaidx};
                                for m=1:nreplicas
                                    if isNextPrecFork(aidx)
                                        % if next activity is a post-and
                                        P{curClass, curClass}(cacheNode, forkNode) = 1.0;
                                        f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
                                        forkClassStack(end+1) = curClass.index;
                                        P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
                                        P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, serverStation{m}) = 1.0;
                                    else
                                        if isPreAndAct(aidx)
                                            % before entering the job we go back to the entry class at the last fork
                                            forkClass = model.classes{forkClassStack(end)};
                                            forkClassStack(end) = [];

                                            P{curClass, forkClass}(cacheNode,joinNode) = 1.0;
                                            P{forkClass, aidxClass{nextaidx}}(joinNode,serverStation{m}) = 1.0;
                                        else
                                            P{curClass, aidxClass{nextaidx}}(cacheNode,serverStation{m}) = full(lqn.graph(aidx,nextaidx));
                                        end
                                    end
                                    serverStation{m}.setService(aidxClass{nextaidx}, lqn.hostdem{nextaidx});
                                    %self.route_prob_updmap{idx}(end+1,:) = [idx, nextaidx, nextaidx, 3, 2, aidxClass{nextaidx}.index, aidxClass{nextaidx}.index];
                                end
                            else
                                for m=1:nreplicas
                                    if isNextPrecFork(aidx)
                                        % if next activity is a post-and
                                        P{curClass, curClass}(serverStation{m}, forkNode) = 1.0;
                                        f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
                                        forkClassStack(end+1) = curClass.index;
                                        P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
                                        P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, serverStation{m}) = 1.0;
                                    else
                                        if isPreAndAct(aidx)
                                            % before entering the job we go back to the entry class at the last fork
                                            forkClass = model.classes{forkClassStack(end)};
                                            forkClassStack(end) = [];
                                            P{curClass, forkClass}(serverStation{m},joinNode) = 1.0;
                                            P{forkClass, aidxClass{nextaidx}}(joinNode,serverStation{m}) = 1.0;
                                        else
                                            P{curClass, aidxClass{nextaidx}}(serverStation{m},serverStation{m}) = full(lqn.graph(aidx,nextaidx));
                                        end
                                    end
                                    serverStation{m}.setService(aidxClass{nextaidx}, lqn.hostdem{nextaidx});
                                end
                            end
                            jobPos = atServer;
                            curClass = aidxClass{nextaidx};
                            self.servt_classes_updmap{idx}(end+1,:) = [idx, nextaidx, 2, aidxClass{nextaidx}.index];
                        else
                            % task layer: local activity executes back at the client delay
                            for m=1:nreplicas
                                if isNextPrecFork(aidx)
                                    % if next activity is a post-and
                                    P{curClass, curClass}(serverStation{m}, forkNode) = 1.0;
                                    f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
                                    forkClassStack(end+1) = curClass.index;
                                    P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
                                    P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, clientDelay) = 1.0;
                                else
                                    if isPreAndAct(aidx)
                                        % before entering the job we go back to the entry class at the last fork
                                        forkClass = model.classes{forkClassStack(end)};
                                        forkClassStack(end) = [];
                                        P{curClass, forkClass}(serverStation{m},joinNode) = 1.0;
                                        P{forkClass, aidxClass{nextaidx}}(joinNode,clientDelay) = 1.0;
                                    else
                                        P{curClass, aidxClass{nextaidx}}(serverStation{m},clientDelay) = full(lqn.graph(aidx,nextaidx));
                                    end
                                end
                            end
                            jobPos = atClient;
                            curClass = aidxClass{nextaidx};
                            clientDelay.setService(aidxClass{nextaidx}, self.servtproc{nextaidx});
                            self.thinkt_classes_updmap{idx}(end+1,:) = [idx, nextaidx, 1, aidxClass{nextaidx}.index];
                        end
                    end
                    if aidx ~= nextaidx && ~isLoop
                        %% now recursively build the rest of the routing matrix graph
                        [P, curClass, jobPos] = recurActGraph(P, tidx_caller, nextaidx, curClass, jobPos);

                        % At this point curClassRec is the last class in the
                        % recursive branch, which we now close with a reply
                        % back into the caller task class; .Aux classes do not
                        % mark chain completion
                        if jobPos == atClient
                            P{curClass, aidxClass{tidx_caller}}(clientDelay,clientDelay) = 1;
                            if ~strcmp(curClass.name(end-3:end),'.Aux')
                                curClass.completes = true;
                            end
                        else
                            for m=1:nreplicas
                                P{curClass, aidxClass{tidx_caller}}(serverStation{m},clientDelay) = 1;
                            end
                            if ~strcmp(curClass.name(end-3:end),'.Aux')
                                curClass.completes = true;
                            end
                        end
                    end
                end
            end
        end % nextaidx
    end
691
    function [P, jobPos, curClass] = routeSynchCall(P, jobPos, curClass)
        % ROUTESYNCHCALL Add routing entries for one synchronous call.
        %
        % Nested function: the call index cidx and callmean(cidx) are read from
        % the workspace of the enclosing recurActGraph invocation. The routing
        % depends on (a) whether the call targets an entry of this layer's
        % server (lqn.parent(callpair(cidx,2)) == idx) or an entry served in
        % another layer, (b) the current job position, and (c) whether the mean
        % number of calls is below, equal to, or above nreplicas (the
        % cidxAuxClass auxiliary class implements the probabilistic repetition
        % or skipping of the call in the ~= case).
        switch jobPos
            case atClient
                if lqn.parent(lqn.callpair(cidx,2)) == idx
                    % if a call to an entry of the server in this layer
                    if callmean(cidx) < nreplicas
                        P{curClass, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1 - callmean(cidx); % note that callmean(cidx) < nreplicas
                        for m=1:nreplicas
                            % if isNextPrecFork(aidx)
                            % end
                            % % if next activity is a post-and
                            % P{curClass, curClass}(serverStation{m}, forkNode) = 1.0;
                            % forkStackClass(end+1) = curClass.index;
                            % f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
                            % P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
                            % P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, clientDelay) = 1.0;
                            % else
                            P{curClass, cidxClass{cidx}}(clientDelay,serverStation{m}) = callmean(cidx) / nreplicas;
                            P{cidxClass{cidx}, cidxClass{cidx}}(serverStation{m},clientDelay) = 1.0; % not needed, just to avoid leaving the Aux class disconnected
                        end
                        P{cidxAuxClass{cidx}, cidxClass{cidx}}(clientDelay,clientDelay) = 1.0; % not needed, just to avoid leaving the Aux class disconnected
                    elseif callmean(cidx) == nreplicas
                        for m=1:nreplicas
                            P{curClass, cidxClass{cidx}}(clientDelay,serverStation{m}) = 1 / nreplicas;
                            P{cidxClass{cidx}, cidxClass{cidx}}(serverStation{m},clientDelay) = 1.0;
                        end
                    else % callmean(cidx) > nreplicas
                        % geometric repetition of the call via the Aux class
                        for m=1:nreplicas
                            P{curClass, cidxClass{cidx}}(clientDelay,serverStation{m}) = 1 / nreplicas;
                            P{cidxClass{cidx}, cidxAuxClass{cidx}}(serverStation{m},clientDelay) = 1.0 ;
                            P{cidxAuxClass{cidx}, cidxClass{cidx}}(clientDelay,serverStation{m}) = 1.0 - 1.0 / (callmean(cidx) / nreplicas);
                        end
                        % NOTE(review): exit probability divides by callmean(cidx)
                        % while the repeat branch divides by callmean/nreplicas -
                        % confirm rows sum to one when nreplicas > 1
                        P{cidxAuxClass{cidx}, cidxClass{cidx}}(clientDelay,clientDelay) = 1.0 / (callmean(cidx));
                    end
                    jobPos = atClient;
                    clientDelay.setService(cidxClass{cidx}, Immediate.getInstance());
                    for m=1:nreplicas
                        serverStation{m}.setService(cidxClass{cidx}, callservtproc{cidx});
                        self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, model.getNodeIndex(serverStation{m}), cidxClass{cidx}.index];
                    end
                    curClass = cidxClass{cidx};
                else
                    % if it is not a call to an entry of the server
                    if callmean(cidx) < nreplicas
                        P{curClass, cidxClass{cidx}}(clientDelay,clientDelay) = callmean(cidx)/nreplicas; % the mean number of calls is now embedded in the demand
                        P{cidxClass{cidx}, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1; % the mean number of calls is now embedded in the demand
                        P{curClass, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1 - callmean(cidx)/nreplicas; % the mean number of calls is now embedded in the demand
                        curClass = cidxAuxClass{cidx};
                    elseif callmean(cidx) == nreplicas
                        P{curClass, cidxClass{cidx}}(clientDelay,clientDelay) = 1;
                        curClass = cidxClass{cidx};
                    else % callmean(cidx) > nreplicas
                        P{curClass, cidxClass{cidx}}(clientDelay,clientDelay) = 1; % the mean number of calls is now embedded in the demand
                        P{cidxClass{cidx}, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1;% / (callmean(cidx)/nreplicas); % the mean number of calls is now embedded in the demand
                        curClass = cidxAuxClass{cidx};
                    end
                    jobPos = atClient;
                    clientDelay.setService(cidxClass{cidx}, callservtproc{cidx});
                    self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, 1, cidxClass{cidx}.index];
                end
            case atServer % job at server
                if lqn.parent(lqn.callpair(cidx,2)) == idx
                    % if it is a call to an entry of the server
                    if callmean(cidx) < nreplicas
                        for m=1:nreplicas
                            P{curClass, cidxClass{cidx}}(serverStation{m},clientDelay) = 1 - callmean(cidx);
                            P{curClass, cidxClass{cidx}}(serverStation{m},serverStation{m}) = callmean(cidx);
                            serverStation{m}.setService(cidxClass{cidx}, callservtproc{cidx});
                        end
                        jobPos = atClient;
                        curClass = cidxAuxClass{cidx};
                    elseif callmean(cidx) == nreplicas
                        for m=1:nreplicas
                            P{curClass, cidxClass{cidx}}(serverStation{m},serverStation{m}) = 1;
                        end
                        jobPos = atServer;
                        curClass = cidxClass{cidx};
                    else % callmean(cidx) > nreplicas
                        % repeat the call at the server with probability 1-1/callmean
                        for m=1:nreplicas
                            P{curClass, cidxClass{cidx}}(serverStation{m},serverStation{m}) = 1;
                            P{cidxClass{cidx}, cidxClass{cidx}}(serverStation{m},serverStation{m}) = 1 - 1 / (callmean(cidx));
                            P{cidxClass{cidx}, cidxAuxClass{cidx}}(serverStation{m},clientDelay) = 1 / (callmean(cidx));
                        end
                        jobPos = atClient;
                        curClass = cidxAuxClass{cidx};
                    end
                    for m=1:nreplicas
                        serverStation{m}.setService(cidxClass{cidx}, callservtproc{cidx});
                        self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, model.getNodeIndex(serverStation{m}), cidxClass{cidx}.index];
                    end
                else
                    % if it is not a call to an entry of the server
                    % callmean not needed since we switched
                    % to ResidT to model service time at client
                    if callmean(cidx) < nreplicas
                        for m=1:nreplicas
                            P{curClass, cidxClass{cidx}}(serverStation{m},clientDelay) = 1;
                        end
                        P{cidxClass{cidx}, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1;
                        curClass = cidxAuxClass{cidx};
                    elseif callmean(cidx) == nreplicas
                        for m=1:nreplicas
                            P{curClass, cidxClass{cidx}}(serverStation{m},clientDelay) = 1;
                        end
                        curClass = cidxClass{cidx};
                    else % callmean(cidx) > nreplicas
                        for m=1:nreplicas
                            P{curClass, cidxClass{cidx}}(serverStation{m},clientDelay) = 1;
                        end
                        P{cidxClass{cidx}, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1;
                        curClass = cidxAuxClass{cidx};
                    end
                    jobPos = atClient;
                    clientDelay.setService(cidxClass{cidx}, callservtproc{cidx});
                    self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, 1, cidxClass{cidx}.index];
                end
        end

        % After synch call returns, route through activity think-time class if applicable
        callingAidx = lqn.callpair(cidx, 1); % source activity of the call
        if ~isempty(aidxThinkClass{callingAidx})
            % Route from current class to think-time class at client
            P{curClass, aidxThinkClass{callingAidx}}(clientDelay, clientDelay) = 1.0;
            curClass = aidxThinkClass{callingAidx};
            jobPos = atClient;
        end
    end
819end
Definition mmt.m:92