LINE Solver
MATLAB API documentation
Loading...
Searching...
No Matches
buildLayersRecursive.m
1function buildLayersRecursive(self, idx, callers, ishostlayer)
% BUILDLAYERSRECURSIVE Build the layer submodel (a Network) for server idx.
%   buildLayersRecursive(self, idx, callers, ishostlayer) assembles the
%   queueing network for LQN layer idx: the tasks in CALLERS act as clients
%   (a Delay named 'Clients') and idx acts as the server (one Queue per
%   replica). The finished model is stored in self.ensemble{idx}, and the
%   per-layer update maps (servt_classes_updmap, thinkt_classes_updmap,
%   actthinkt_classes_updmap, arvproc_classes_updmap, call_classes_updmap,
%   route_prob_updmap) are (re)initialized and filled for later iterations.
%   NOTE(review): despite the name, this top-level function does not call
%   itself; the recursion happens in the nested helper recurActGraph below.
2lqn = self.lqn;
% Per-activity bookkeeping used by recurActGraph to restore state when the
% recursion returns to a branch point.
3jobPosKey = zeros(lqn.nidx,1);
4curClassKey = cell(lqn.nidx,1);
5nreplicas = lqn.repl(idx);
6%mult = lqn.mult;
7mult = lqn.maxmult; % this removes spare capacity that cannot be used
8lqn.mult = mult;
9callservtproc = self.callservtproc;
10model = Network(lqn.hashnames{idx});
11model.setChecks(false); % fast mode
% attribute maps model-level class/node indices back to LQN element indices
12model.attribute = struct('hosts',[],'tasks',[],'entries',[],'activities',[],'calls',[],'serverIdx',0);
% A client Delay node is only needed if this is a host layer or some caller
% issues sync/async calls into an entry of idx; node 1 is then the client.
13if ishostlayer | any(any(lqn.issynccaller(callers, lqn.entriesof{idx}))) | any(any(lqn.isasynccaller(callers, lqn.entriesof{idx}))) %#ok<OR2>
14    clientDelay = Delay(model, 'Clients');
15    model.attribute.clientIdx = 1;
16    model.attribute.serverIdx = 2;
17    model.attribute.sourceIdx = NaN;
18else
19    model.attribute.serverIdx = 1;
20    model.attribute.clientIdx = NaN;
21    model.attribute.sourceIdx = NaN;
22end
% One server Queue per replica of idx; replicas beyond the first get a
% '.m' suffix on the name.
23serverStation = cell(1,nreplicas);
24isfunctionlayer = all(lqn.isfunction(callers)) && ishostlayer;
25for m=1:nreplicas
26    if m == 1
27        serverStation{m} = Queue(model,lqn.hashnames{idx}, lqn.sched(idx));
28    else
29        serverStation{m} = Queue(model,[lqn.hashnames{idx},'.',num2str(m)], lqn.sched(idx));
30    end
31    serverStation{m}.setNumberOfServers(mult(idx));
32    serverStation{m}.attribute.ishost = ishostlayer;
33    serverStation{m}.attribute.idx = idx;
34end
35
36iscachelayer = all(lqn.iscache(callers)) && ishostlayer;
37if iscachelayer
38    cacheNode = Cache(model, lqn.hashnames{callers}, lqn.nitems(callers), lqn.itemcap{callers}, lqn.replacestrat(callers));
39end
40
% Fork/join detection: POST_AND successors require a Fork node, PRE_AND
% activities require a Join node.
41actsInCaller = lqn.actsof{callers};
42isPostAndAct = full(lqn.actposttype)==ActivityPrecedenceType.POST_AND;
43isPreAndAct = full(lqn.actpretype)==ActivityPrecedenceType.PRE_AND;
44hasfork = any(intersect(find(isPostAndAct),actsInCaller));
45
46maxfanout = 1; % maximum output parallelism level of fork nodes
47for aidx = actsInCaller(:)'
48    successors = find(lqn.graph(aidx,:));
49    if any(isPostAndAct(successors))
50        maxfanout = max(maxfanout, sum(isPostAndAct(successors)));
51    end
52end
53
54if hasfork
55    forkNode = Fork(model, 'Fork_PostAnd');
56    for f=1:maxfanout
57        forkOutputRouter{f} = Router(model, ['Fork_PostAnd_',num2str(f)]);
58    end
59    forkClassStack = []; % stack with the entry class at the visited forks, the last visited is end of the list.
60end
61
% NOTE(review): isPreAndAct is recomputed here identically to line 43 above.
62isPreAndAct = full(lqn.actpretype)==ActivityPrecedenceType.PRE_AND;
63hasjoin = any(isPreAndAct(actsInCaller));
64if hasjoin
65    joinNode = Join(model, 'Join_PreAnd', forkNode);
66end
67
% Class containers: aidxClass is indexed by LQN element index (task, entry,
% or activity); cidxClass/cidxAuxClass by call index.
68aidxClass = cell(1, lqn.nidx);
69aidxThinkClass = cell(1, lqn.nidx); % auxiliary classes for activity think-time
70cidxClass = cell(1,0);
71cidxAuxClass = cell(1,0);
72
73self.servt_classes_updmap{idx} = zeros(0,4); % [modelidx, actidx, node, class] % server classes to update
74self.thinkt_classes_updmap{idx} = zeros(0,4); % [modelidx, actidx, node, class] % client classes to update
75self.actthinkt_classes_updmap{idx} = zeros(0,4); % [modelidx, actidx, node, class] % activity think-time classes to update
76self.arvproc_classes_updmap{idx} = zeros(0,4); % [modelidx, actidx, node, class] % classes to update in the next iteration for asynch calls
77self.call_classes_updmap{idx} = zeros(0,4); % [modelidx, callidx, node, class] % calls classes to update in the next iteration (includes calls in client classes)
78self.route_prob_updmap{idx} = zeros(0,7); % [modelidx, actidxfrom, actidxto, nodefrom, nodeto, classfrom, classto] % routing probabilities to update in the next iteration
79
80if ishostlayer
81    model.attribute.hosts(end+1,:) = [NaN, model.attribute.serverIdx ];
82else
83    model.attribute.tasks(end+1,:) = [NaN, model.attribute.serverIdx ];
84end
85
86hasSource = false; % flag whether a source is needed
87openClasses = []; % rows: [class index, call mean, call index] for async calls
88entryOpenClasses = []; % track entry-level open arrivals
89% first pass: create the classes
90for tidx_caller = callers
91    % For host layers, check if the task has any entries with sync/async callers
92    % or has open arrivals, OR if any entry is a forwarding target.
93    hasDirectCallers = false;
94    isForwardingTarget = false;
95    if ishostlayer
96        % Check if the task is a reference task (always create closed class)
97        if lqn.isref(tidx_caller)
98            hasDirectCallers = true;
99        else
100            % Check if any entry of this task has sync or async callers
101            for eidx = lqn.entriesof{tidx_caller}
102                if any(full(lqn.issynccaller(:, eidx))) || any(full(lqn.isasynccaller(:, eidx)))
103                    hasDirectCallers = true;
104                    break;
105                end
106                % Also check for open arrivals on this entry
107                if isfield(lqn, 'arrival') && ~isempty(lqn.arrival) && ...
108                        iscell(lqn.arrival) && eidx <= length(lqn.arrival) && ...
109                        ~isempty(lqn.arrival{eidx})
110                    hasDirectCallers = true;
111                    break;
112                end
113                % Check if this entry is a forwarding target
114                for cidx = 1:lqn.ncalls
115                    if full(lqn.calltype(cidx)) == CallType.FWD && full(lqn.callpair(cidx, 2)) == eidx
116                        isForwardingTarget = true;
117                        break;
118                    end
119                end
120            end
121        end
122    end
123    if (ishostlayer && hasDirectCallers) | any(any(lqn.issynccaller(tidx_caller, lqn.entriesof{idx}))) %#ok<OR2> % if it is only an asynch caller the closed classes are not needed
124        if self.njobs(tidx_caller,idx) == 0
125            % for each entry of the calling task
126            % determine job population
127            % this block matches the corresponding calculations in
128            % updateThinkTimes
129            njobs = mult(tidx_caller)*lqn.repl(tidx_caller);
130            if isinf(njobs)
131                callers_of_tidx_caller = find(lqn.taskgraph(:,tidx_caller));
132                njobs = sum(mult(callers_of_tidx_caller)); %#ok<FNDSB>
133                if isinf(njobs)
134                    % if also the callers of tidx_caller are inf servers, then use
135                    % an heuristic
136                    njobs = min(sum(mult(isfinite(mult)) .* lqn.repl(isfinite(mult))),1e6);
137                end
138            end
139            self.njobs(tidx_caller,idx) = njobs;
140        else
141            njobs = self.njobs(tidx_caller,idx);
142        end
143        caller_name = lqn.hashnames{tidx_caller};
% One closed class per caller task; all jobs start at the client Delay.
144        aidxClass{tidx_caller} = ClosedClass(model, caller_name, njobs, clientDelay);
145        clientDelay.setService(aidxClass{tidx_caller}, Disabled.getInstance());
146        for m=1:nreplicas
147            serverStation{m}.setService(aidxClass{tidx_caller}, Disabled.getInstance());
148        end
149        aidxClass{tidx_caller}.completes = false;
150        aidxClass{tidx_caller}.setReferenceClass(true); % renormalize residence times using the visits to the task
151        aidxClass{tidx_caller}.attribute = [LayeredNetworkElement.TASK, tidx_caller];
152        model.attribute.tasks(end+1,:) = [aidxClass{tidx_caller}.index, tidx_caller];
153        %self.thinkproc
154        clientDelay.setService(aidxClass{tidx_caller}, self.thinkproc{tidx_caller});
155        if ~lqn.isref(tidx_caller)
156            self.thinkt_classes_updmap{idx}(end+1,:) = [idx, tidx_caller, 1, aidxClass{tidx_caller}.index];
157        end
158        for eidx = lqn.entriesof{tidx_caller}
159            % create a class
% Entry classes carry zero population; jobs switch into them via routing.
160            aidxClass{eidx} = ClosedClass(model, lqn.hashnames{eidx}, 0, clientDelay);
161            clientDelay.setService(aidxClass{eidx}, Disabled.getInstance());
162            for m=1:nreplicas
163                serverStation{m}.setService(aidxClass{eidx}, Disabled.getInstance());
164            end
165            aidxClass{eidx}.completes = false;
166            aidxClass{eidx}.attribute = [LayeredNetworkElement.ENTRY, eidx];
167            model.attribute.entries(end+1,:) = [aidxClass{eidx}.index, eidx];
% Entries have zero service; use the Java singleton when a JLINE object backs
% the model (model.obj nonempty), the MATLAB one otherwise.
168            [singleton, javasingleton] = Immediate.getInstance();
169            if isempty(model.obj)
170                clientDelay.setService(aidxClass{eidx}, singleton);
171            else
172                clientDelay.setService(aidxClass{eidx}, javasingleton);
173            end
174
175            % Check for open arrival distribution on this entry
176            if isfield(lqn, 'arrival') && ~isempty(lqn.arrival) && ...
177                    iscell(lqn.arrival) && eidx <= length(lqn.arrival) && ...
178                    ~isempty(lqn.arrival{eidx})
179
180                if ~hasSource
181                    hasSource = true;
182                    model.attribute.sourceIdx = length(model.nodes)+1;
183                    sourceStation = Source(model,'Source');
184                    sinkStation = Sink(model,'Sink');
185                end
186
187                % Create open class for this entry
188                openClassForEntry = OpenClass(model, [lqn.hashnames{eidx}, '_Open'], 0);
189                sourceStation.setArrival(openClassForEntry, lqn.arrival{eidx});
190                clientDelay.setService(openClassForEntry, Disabled.getInstance());
191
192                % Use bound activity's service time (entries themselves have Immediate service)
193                % Find activities bound to this entry via graph
194                bound_act_indices = find(lqn.graph(eidx,:) > 0);
195                if ~isempty(bound_act_indices)
196                    % Use first bound activity's service time
197                    bound_aidx = bound_act_indices(1);
198                    for m=1:nreplicas
199                        serverStation{m}.setService(openClassForEntry, self.servtproc{bound_aidx});
200                    end
201                else
202                    % Fallback to entry service (should not happen in well-formed models)
203                    for m=1:nreplicas
204                        serverStation{m}.setService(openClassForEntry, self.servtproc{eidx});
205                    end
206                end
207
208                % Track for routing setup later: [class_index, entry_index]
209                entryOpenClasses(end+1,:) = [openClassForEntry.index, eidx];
210
211                % Track: Use negative entry index to distinguish from call arrivals
212                self.arvproc_classes_updmap{idx}(end+1,:) = [idx, -eidx, ...
213                    model.getNodeIndex(sourceStation), openClassForEntry.index];
214
215                openClassForEntry.completes = false;
216                openClassForEntry.attribute = [LayeredNetworkElement.ENTRY, eidx];
217            end
218        end
219    end
220
221    % for each activity of the calling task
222    for aidx = lqn.actsof{tidx_caller}
223        if ishostlayer | any(any(lqn.issynccaller(tidx_caller, lqn.entriesof{idx}))) %#ok<OR2>
224            % create a class
225            aidxClass{aidx} = ClosedClass(model, lqn.hashnames{aidx}, 0, clientDelay);
226            clientDelay.setService(aidxClass{aidx}, Disabled.getInstance());
227            for m=1:nreplicas
228                serverStation{m}.setService(aidxClass{aidx}, Disabled.getInstance());
229            end
230            aidxClass{aidx}.completes = false;
231            aidxClass{aidx}.attribute = [LayeredNetworkElement.ACTIVITY, aidx];
232            model.attribute.activities(end+1,:) = [aidxClass{aidx}.index, aidx];
233            hidx = lqn.parent(lqn.parent(aidx)); % index of host processor
234            if ~(ishostlayer && (hidx == idx))
235                % set the host demand for the activity
236                clientDelay.setService(aidxClass{aidx}, self.servtproc{aidx});
237            end
238            if lqn.sched(tidx_caller)~=SchedStrategy.REF % in 'ref' case the service activity is constant
239                % updmap(end+1,:) = [idx, aidx, 1, idxClass{aidx}.index];
240            end
% NOTE(review): eidx here is the loop variable left over from the entry loop
% above (its last value) — confirm this is intended for cache layers.
241            if iscachelayer && full(lqn.graph(eidx,aidx))
242                clientDelay.setService(aidxClass{aidx}, self.servtproc{aidx});
243            end
244
245            % Create auxiliary think-time class if activity has think-time
246            if ~isempty(lqn.actthink{aidx}) && lqn.actthink{aidx}.getMean() > GlobalConstants.FineTol
247                aidxThinkClass{aidx} = ClosedClass(model, [lqn.hashnames{aidx},'.Think'], 0, clientDelay);
248                aidxThinkClass{aidx}.completes = false;
249                aidxThinkClass{aidx}.attribute = [LayeredNetworkElement.ACTIVITY, aidx];
250                clientDelay.setService(aidxThinkClass{aidx}, lqn.actthink{aidx});
251                for m=1:nreplicas
252                    serverStation{m}.setService(aidxThinkClass{aidx}, Disabled.getInstance());
253                end
254                self.actthinkt_classes_updmap{idx}(end+1,:) = [idx, aidx, 1, aidxThinkClass{aidx}.index];
255            end
256        end
257        % add a class for each outgoing call from this activity
258        for cidx = lqn.callsof{aidx}
259            callmean(cidx) = lqn.callproc{cidx}.getMean;
260            switch lqn.calltype(cidx)
261                case CallType.ASYNC
262                    if lqn.parent(lqn.callpair(cidx,2)) == idx % add only if the target is serverStation
263                        if ~hasSource % we need to add source and sink to the model
264                            hasSource = true;
265                            model.attribute.sourceIdx = length(model.nodes)+1;
266                            sourceStation = Source(model,'Source');
267                            sinkStation = Sink(model,'Sink');
268                        end
269                        cidxClass{cidx} = OpenClass(model, lqn.callhashnames{cidx}, 0);
270                        sourceStation.setArrival(cidxClass{cidx}, Immediate.getInstance());
271                        clientDelay.setService(cidxClass{cidx}, Disabled.getInstance());
272                        for m=1:nreplicas
273                            serverStation{m}.setService(cidxClass{cidx}, Immediate.getInstance());
274                        end
275                        openClasses(end+1,:) = [cidxClass{cidx}.index, callmean(cidx), cidx];
276                        model.attribute.calls(end+1,:) = [cidxClass{cidx}.index, cidx, lqn.callpair(cidx,1), lqn.callpair(cidx,2)];
277                        cidxClass{cidx}.completes = false;
278                        cidxClass{cidx}.attribute = [LayeredNetworkElement.CALL, cidx];
% Initial service guess for the call class: sum of host demands of all of
% idx's activities (an upper bound; refined in later iterations via updmaps).
279                        minRespT = 0;
280                        for tidx_act = lqn.actsof{idx}
281                            minRespT = minRespT + lqn.hostdem{tidx_act}.getMean; % upper bound, uses all activities not just the ones reachable by this entry
282                        end
283                        for m=1:nreplicas
284                            serverStation{m}.setService(cidxClass{cidx}, Exp.fitMean(minRespT));
285                        end
286                    end
287                case CallType.SYNC
288                    cidxClass{cidx} = ClosedClass(model, lqn.callhashnames{cidx}, 0, clientDelay);
289                    clientDelay.setService(cidxClass{cidx}, Disabled.getInstance());
290                    for m=1:nreplicas
291                        serverStation{m}.setService(cidxClass{cidx}, Disabled.getInstance());
292                    end
293                    model.attribute.calls(end+1,:) = [cidxClass{cidx}.index, cidx, lqn.callpair(cidx,1), lqn.callpair(cidx,2)];
294                    cidxClass{cidx}.completes = false;
295                    cidxClass{cidx}.attribute = [LayeredNetworkElement.CALL, cidx];
296                    minRespT = 0;
297                    for tidx_act = lqn.actsof{idx}
298                        minRespT = minRespT + lqn.hostdem{tidx_act}.getMean; % upper bound, uses all activities not just the ones reachable by this entry
299                    end
300                    for m=1:nreplicas
301                        serverStation{m}.setService(cidxClass{cidx}, Exp.fitMean(minRespT));
302                    end
303            end
304
% An auxiliary class is needed whenever the mean number of calls differs from
% the replica count, to model repeat/skip visits probabilistically.
305            if callmean(cidx) ~= nreplicas
306                switch lqn.calltype(cidx)
307                    case CallType.SYNC
308                        cidxAuxClass{cidx} = ClosedClass(model, [lqn.callhashnames{cidx},'.Aux'], 0, clientDelay);
309                        cidxAuxClass{cidx}.completes = false;
310                        cidxAuxClass{cidx}.attribute = [LayeredNetworkElement.CALL, cidx];
311                        clientDelay.setService(cidxAuxClass{cidx}, Immediate.getInstance());
312                        for m=1:nreplicas
313                            serverStation{m}.setService(cidxAuxClass{cidx}, Disabled.getInstance());
314                        end
315                end
316            end
317        end
318    end
319end
320
321% Ensure Source's sourceClasses and arrivalProcess arrays are properly sized for all classes
322% This is needed because the Source may be created during class iteration
323% when only some classes exist, and new closed classes added afterwards
324% won't have corresponding entries in sourceClasses/arrivalProcess
325if hasSource
326    nClasses = model.getNumberOfClasses();
327    for k = 1:nClasses
328        if k > length(sourceStation.input.sourceClasses) || isempty(sourceStation.input.sourceClasses{k})
329            sourceStation.input.sourceClasses{k} = {[], ServiceStrategy.LI, Disabled.getInstance()};
330        end
331        if k > length(sourceStation.arrivalProcess) || isempty(sourceStation.arrivalProcess{k})
332            sourceStation.arrivalProcess{k} = Disabled.getInstance();
333        end
334    end
335end
336
337P = model.initRoutingMatrix;
% Routing for async-call open classes: Source -> random replica; each visit
% leaves for the Sink with probability 1/callmean, otherwise revisits a replica.
338if hasSource
339    for o = 1:size(openClasses,1)
340        oidx = openClasses(o,1);
341        p = 1 / openClasses(o,2); % divide by mean number of calls, they go to a server at random
342        for m=1:nreplicas
343            P{model.classes{oidx}, model.classes{oidx}}(sourceStation,serverStation{m}) = 1/nreplicas;
344            for n=1:nreplicas
345                P{model.classes{oidx}, model.classes{oidx}}(serverStation{m},serverStation{n}) = (1-p)/nreplicas;
346            end
347            P{model.classes{oidx}, model.classes{oidx}}(serverStation{m},sinkStation) = p;
348        end
349        cidx = openClasses(o,3); % 3 = source
350        self.arvproc_classes_updmap{idx}(end+1,:) = [idx, cidx, model.getNodeIndex(sourceStation), oidx];
351        for m=1:nreplicas
352            self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, model.getNodeIndex(serverStation{m}), oidx];
353        end
354    end
355end
356
357%% job positions are encoded as follows: 1=client, 2=any of the nreplicas server stations, 3=cache node, 4=fork node, 5=join node
358atClient = 1;
359atServer = 2;
360atCache = 3;
361
362jobPos = atClient; % start at client
363% second pass: setup the routing out of entries
364for tidx_caller = callers
365    % Use same condition as first pass - only process if closed class was created
366    hasDirectCallers = false;
367    if ishostlayer
368        if lqn.isref(tidx_caller)
369            hasDirectCallers = true;
370        else
371            for eidx_check = lqn.entriesof{tidx_caller}
372                if any(full(lqn.issynccaller(:, eidx_check))) || any(full(lqn.isasynccaller(:, eidx_check)))
373                    hasDirectCallers = true;
374                    break;
375                end
376                if isfield(lqn, 'arrival') && ~isempty(lqn.arrival) && ...
377                        iscell(lqn.arrival) && eidx_check <= length(lqn.arrival) && ...
378                        ~isempty(lqn.arrival{eidx_check})
379                    hasDirectCallers = true;
380                    break;
381                end
382            end
383        end
384    end
385    if (ishostlayer && hasDirectCallers) | any(any(lqn.issynccaller(tidx_caller, lqn.entriesof{idx}))) %#ok<OR2>
386        % for each entry of the calling task
387        ncaller_entries = length(lqn.entriesof{tidx_caller});
388        for eidx = lqn.entriesof{tidx_caller}
389            aidxClass_eidx = aidxClass{eidx};
390            aidxClass_tidx_caller = aidxClass{tidx_caller};
391            % initialize the probability to select an entry to be identical
392            P{aidxClass_tidx_caller, aidxClass_eidx}(clientDelay, clientDelay) = 1 / ncaller_entries;
393            if ncaller_entries > 1
394                % at successive iterations make sure to replace this with throughput ratio
395                self.route_prob_updmap{idx}(end+1,:) = [idx, tidx_caller, eidx, 1, 1, aidxClass_tidx_caller.index, aidxClass_eidx.index];
396            end
% Walk the activity graph of this entry and fill in the routing matrix.
397            P = recurActGraph(P, tidx_caller, eidx, aidxClass_eidx, jobPos);
398        end
399    end
400end
401
402% Setup routing for entry-level open arrivals (AFTER recurActGraph to avoid being overwritten)
403if hasSource && ~isempty(entryOpenClasses)
404    for e = 1:size(entryOpenClasses,1)
405        eoidx = entryOpenClasses(e,1); % class index
406        openClass = model.classes{eoidx};
407
408        % Explicitly set routing: ONLY Source → Server → Sink
409        % Zero out all routing for this class first
410        for node1 = 1:length(model.nodes)
411            for node2 = 1:length(model.nodes)
412                P{openClass, openClass}(node1, node2) = 0;
413            end
414        end
415
416        % Now set the correct routing
417        for m=1:nreplicas
418            % Route: Source → ServerStation → Sink
419            P{openClass, openClass}(sourceStation,serverStation{m}) = 1/nreplicas;
420            P{openClass, openClass}(serverStation{m},sinkStation) = 1.0;
421        end
422    end
423end
424
425model.link(P);
426self.ensemble{idx} = model;
427
428    function [P, curClass, jobPos] = recurActGraph(P, tidx_caller, aidx, curClass, jobPos)
    % RECURACTGRAPH Recursively translate the activity graph rooted at aidx
    % into routing-matrix entries for caller task tidx_caller.
    %   P        - cell routing matrix (from model.initRoutingMatrix), updated in place
    %   aidx     - current activity (or entry) index in the LQN
    %   curClass - class currently carrying the job in the layer model
    %   jobPos   - current job location: atClient, atServer, or atCache
    % Nested function: also reads/writes parent-workspace state (aidxClass,
    % forkClassStack, jobPosKey, curClassKey, isNextPrecFork, serverStation,
    % clientDelay, and the self.*_updmap{idx} tables).
        % Remember position/class at this node so sibling branches can restore it.
429        jobPosKey(aidx) = jobPos;
430        curClassKey{aidx} = curClass;
431        nextaidxs = find(lqn.graph(aidx,:)); % these include the called entries
432        if ~isempty(nextaidxs)
433            isNextPrecFork(aidx) = any(isPostAndAct(nextaidxs)); % indexed on aidx to avoid losing it during the recursion
434        end
435
436        for nextaidx = nextaidxs % for all successor activities
437            if ~isempty(nextaidx)
438                isLoop = false;
439                % in the activity graph, the following if is entered only
440                % by an edge that is the return from a LOOP activity
441                if (lqn.graph(aidx,nextaidx) ~= lqn.dag(aidx,nextaidx))
442                    isLoop = true;
443                end
444                if ~(lqn.parent(aidx) == lqn.parent(nextaidx)) % if different parent task
445                    % if the successor activity is an entry of another task, this is a call
446                    cidx = matchrow(lqn.callpair,[aidx,nextaidx]); % find the call index
447                    switch lqn.calltype(cidx)
448                        case CallType.ASYNC
449                            % Async calls don't modify caller routing - caller continues immediately without blocking.
450                            % Arrival rate at destination is handled via arvproc_classes_updmap (lines 170-195, 230-248).
451                        case CallType.SYNC
                            % cidx is read by routeSynchCall from the shared workspace.
452                            [P, jobPos, curClass] = routeSynchCall(P, jobPos, curClass);
453                        case CallType.FWD
454                            % Forwarding: after the source entry's activity completes,
455                            % route to the forwarding target's bound activity on the same processor
456                            if ishostlayer
457                                target_entry = nextaidx;
458                                target_bound_acts = find(lqn.graph(target_entry, :));
459                                if ~isempty(target_bound_acts)
460                                    target_aidx = target_bound_acts(1); % First bound activity
461                                    fwd_prob = full(lqn.graph(aidx, target_entry)); % Forwarding probability
462
463                                    % Create class for target activity if needed
464                                    if isempty(aidxClass{target_aidx})
465                                        aidxClass{target_aidx} = ClosedClass(model, lqn.hashnames{target_aidx}, 0, clientDelay);
466                                        clientDelay.setService(aidxClass{target_aidx}, Disabled.getInstance());
467                                        for m=1:nreplicas
468                                            serverStation{m}.setService(aidxClass{target_aidx}, Disabled.getInstance());
469                                        end
470                                        aidxClass{target_aidx}.completes = false;
471                                        aidxClass{target_aidx}.attribute = [LayeredNetworkElement.ACTIVITY, target_aidx];
472                                        model.attribute.activities(end+1,:) = [aidxClass{target_aidx}.index, target_aidx];
473                                    end
474
475                                    % Route from current position to target activity at server
476                                    for m=1:nreplicas
477                                        if jobPos == atClient
478                                            P{curClass, aidxClass{target_aidx}}(clientDelay, serverStation{m}) = fwd_prob / nreplicas;
479                                        else
480                                            P{curClass, aidxClass{target_aidx}}(serverStation{m}, serverStation{m}) = fwd_prob;
481                                        end
482                                        serverStation{m}.setService(aidxClass{target_aidx}, lqn.hostdem{target_aidx});
483                                    end
484                                    self.servt_classes_updmap{idx}(end+1,:) = [idx, target_aidx, 2, aidxClass{target_aidx}.index];
485
486                                    jobPos = atServer;
487                                    curClass = aidxClass{target_aidx};
488
489                                    % Check if target entry also has forwarding (chained forwarding)
490                                    target_nextaidxs = find(lqn.graph(target_entry, :));
491                                    for target_nextaidx = target_nextaidxs
492                                        if target_nextaidx ~= target_aidx % Skip the bound activity we just processed
493                                            % Check if this is another forwarding call
494                                            target_cidx = matchrow(lqn.callpair, [target_entry, target_nextaidx]);
495                                            if ~isempty(target_cidx) && lqn.calltype(target_cidx) == CallType.FWD
496                                                % Recursively handle chained forwarding
497                                                [P, curClass, jobPos] = recurActGraph(P, tidx_caller, target_entry, curClass, jobPos);
498                                            end
499                                        end
500                                    end
501                                end
502                            end
503                    end
504                else
505                    % at this point, we have processed all calls, let us do the
506                    % activities local to the task next
507                    if isempty(intersect(lqn.eshift+(1:lqn.nentries), nextaidxs))
508                        % if next activity is not an entry
                        % restore the position/class saved on entry to this node
509                        jobPos = jobPosKey(aidx);
510                        curClass = curClassKey{aidx};
511                    else
512                        if ismember(nextaidxs(find(nextaidxs==nextaidx)-1), lqn.eshift+(1:lqn.nentries))
513                            curClassC = curClass;
514                        end
515                        jobPos = atClient;
516                        curClass = curClassC;
517                    end
518                    if jobPos == atClient % at client node
519                        if ishostlayer
520                            if ~iscachelayer
521                                for m=1:nreplicas
522                                    if isNextPrecFork(aidx)
523                                        % if next activity is a post-and
524                                        P{curClass, curClass}(clientDelay, forkNode) = 1.0;
525                                        f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
526                                        forkClassStack(end+1) = curClass.index;
527                                        P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
528                                        P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, serverStation{m}) = 1.0;
529                                    else
530                                        if isPreAndAct(aidx)
531                                            % before entering the job we go back to the entry class at the last fork
532                                            forkClass = model.classes{forkClassStack(end)};
533                                            forkClassStack(end) = [];
534                                            P{curClass, forkClass}(clientDelay,joinNode) = 1.0;
535                                            P{forkClass, aidxClass{nextaidx}}(joinNode,serverStation{m}) = 1.0;
536                                        else
537                                            P{curClass, aidxClass{nextaidx}}(clientDelay,serverStation{m}) = full(lqn.graph(aidx,nextaidx));
538                                        end
539                                    end
540                                    serverStation{m}.setService(aidxClass{nextaidx}, lqn.hostdem{nextaidx});
541                                    if isfunctionlayer
542                                        serverStation{m}.setDelayOff(aidxClass{nextaidx}, lqn.setuptime{lqn.parent(nextaidx)}, lqn.delayofftime{lqn.parent(nextaidx)});
543                                    end
544                                end
545                                jobPos = atServer;
546                                curClass = aidxClass{nextaidx};
547                                self.servt_classes_updmap{idx}(end+1,:) = [idx, nextaidx, 2, aidxClass{nextaidx}.index];
548                            else
549                                P{curClass, aidxClass{nextaidx}}(clientDelay,cacheNode) = full(lqn.graph(aidx,nextaidx));
550
551                                cacheNode.setReadItemEntry(aidxClass{nextaidx},lqn.itemproc{aidx},lqn.nitems(aidx));
                                % First successor of the cache access is the hit
                                % activity, second is the miss activity.
552                                lqn.hitmissaidx = find(lqn.graph(nextaidx,:));
553                                lqn.hitaidx = lqn.hitmissaidx(1);
554                                lqn.missaidx = lqn.hitmissaidx(2);
555
556                                cacheNode.setHitClass(aidxClass{nextaidx},aidxClass{lqn.hitaidx});
557                                cacheNode.setMissClass(aidxClass{nextaidx},aidxClass{lqn.missaidx});
558
559                                jobPos = atCache; % cache
560                                curClass = aidxClass{nextaidx};
561                                %self.route_prob_updmap{idx}(end+1,:) = [idx, nextaidx, lqn.hitaidx, 3, 3, aidxClass{nextaidx}.index, aidxClass{lqn.hitaidx}.index];
562                                %self.route_prob_updmap{idx}(end+1,:) = [idx, nextaidx, lqn.missaidx, 3, 3, aidxClass{nextaidx}.index, aidxClass{lqn.missaidx}.index];
563                            end
564                        else % not ishostlayer
565                            if isNextPrecFork(aidx)
566                                % if next activity is a post-and
567                                P{curClass, curClass}(clientDelay, forkNode) = 1.0;
568                                f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
569                                forkClassStack(end+1) = curClass.index;
570                                P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
571                                P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, clientDelay) = 1.0;
572                            else
573                                if isPreAndAct(aidx)
574                                    % before entering the job we go back to the entry class at the last fork
575                                    forkClass = model.classes{forkClassStack(end)};
576                                    forkClassStack(end) = [];
577                                    P{curClass, forkClass}(clientDelay,joinNode) = 1.0;
578                                    P{forkClass, aidxClass{nextaidx}}(joinNode,clientDelay) = 1.0;
579                                else
580                                    P{curClass, aidxClass{nextaidx}}(clientDelay,clientDelay) = full(lqn.graph(aidx,nextaidx));
581                                end
582                            end
583                            jobPos = atClient;
584                            curClass = aidxClass{nextaidx};
585                            clientDelay.setService(aidxClass{nextaidx}, self.servtproc{nextaidx});
586                            self.thinkt_classes_updmap{idx}(end+1,:) = [idx, nextaidx, 1, aidxClass{nextaidx}.index];
587                        end
588                    elseif jobPos == atServer || jobPos == atCache % at server station
589                        if ishostlayer
590                            if iscachelayer
591                                curClass = aidxClass{nextaidx};
592                                for m=1:nreplicas
593                                    if isNextPrecFork(aidx)
594                                        % if next activity is a post-and
595                                        P{curClass, curClass}(cacheNode, forkNode) = 1.0;
596                                        f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
597                                        forkClassStack(end+1) = curClass.index;
598                                        P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
599                                        P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, serverStation{m}) = 1.0;
600                                    else
601                                        if isPreAndAct(aidx)
602                                            % before entering the job we go back to the entry class at the last fork
603                                            forkClass = model.classes{forkClassStack(end)};
604                                            forkClassStack(end) = [];
605
606                                            P{curClass, forkClass}(cacheNode,joinNode) = 1.0;
607                                            P{forkClass, aidxClass{nextaidx}}(joinNode,serverStation{m}) = 1.0;
608                                        else
609                                            P{curClass, aidxClass{nextaidx}}(cacheNode,serverStation{m}) = full(lqn.graph(aidx,nextaidx));
610                                        end
611                                    end
612                                    serverStation{m}.setService(aidxClass{nextaidx}, lqn.hostdem{nextaidx});
613                                    %self.route_prob_updmap{idx}(end+1,:) = [idx, nextaidx, nextaidx, 3, 2, aidxClass{nextaidx}.index, aidxClass{nextaidx}.index];
614                                end
615                            else
616                                for m=1:nreplicas
617                                    if isNextPrecFork(aidx)
618                                        % if next activity is a post-and
619                                        P{curClass, curClass}(serverStation{m}, forkNode) = 1.0;
620                                        f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
621                                        forkClassStack(end+1) = curClass.index;
622                                        P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
623                                        P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, serverStation{m}) = 1.0;
624                                    else
625                                        if isPreAndAct(aidx)
626                                            % before entering the job we go back to the entry class at the last fork
627                                            forkClass = model.classes{forkClassStack(end)};
628                                            forkClassStack(end) = [];
629                                            P{curClass, forkClass}(serverStation{m},joinNode) = 1.0;
630                                            P{forkClass, aidxClass{nextaidx}}(joinNode,serverStation{m}) = 1.0;
631                                        else
632                                            P{curClass, aidxClass{nextaidx}}(serverStation{m},serverStation{m}) = full(lqn.graph(aidx,nextaidx));
633                                        end
634                                    end
635                                    serverStation{m}.setService(aidxClass{nextaidx}, lqn.hostdem{nextaidx});
636                                end
637                            end
638                            jobPos = atServer;
639                            curClass = aidxClass{nextaidx};
640                            self.servt_classes_updmap{idx}(end+1,:) = [idx, nextaidx, 2, aidxClass{nextaidx}.index];
641                        else
642                            for m=1:nreplicas
643                                if isNextPrecFork(aidx)
644                                    % if next activity is a post-and
645                                    P{curClass, curClass}(serverStation{m}, forkNode) = 1.0;
646                                    f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
647                                    forkClassStack(end+1) = curClass.index;
648                                    P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
649                                    P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, clientDelay) = 1.0;
650                                else
651                                    if isPreAndAct(aidx)
652                                        % before entering the job we go back to the entry class at the last fork
653                                        forkClass = model.classes{forkClassStack(end)};
654                                        forkClassStack(end) = [];
655                                        P{curClass, forkClass}(serverStation{m},joinNode) = 1.0;
656                                        P{forkClass, aidxClass{nextaidx}}(joinNode,clientDelay) = 1.0;
657                                    else
658                                        P{curClass, aidxClass{nextaidx}}(serverStation{m},clientDelay) = full(lqn.graph(aidx,nextaidx));
659                                    end
660                                end
661                            end
662                            jobPos = atClient;
663                            curClass = aidxClass{nextaidx};
664                            clientDelay.setService(aidxClass{nextaidx}, self.servtproc{nextaidx});
665                            self.thinkt_classes_updmap{idx}(end+1,:) = [idx, nextaidx, 1, aidxClass{nextaidx}.index];
666                        end
667                    end
668                    if aidx ~= nextaidx && ~isLoop
669                        %% now recursively build the rest of the routing matrix graph
670                        [P, curClass, jobPos] = recurActGraph(P, tidx_caller, nextaidx, curClass, jobPos);
671
672                        % At this point curClassRec is the last class in the
673                        % recursive branch, which we now close with a reply
                        % Route the job back into the caller-task class; classes
                        % other than '.Aux' auxiliaries mark a completed cycle.
674                        if jobPos == atClient
675                            P{curClass, aidxClass{tidx_caller}}(clientDelay,clientDelay) = 1;
676                            if ~strcmp(curClass.name(end-3:end),'.Aux')
677                                curClass.completes = true;
678                            end
679                        else
680                            for m=1:nreplicas
681                                P{curClass, aidxClass{tidx_caller}}(serverStation{m},clientDelay) = 1;
682                            end
683                            if ~strcmp(curClass.name(end-3:end),'.Aux')
684                                curClass.completes = true;
685                            end
686                        end
687                    end
688                end
689            end
690        end % nextaidx
691    end
692
693    function [P, jobPos, curClass] = routeSynchCall(P, jobPos, curClass)
    % ROUTESYNCHCALL Add routing-matrix entries for the synchronous call cidx.
    %   cidx, callmean, cidxClass, cidxAuxClass, callservtproc and the model
    %   nodes are read from the enclosing workspace (nested function).
    %   The routing depends on (a) whether the call targets an entry of this
    %   layer's server (lqn.parent(lqn.callpair(cidx,2)) == idx) and (b) how
    %   callmean(cidx) compares with nreplicas: the '.Aux' class models the
    %   probabilistic repeat (callmean > nreplicas) or skip (callmean <
    %   nreplicas) of server visits so the mean visit count is matched.
        switch jobPos
            case atClient
                if lqn.parent(lqn.callpair(cidx,2)) == idx
694                    % if a call to an entry of the server in this layer
                    if callmean(cidx) < nreplicas
                        % with prob (1-callmean) skip the server visit via the Aux class
695                        P{curClass, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1 - callmean(cidx); % note that callmean(cidx) < nreplicas
                        for m=1:nreplicas
696                            % if isNextPrecFork(aidx)
697                            % end
698                            % % if next activity is a post-and
699                            % P{curClass, curClass}(serverStation{m}, forkNode) = 1.0;
700                            % forkStackClass(end+1) = curClass.index;
701                            % f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
702                            % P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
703                            % P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, clientDelay) = 1.0;
704                            % else
                            P{curClass, cidxClass{cidx}}(clientDelay,serverStation{m}) = callmean(cidx) / nreplicas;
                            P{cidxClass{cidx}, cidxClass{cidx}}(serverStation{m},clientDelay) = 1.0; % not needed, just to avoid leaving the Aux class disconnected
                        end
                        P{cidxAuxClass{cidx}, cidxClass{cidx}}(clientDelay,clientDelay) = 1.0; % not needed, just to avoid leaving the Aux class disconnected
                    elseif callmean(cidx) == nreplicas
                        % exactly one visit per replica on average: no Aux class needed
                        for m=1:nreplicas
                            P{curClass, cidxClass{cidx}}(clientDelay,serverStation{m}) = 1 / nreplicas;
                            P{cidxClass{cidx}, cidxClass{cidx}}(serverStation{m},clientDelay) = 1.0;
                        end
                    else % callmean(cidx) > nreplicas
                        % revisit the server via the Aux class to inflate the visit count
                        for m=1:nreplicas
                            P{curClass, cidxClass{cidx}}(clientDelay,serverStation{m}) = 1 / nreplicas;
                            P{cidxClass{cidx}, cidxAuxClass{cidx}}(serverStation{m},clientDelay) = 1.0 ;
                            P{cidxAuxClass{cidx}, cidxClass{cidx}}(clientDelay,serverStation{m}) = 1.0 - 1.0 / (callmean(cidx) / nreplicas);
                        end
                        P{cidxAuxClass{cidx}, cidxClass{cidx}}(clientDelay,clientDelay) = 1.0 / (callmean(cidx));
                    end
                    jobPos = atClient;
                    clientDelay.setService(cidxClass{cidx}, Immediate.getInstance());
                    for m=1:nreplicas
                        serverStation{m}.setService(cidxClass{cidx}, callservtproc{cidx});
                        self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, model.getNodeIndex(serverStation{m}), cidxClass{cidx}.index];
                    end
                    curClass = cidxClass{cidx};
                else
                    % if it is not a call to an entry of the server
                    % (the callee lives in another layer: model the call as a
                    % delay at the client with service callservtproc{cidx})
                    if callmean(cidx) < nreplicas
                        P{curClass, cidxClass{cidx}}(clientDelay,clientDelay) = callmean(cidx)/nreplicas; % the mean number of calls is now embedded in the demand
                        P{cidxClass{cidx}, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1; % the mean number of calls is now embedded in the demand
                        P{curClass, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1 - callmean(cidx)/nreplicas; % the mean number of calls is now embedded in the demand
                        curClass = cidxAuxClass{cidx};
                    elseif callmean(cidx) == nreplicas
                        P{curClass, cidxClass{cidx}}(clientDelay,clientDelay) = 1;
                        curClass = cidxClass{cidx};
                    else % callmean(cidx) > nreplicas
                        P{curClass, cidxClass{cidx}}(clientDelay,clientDelay) = 1; % the mean number of calls is now embedded in the demand
                        P{cidxClass{cidx}, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1;% / (callmean(cidx)/nreplicas); % the mean number of calls is now embedded in the demand
                        curClass = cidxAuxClass{cidx};
                    end
                    jobPos = atClient;
                    clientDelay.setService(cidxClass{cidx}, callservtproc{cidx});
                    self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, 1, cidxClass{cidx}.index];
                end
            case atServer % job at server
                if lqn.parent(lqn.callpair(cidx,2)) == idx
                    % if it is a call to an entry of the server
                    if callmean(cidx) < nreplicas
                        for m=1:nreplicas
                            P{curClass, cidxClass{cidx}}(serverStation{m},clientDelay) = 1 - callmean(cidx);
                            P{curClass, cidxClass{cidx}}(serverStation{m},serverStation{m}) = callmean(cidx);
                            serverStation{m}.setService(cidxClass{cidx}, callservtproc{cidx});
                        end
                        jobPos = atClient;
                        curClass = cidxAuxClass{cidx};
                    elseif callmean(cidx) == nreplicas
                        for m=1:nreplicas
                            P{curClass, cidxClass{cidx}}(serverStation{m},serverStation{m}) = 1;
                        end
                        jobPos = atServer;
                        curClass = cidxClass{cidx};
                    else % callmean(cidx) > nreplicas
                        % geometric revisits: leave for the client with prob 1/callmean
                        for m=1:nreplicas
                            P{curClass, cidxClass{cidx}}(serverStation{m},serverStation{m}) = 1;
                            P{cidxClass{cidx}, cidxClass{cidx}}(serverStation{m},serverStation{m}) = 1 - 1 / (callmean(cidx));
                            P{cidxClass{cidx}, cidxAuxClass{cidx}}(serverStation{m},clientDelay) = 1 / (callmean(cidx));
                        end
                        jobPos = atClient;
                        curClass = cidxAuxClass{cidx};
                    end
                    for m=1:nreplicas
                        serverStation{m}.setService(cidxClass{cidx}, callservtproc{cidx});
                        self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, model.getNodeIndex(serverStation{m}), cidxClass{cidx}.index];
                    end
                else
                    % if it is not a call to an entry of the server
                    % callmean not needed since we switched
                    % to ResidT to model service time at client
                    if callmean(cidx) < nreplicas
                        for m=1:nreplicas
                            P{curClass, cidxClass{cidx}}(serverStation{m},clientDelay) = 1;
                        end
                        P{cidxClass{cidx}, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1;
                        curClass = cidxAuxClass{cidx};
                    elseif callmean(cidx) == nreplicas
                        for m=1:nreplicas
                            P{curClass, cidxClass{cidx}}(serverStation{m},clientDelay) = 1;
                        end
                        curClass = cidxClass{cidx};
                    else % callmean(cidx) > nreplicas
                        for m=1:nreplicas
                            P{curClass, cidxClass{cidx}}(serverStation{m},clientDelay) = 1;
                        end
                        P{cidxClass{cidx}, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1;
                        curClass = cidxAuxClass{cidx};
                    end
                    jobPos = atClient;
                    clientDelay.setService(cidxClass{cidx}, callservtproc{cidx});
                    self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, 1, cidxClass{cidx}.index];
                end
        end

        % After synch call returns, route through activity think-time class if applicable
        callingAidx = lqn.callpair(cidx, 1); % source activity of the call
        if ~isempty(aidxThinkClass{callingAidx})
            % Route from current class to think-time class at client
            P{curClass, aidxThinkClass{callingAidx}}(clientDelay, clientDelay) = 1.0;
            curClass = aidxThinkClass{callingAidx};
            jobPos = atClient;
        end
    end
820end
Definition mmt.m:92