LINE Solver — MATLAB API documentation
buildLayersRecursive.m
1function buildLayersRecursive(self, idx, callers, ishostlayer)
2lqn = self.lqn;
3jobPosKey = zeros(lqn.nidx,1);
4curClassKey = cell(lqn.nidx,1);
5% Fan-out check: when all callers have fan-out >= nreplicas for this task,
6% each replica sees the full caller traffic (fork-join semantics).
7% Model a single representative replica; updateThinkTimes multiplies by K.
8% For host layers: if all caller tasks have the same replication as the host,
9% the host is co-replicated with the task, so also use single-replica modeling.
10rawReplicas = lqn.repl(idx);
11reduceFanout = false;
12if rawReplicas > 1 && ~isempty(callers)
13 if ~ishostlayer && isfield(lqn, 'fanout') && ~isempty(lqn.fanout)
14 reduceFanout = true;
15 for c = callers(:)'
16 if lqn.fanout(c, idx) < rawReplicas
17 reduceFanout = false;
18 break;
19 end
20 end
21 elseif ishostlayer
22 reduceFanout = true;
23 for c = callers(:)'
24 if lqn.repl(c) ~= rawReplicas
25 reduceFanout = false;
26 break;
27 end
28 end
29 end
30end
31if reduceFanout
32 nreplicas = 1;
33 if ~ishostlayer
34 self.singleReplicaTasks(end+1) = idx;
35 end
36else
37 nreplicas = rawReplicas;
38end
39%mult = lqn.mult;
40mult = lqn.maxmult; % this removes spare capacity that cannot be used
41lqn.mult = mult;
42callservtproc = self.callservtproc;
43model = Network(lqn.hashnames{idx});
44model.setChecks(false); % fast mode
45model.attribute = struct('hosts',[],'tasks',[],'entries',[],'activities',[],'calls',[],'serverIdx',0);
46if ishostlayer | any(any(lqn.issynccaller(callers, lqn.entriesof{idx}))) | any(any(lqn.isasynccaller(callers, lqn.entriesof{idx}))) %#ok<OR2>
47 clientDelay = Delay(model, 'Clients');
48 model.attribute.clientIdx = 1;
49 model.attribute.serverIdx = 2;
50 model.attribute.sourceIdx = NaN;
51else
52 model.attribute.serverIdx = 1;
53 model.attribute.clientIdx = NaN;
54 model.attribute.sourceIdx = NaN;
55end
56serverStation = cell(1,nreplicas);
57isfunctionlayer = all(lqn.isfunction(callers)) && ishostlayer;
58for m=1:nreplicas
59 if m == 1
60 serverStation{m} = Queue(model,lqn.hashnames{idx}, lqn.sched(idx));
61 else
62 serverStation{m} = Queue(model,[lqn.hashnames{idx},'.',num2str(m)], lqn.sched(idx));
63 end
64 serverStation{m}.setNumberOfServers(mult(idx));
65 serverStation{m}.attribute.ishost = ishostlayer;
66 serverStation{m}.attribute.idx = idx;
67end
68
69iscachelayer = all(lqn.iscache(callers)) && ishostlayer;
70if iscachelayer
71 cacheNode = Cache(model, lqn.hashnames{callers}, lqn.nitems(callers), lqn.itemcap{callers}, lqn.replacestrat(callers));
72end
73
74actsInCaller = [lqn.actsof{callers}];
75isPostAndAct = full(lqn.actposttype)==ActivityPrecedenceType.POST_AND;
76isPreAndAct = full(lqn.actpretype)==ActivityPrecedenceType.PRE_AND;
77hasfork = any(intersect(find(isPostAndAct),actsInCaller));
78
79maxfanout = 1; % maximum output parallelism level of fork nodes
80for aidx = actsInCaller(:)'
81 successors = find(lqn.graph(aidx,:));
82 if any(isPostAndAct(successors))
83 maxfanout = max(maxfanout, sum(isPostAndAct(successors)));
84 end
85end
86
87if hasfork
88 forkNode = Fork(model, 'Fork_PostAnd');
89 for f=1:maxfanout
90 forkOutputRouter{f} = Router(model, ['Fork_PostAnd_',num2str(f)]);
91 end
92 forkClassStack = []; % stack with the entry class at the visited forks, the last visited is end of the list.
93end
94
95isPreAndAct = full(lqn.actpretype)==ActivityPrecedenceType.PRE_AND;
96hasjoin = any(isPreAndAct(actsInCaller));
97if hasjoin
98 joinNode = Join(model, 'Join_PreAnd', forkNode);
99end
100
101aidxClass = cell(1, lqn.nidx);
102aidxThinkClass = cell(1, lqn.nidx); % auxiliary classes for activity think-time
103cidxClass = cell(1,0);
104cidxAuxClass = cell(1,0);
105
106self.servt_classes_updmap{idx} = zeros(0,4); % [modelidx, actidx, node, class] % server classes to update
107self.thinkt_classes_updmap{idx} = zeros(0,4); % [modelidx, actidx, node, class] % client classes to update
108self.actthinkt_classes_updmap{idx} = zeros(0,4); % [modelidx, actidx, node, class] % activity think-time classes to update
109self.arvproc_classes_updmap{idx} = zeros(0,4); % [modelidx, actidx, node, class] % classes to update in the next iteration for asynch calls
110self.call_classes_updmap{idx} = zeros(0,4); % [modelidx, callidx, node, class] % calls classes to update in the next iteration (includes calls in client classes)
111self.route_prob_updmap{idx} = zeros(0,7); % [modelidx, actidxfrom, actidxto, nodefrom, nodeto, classfrom, classto] % routing probabilities to update in the next iteration
112
113if ishostlayer
114 model.attribute.hosts(end+1,:) = [NaN, model.attribute.serverIdx ];
115else
116 model.attribute.tasks(end+1,:) = [NaN, model.attribute.serverIdx ];
117end
118
119hasSource = false; % flag whether a source is needed
120openClasses = [];
121entryOpenClasses = []; % track entry-level open arrivals
122% first pass: create the classes
123for tidx_caller = callers
124 % For host layers, check if the task has any entries with sync/async callers
125 % or has open arrivals, OR if any entry is a forwarding target.
126 hasDirectCallers = false;
127 isForwardingTarget = false;
128 if ishostlayer
129 % Check if the task is a reference task (always create closed class)
130 if lqn.isref(tidx_caller)
131 hasDirectCallers = true;
132 else
133 % Check if any entry of this task has sync or async callers
134 for eidx = lqn.entriesof{tidx_caller}
135 if any(full(lqn.issynccaller(:, eidx))) || any(full(lqn.isasynccaller(:, eidx)))
136 hasDirectCallers = true;
137 break;
138 end
139 % Also check for open arrivals on this entry
140 if isfield(lqn, 'arrival') && ~isempty(lqn.arrival) && ...
141 iscell(lqn.arrival) && eidx <= length(lqn.arrival) && ...
142 ~isempty(lqn.arrival{eidx})
143 hasDirectCallers = true;
144 break;
145 end
146 % Check if this entry is a forwarding target
147 for cidx = 1:lqn.ncalls
148 if full(lqn.calltype(cidx)) == CallType.FWD && full(lqn.callpair(cidx, 2)) == eidx
149 isForwardingTarget = true;
150 break;
151 end
152 end
153 end
154 end
155 end
156 if (ishostlayer && (hasDirectCallers || isForwardingTarget)) | any(any(lqn.issynccaller(tidx_caller, lqn.entriesof{idx}))) %#ok<OR2> % if it is only an asynch caller the closed classes are not needed
157 if self.njobs(tidx_caller,idx) == 0
158 % for each entry of the calling task
159 % determine job population
160 % this block matches the corresponding calculations in
161 % updateThinkTimes
162 % Use single-replica njobs if this layer or the caller is in single-replica mode
163 callerIsSingleReplica = reduceFanout || any(self.singleReplicaTasks == tidx_caller);
164 if callerIsSingleReplica
165 njobs = mult(tidx_caller);
166 else
167 njobs = mult(tidx_caller)*lqn.repl(tidx_caller);
168 end
169 if isinf(njobs)
170 callers_of_tidx_caller = find(lqn.taskgraph(:,tidx_caller));
171 njobs = sum(mult(callers_of_tidx_caller)); %#ok<FNDSB>
172 if isinf(njobs)
173 % if also the callers of tidx_caller are inf servers, then use
174 % an heuristic
175 njobs = min(sum(mult(isfinite(mult)) .* lqn.repl(isfinite(mult))),1000); % Python parity: cap at 1000
176 end
177 end
178 self.njobs(tidx_caller,idx) = njobs;
179 else
180 njobs = self.njobs(tidx_caller,idx);
181 end
182 caller_name = lqn.hashnames{tidx_caller};
183 aidxClass{tidx_caller} = ClosedClass(model, caller_name, njobs, clientDelay);
184 clientDelay.setService(aidxClass{tidx_caller}, Disabled.getInstance());
185 for m=1:nreplicas
186 serverStation{m}.setService(aidxClass{tidx_caller}, Disabled.getInstance());
187 end
188 aidxClass{tidx_caller}.completes = false;
189 aidxClass{tidx_caller}.setReferenceClass(true); % renormalize residence times using the visits to the task
190 aidxClass{tidx_caller}.attribute = [LayeredNetworkElement.TASK, tidx_caller];
191 model.attribute.tasks(end+1,:) = [aidxClass{tidx_caller}.index, tidx_caller];
192 clientDelay.setService(aidxClass{tidx_caller}, self.thinkproc{tidx_caller});
193 if ~lqn.isref(tidx_caller)
194 self.thinkt_classes_updmap{idx}(end+1,:) = [idx, tidx_caller, 1, aidxClass{tidx_caller}.index];
195 end
196 for eidx = lqn.entriesof{tidx_caller}
197 % create a class
198 aidxClass{eidx} = ClosedClass(model, lqn.hashnames{eidx}, 0, clientDelay);
199 clientDelay.setService(aidxClass{eidx}, Disabled.getInstance());
200 for m=1:nreplicas
201 serverStation{m}.setService(aidxClass{eidx}, Disabled.getInstance());
202 end
203 aidxClass{eidx}.completes = false;
204 aidxClass{eidx}.attribute = [LayeredNetworkElement.ENTRY, eidx];
205 model.attribute.entries(end+1,:) = [aidxClass{eidx}.index, eidx];
206 [singleton, javasingleton] = Immediate.getInstance();
207 if isempty(model.obj)
208 clientDelay.setService(aidxClass{eidx}, singleton);
209 else
210 clientDelay.setService(aidxClass{eidx}, javasingleton);
211 end
212
213 % Check for open arrival distribution on this entry
214 if isfield(lqn, 'arrival') && ~isempty(lqn.arrival) && ...
215 iscell(lqn.arrival) && eidx <= length(lqn.arrival) && ...
216 ~isempty(lqn.arrival{eidx})
217
218 if ~hasSource
219 hasSource = true;
220 model.attribute.sourceIdx = length(model.nodes)+1;
221 sourceStation = Source(model,'Source');
222 sinkStation = Sink(model,'Sink');
223 end
224
225 % Create open class for this entry
226 openClassForEntry = OpenClass(model, [lqn.hashnames{eidx}, '_Open'], 0);
227 sourceStation.setArrival(openClassForEntry, lqn.arrival{eidx});
228 clientDelay.setService(openClassForEntry, Disabled.getInstance());
229
230 % Use bound activity's service time (entries themselves have Immediate service)
231 % Find activities bound to this entry via graph
232 bound_act_indices = find(lqn.graph(eidx,:) > 0);
233 if ~isempty(bound_act_indices)
234 % Use first bound activity's service time
235 bound_aidx = bound_act_indices(1);
236 for m=1:nreplicas
237 serverStation{m}.setService(openClassForEntry, self.servtproc{bound_aidx});
238 end
239 else
240 % Fallback to entry service (should not happen in well-formed models)
241 for m=1:nreplicas
242 serverStation{m}.setService(openClassForEntry, self.servtproc{eidx});
243 end
244 end
245
246 % Track for routing setup later: [class_index, entry_index]
247 entryOpenClasses(end+1,:) = [openClassForEntry.index, eidx];
248
249 % Track: Use negative entry index to distinguish from call arrivals
250 self.arvproc_classes_updmap{idx}(end+1,:) = [idx, -eidx, ...
251 model.getNodeIndex(sourceStation), openClassForEntry.index];
252
253 openClassForEntry.completes = false;
254 openClassForEntry.attribute = [LayeredNetworkElement.ENTRY, eidx];
255 end
256 end
257 end
258
259 % for each activity of the calling task
260 for aidx = lqn.actsof{tidx_caller}
261 if ishostlayer | any(any(lqn.issynccaller(tidx_caller, lqn.entriesof{idx}))) %#ok<OR2>
262 % create a class
263 aidxClass{aidx} = ClosedClass(model, lqn.hashnames{aidx}, 0, clientDelay);
264 clientDelay.setService(aidxClass{aidx}, Disabled.getInstance());
265 for m=1:nreplicas
266 serverStation{m}.setService(aidxClass{aidx}, Disabled.getInstance());
267 end
268 aidxClass{aidx}.completes = false;
269 aidxClass{aidx}.attribute = [LayeredNetworkElement.ACTIVITY, aidx];
270 model.attribute.activities(end+1,:) = [aidxClass{aidx}.index, aidx];
271 hidx = lqn.parent(lqn.parent(aidx)); % index of host processor
272 if ~(ishostlayer && (hidx == idx))
273 % set the host demand for the activity
274 clientDelay.setService(aidxClass{aidx}, self.servtproc{aidx});
275 end
276 if lqn.sched(tidx_caller)~=SchedStrategy.REF % in 'ref' case the service activity is constant
277 % updmap(end+1,:) = [idx, aidx, 1, idxClass{aidx}.index];
278 end
279 if iscachelayer && full(lqn.graph(eidx,aidx))
280 clientDelay.setService(aidxClass{aidx}, self.servtproc{aidx});
281 end
282
283 % Create auxiliary think-time class if activity has think-time
284 if ~isempty(lqn.actthink{aidx}) && lqn.actthink{aidx}.getMean() > GlobalConstants.FineTol
285 aidxThinkClass{aidx} = ClosedClass(model, [lqn.hashnames{aidx},'.Think'], 0, clientDelay);
286 aidxThinkClass{aidx}.completes = false;
287 aidxThinkClass{aidx}.attribute = [LayeredNetworkElement.ACTIVITY, aidx];
288 clientDelay.setService(aidxThinkClass{aidx}, lqn.actthink{aidx});
289 for m=1:nreplicas
290 serverStation{m}.setService(aidxThinkClass{aidx}, Disabled.getInstance());
291 end
292 self.actthinkt_classes_updmap{idx}(end+1,:) = [idx, aidx, 1, aidxThinkClass{aidx}.index];
293 end
294 end
295 % add a class for each outgoing call from this activity
296 for cidx = lqn.callsof{aidx}
297 callmean(cidx) = lqn.callproc{cidx}.getMean;
298 switch lqn.calltype(cidx)
299 case CallType.ASYNC
300 if lqn.parent(lqn.callpair(cidx,2)) == idx % add only if the target is serverStation
301 if ~hasSource % we need to add source and sink to the model
302 hasSource = true;
303 model.attribute.sourceIdx = length(model.nodes)+1;
304 sourceStation = Source(model,'Source');
305 sinkStation = Sink(model,'Sink');
306 end
307 cidxClass{cidx} = OpenClass(model, lqn.callhashnames{cidx}, 0);
308 sourceStation.setArrival(cidxClass{cidx}, Immediate.getInstance());
309 clientDelay.setService(cidxClass{cidx}, Disabled.getInstance());
310 for m=1:nreplicas
311 serverStation{m}.setService(cidxClass{cidx}, Immediate.getInstance());
312 end
313 openClasses(end+1,:) = [cidxClass{cidx}.index, callmean(cidx), cidx];
314 model.attribute.calls(end+1,:) = [cidxClass{cidx}.index, cidx, lqn.callpair(cidx,1), lqn.callpair(cidx,2)];
315 cidxClass{cidx}.completes = false;
316 cidxClass{cidx}.attribute = [LayeredNetworkElement.CALL, cidx];
317 minRespT = 0;
318 for tidx_act = lqn.actsof{idx}
319 minRespT = minRespT + lqn.hostdem{tidx_act}.getMean; % upper bound, uses all activities not just the ones reachable by this entry
320 end
321 for m=1:nreplicas
322 serverStation{m}.setService(cidxClass{cidx}, Exp.fitMean(minRespT));
323 end
324 end
325 case CallType.SYNC
326 cidxClass{cidx} = ClosedClass(model, lqn.callhashnames{cidx}, 0, clientDelay);
327 clientDelay.setService(cidxClass{cidx}, Disabled.getInstance());
328 for m=1:nreplicas
329 serverStation{m}.setService(cidxClass{cidx}, Disabled.getInstance());
330 end
331 model.attribute.calls(end+1,:) = [cidxClass{cidx}.index, cidx, lqn.callpair(cidx,1), lqn.callpair(cidx,2)];
332 cidxClass{cidx}.completes = false;
333 cidxClass{cidx}.attribute = [LayeredNetworkElement.CALL, cidx];
334 minRespT = 0;
335 for tidx_act = lqn.actsof{idx}
336 minRespT = minRespT + lqn.hostdem{tidx_act}.getMean; % upper bound, uses all activities not just the ones reachable by this entry
337 end
338 for m=1:nreplicas
339 serverStation{m}.setService(cidxClass{cidx}, Exp.fitMean(minRespT));
340 end
341 end
342
343 if callmean(cidx) ~= nreplicas
344 switch lqn.calltype(cidx)
345 case CallType.SYNC
346 cidxAuxClass{cidx} = ClosedClass(model, [lqn.callhashnames{cidx},'.Aux'], 0, clientDelay);
347 cidxAuxClass{cidx}.completes = false;
348 cidxAuxClass{cidx}.attribute = [LayeredNetworkElement.CALL, cidx];
349 clientDelay.setService(cidxAuxClass{cidx}, Immediate.getInstance());
350 for m=1:nreplicas
351 serverStation{m}.setService(cidxAuxClass{cidx}, Disabled.getInstance());
352 end
353 end
354 end
355
356 % For SYNC calls in task layers, create classes for forwarding
357 % calls from the target entry. This implements synthetic
358 % synchronization: the caller blocks until the forwarding
359 % target completes, modeling contention correctly.
360 % FWD calls have the source ENTRY (not activity) in callpair(:,1),
361 % so we scan all calls to find FWD calls from the target entry.
362 % For chain forwarding (e0->e1->e2), recursively follow the chain.
363 if ~ishostlayer && lqn.calltype(cidx) == CallType.SYNC
364 % Collect all FWD calls reachable through forwarding chains
365 entries_to_scan = lqn.callpair(cidx, 2); % start with sync target
366 scanned_entries = [];
367 while ~isempty(entries_to_scan)
368 target_eidx = entries_to_scan(1);
369 entries_to_scan(1) = [];
370 if ismember(target_eidx, scanned_entries)
371 continue; % avoid cycles
372 end
373 scanned_entries(end+1) = target_eidx; %#ok<AGROW>
374 for fwd_cidx_iter = 1:lqn.ncalls
375 if lqn.calltype(fwd_cidx_iter) == CallType.FWD && lqn.callpair(fwd_cidx_iter, 1) == target_eidx
376 callmean(fwd_cidx_iter) = lqn.callproc{fwd_cidx_iter}.getMean;
377 cidxClass{fwd_cidx_iter} = ClosedClass(model, lqn.callhashnames{fwd_cidx_iter}, 0, clientDelay);
378 cidxClass{fwd_cidx_iter}.completes = false;
379 cidxClass{fwd_cidx_iter}.attribute = [LayeredNetworkElement.CALL, fwd_cidx_iter];
380 clientDelay.setService(cidxClass{fwd_cidx_iter}, Disabled.getInstance());
381 for m=1:nreplicas
382 serverStation{m}.setService(cidxClass{fwd_cidx_iter}, Disabled.getInstance());
383 end
384 model.attribute.calls(end+1,:) = [cidxClass{fwd_cidx_iter}.index, fwd_cidx_iter, lqn.callpair(fwd_cidx_iter,1), lqn.callpair(fwd_cidx_iter,2)];
385 % Follow the chain: scan the FWD target for further FWD calls
386 fwd_target_eidx = lqn.callpair(fwd_cidx_iter, 2);
387 if ~ismember(fwd_target_eidx, scanned_entries) && ~ismember(fwd_target_eidx, entries_to_scan)
388 entries_to_scan(end+1) = fwd_target_eidx; %#ok<AGROW>
389 end
390 end
391 end
392 end
393 end
394 end
395 end
396end
397
398% Ensure Source's sourceClasses and arrivalProcess arrays are properly sized for all classes
399% This is needed because the Source may be created during class iteration
400% when only some classes exist, and new closed classes added afterwards
401% won't have corresponding entries in sourceClasses/arrivalProcess
402if hasSource
403 nClasses = model.getNumberOfClasses();
404 for k = 1:nClasses
405 if k > length(sourceStation.input.sourceClasses) || isempty(sourceStation.input.sourceClasses{k})
406 sourceStation.input.sourceClasses{k} = {[], ServiceStrategy.LI, Disabled.getInstance()};
407 end
408 if k > length(sourceStation.arrivalProcess) || isempty(sourceStation.arrivalProcess{k})
409 sourceStation.arrivalProcess{k} = Disabled.getInstance();
410 end
411 end
412end
413
414P = model.initRoutingMatrix;
415if hasSource
416 for o = 1:size(openClasses,1)
417 oidx = openClasses(o,1);
418 p = 1 / openClasses(o,2); % divide by mean number of calls, they go to a server at random
419 for m=1:nreplicas
420 P{model.classes{oidx}, model.classes{oidx}}(sourceStation,serverStation{m}) = 1/nreplicas;
421 for n=1:nreplicas
422 P{model.classes{oidx}, model.classes{oidx}}(serverStation{m},serverStation{n}) = (1-p)/nreplicas;
423 end
424 P{model.classes{oidx}, model.classes{oidx}}(serverStation{m},sinkStation) = p;
425 end
426 cidx = openClasses(o,3); % 3 = source
427 self.arvproc_classes_updmap{idx}(end+1,:) = [idx, cidx, model.getNodeIndex(sourceStation), oidx];
428 for m=1:nreplicas
429 self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, model.getNodeIndex(serverStation{m}), oidx];
430 end
431 end
432end
433
434%% job positions are encoded as follows: 1=client, 2=any of the nreplicas server stations, 3=cache node, 4=fork node, 5=join node
435atClient = 1;
436atServer = 2;
437atCache = 3;
438
439jobPos = atClient; % start at client
440% second pass: setup the routing out of entries
441for tidx_caller = callers
442 % Use same condition as first pass - only process if closed class was created
443 hasDirectCallers = false;
444 isForwardingTarget = false;
445 if ishostlayer
446 if lqn.isref(tidx_caller)
447 hasDirectCallers = true;
448 else
449 for eidx_check = lqn.entriesof{tidx_caller}
450 if any(full(lqn.issynccaller(:, eidx_check))) || any(full(lqn.isasynccaller(:, eidx_check)))
451 hasDirectCallers = true;
452 break;
453 end
454 if isfield(lqn, 'arrival') && ~isempty(lqn.arrival) && ...
455 iscell(lqn.arrival) && eidx_check <= length(lqn.arrival) && ...
456 ~isempty(lqn.arrival{eidx_check})
457 hasDirectCallers = true;
458 break;
459 end
460 % Check if this entry is a forwarding target
461 for cidx_fwd = 1:lqn.ncalls
462 if full(lqn.calltype(cidx_fwd)) == CallType.FWD && full(lqn.callpair(cidx_fwd, 2)) == eidx_check
463 isForwardingTarget = true;
464 break;
465 end
466 end
467 end
468 end
469 end
470 if (ishostlayer && (hasDirectCallers || isForwardingTarget)) | any(any(lqn.issynccaller(tidx_caller, lqn.entriesof{idx}))) %#ok<OR2>
471 % for each entry of the calling task
472 ncaller_entries = length(lqn.entriesof{tidx_caller});
473 for eidx = lqn.entriesof{tidx_caller}
474 aidxClass_eidx = aidxClass{eidx};
475 aidxClass_tidx_caller = aidxClass{tidx_caller};
476 % initialize the probability to select an entry to be identical
477 P{aidxClass_tidx_caller, aidxClass_eidx}(clientDelay, clientDelay) = 1 / ncaller_entries;
478 if ncaller_entries > 1
479 % at successive iterations make sure to replace this with throughput ratio
480 self.route_prob_updmap{idx}(end+1,:) = [idx, tidx_caller, eidx, 1, 1, aidxClass_tidx_caller.index, aidxClass_eidx.index];
481 end
482 P = recurActGraph(P, tidx_caller, eidx, aidxClass_eidx, jobPos);
483 end
484 end
485end
486
487% Setup routing for entry-level open arrivals (AFTER recurActGraph to avoid being overwritten)
488if hasSource && ~isempty(entryOpenClasses)
489 for e = 1:size(entryOpenClasses,1)
490 eoidx = entryOpenClasses(e,1); % class index
491 openClass = model.classes{eoidx};
492
493 % Explicitly set routing: ONLY Source → Server → Sink
494 % Zero out all routing for this class first
495 for node1 = 1:length(model.nodes)
496 for node2 = 1:length(model.nodes)
497 P{openClass, openClass}(node1, node2) = 0;
498 end
499 end
500
501 % Now set the correct routing
502 for m=1:nreplicas
503 % Route: Source → ServerStation → Sink
504 P{openClass, openClass}(sourceStation,serverStation{m}) = 1/nreplicas;
505 P{openClass, openClass}(serverStation{m},sinkStation) = 1.0;
506 end
507 end
508end
509
510model.link(P);
511self.ensemble{idx} = model;
512
513 function [P, curClass, jobPos] = recurActGraph(P, tidx_caller, aidx, curClass, jobPos)
514 jobPosKey(aidx) = jobPos;
515 curClassKey{aidx} = curClass;
516 nextaidxs = find(lqn.graph(aidx,:)); % these include the called entries
517 if ~isempty(nextaidxs)
518 isNextPrecFork(aidx) = any(isPostAndAct(nextaidxs)); % indexed on aidx to avoid losing it during the recursion
519 % Save curClass/jobPos before fork branch loop so each branch
520 % starts with the same pre-fork state (prevents curClass
521 % corruption across parallel branches)
522 if isNextPrecFork(aidx)
523 forkSaveCurClass = curClass;
524 forkSaveJobPos = jobPos;
525 end
526 end
527
528 for nextaidx = nextaidxs % for all successor activities
529 if ~isempty(nextaidx)
530 % Restore pre-fork state for each branch iteration
531 if isNextPrecFork(aidx)
532 curClass = forkSaveCurClass;
533 jobPos = forkSaveJobPos;
534 end
535 isLoop = false;
536 % in the activity graph, the following if is entered only
537 % by an edge that is the return from a LOOP activity
538 if (lqn.graph(aidx,nextaidx) ~= lqn.dag(aidx,nextaidx))
539 isLoop = true;
540 end
541 if ~(lqn.parent(aidx) == lqn.parent(nextaidx)) % if different parent task
542 % if the successor activity is an entry of another task, this is a call
543 cidx = matchrow(lqn.callpair,[aidx,nextaidx]); % find the call index
544 switch lqn.calltype(cidx)
545 case CallType.ASYNC
546 % Async calls don't modify caller routing - caller continues immediately without blocking.
547 % Arrival rate at destination is handled via arvproc_classes_updmap (lines 170-195, 230-248).
548 case CallType.SYNC
549 [P, jobPos, curClass] = routeSynchCall(P, jobPos, curClass);
550 % Synthetic forwarding: model blocking through
551 % forwarding chains. Following LQNS, the caller
552 % blocks through each hop in the chain. Process
553 % forwarding calls level-by-level (grouped by
554 % source entry) to handle both chains and
555 % multi-destination forwarding.
556 if ~ishostlayer
557 sync_target_eidx = lqn.callpair(cidx, 2);
558 % Process forwarding levels in BFS order
559 cur_level_entries = sync_target_eidx;
560 visited_entries = [];
561 has_fwd = false;
562 while ~isempty(cur_level_entries)
563 next_level_entries = [];
564 for le = 1:length(cur_level_entries)
565 src_eidx = cur_level_entries(le);
566 if ismember(src_eidx, visited_entries), continue; end
567 visited_entries(end+1) = src_eidx; %#ok<AGROW>
568 % Collect FWD calls from this entry
569 level_calls = [];
570 for fc_iter = 1:lqn.ncalls
571 if lqn.calltype(fc_iter) == CallType.FWD && lqn.callpair(fc_iter, 1) == src_eidx ...
572 && length(cidxClass) >= fc_iter && ~isempty(cidxClass{fc_iter})
573 level_calls(end+1) = fc_iter; %#ok<AGROW>
574 end
575 end
576 if isempty(level_calls), continue; end
577 has_fwd = true;
578 total_prob = sum(callmean(level_calls));
579 need_merge = (length(level_calls) > 1) || (total_prob < 1.0 - GlobalConstants.FineTol);
580 if need_merge
581 fwdMergeClass = ClosedClass(model, ['FwdMerge_', lqn.hashnames{src_eidx}], 0, clientDelay);
582 fwdMergeClass.completes = false;
583 fwdMergeClass.attribute = [-1, -1]; % synthetic, skipped in getEnsembleAvg
584 clientDelay.setService(fwdMergeClass, Immediate.getInstance());
585 for m=1:nreplicas
586 serverStation{m}.setService(fwdMergeClass, Disabled.getInstance());
587 end
588 for fk = 1:length(level_calls)
589 lc = level_calls(fk);
590 P{curClass, cidxClass{lc}}(clientDelay, clientDelay) = callmean(lc);
591 P{cidxClass{lc}, fwdMergeClass}(clientDelay, clientDelay) = 1.0;
592 clientDelay.setService(cidxClass{lc}, callservtproc{lc});
593 self.call_classes_updmap{idx}(end+1,:) = [idx, lc, 1, cidxClass{lc}.index];
594 % Queue next level
595 fwd_tgt = lqn.callpair(lc, 2);
596 if ~ismember(fwd_tgt, visited_entries) && ~ismember(fwd_tgt, next_level_entries)
597 next_level_entries(end+1) = fwd_tgt; %#ok<AGROW>
598 end
599 end
600 if total_prob < 1.0 - GlobalConstants.FineTol
601 P{curClass, fwdMergeClass}(clientDelay, clientDelay) = 1.0 - total_prob;
602 end
603 curClass = fwdMergeClass;
604 else
605 % Single FWD with prob 1.0
606 fwd_cidx_iter = level_calls(1);
607 P{curClass, cidxClass{fwd_cidx_iter}}(clientDelay, clientDelay) = 1.0;
608 clientDelay.setService(cidxClass{fwd_cidx_iter}, callservtproc{fwd_cidx_iter});
609 self.call_classes_updmap{idx}(end+1,:) = [idx, fwd_cidx_iter, 1, cidxClass{fwd_cidx_iter}.index];
610 curClass = cidxClass{fwd_cidx_iter};
611 % Queue next level
612 fwd_tgt = lqn.callpair(fwd_cidx_iter, 2);
613 if ~ismember(fwd_tgt, visited_entries) && ~ismember(fwd_tgt, next_level_entries)
614 next_level_entries(end+1) = fwd_tgt; %#ok<AGROW>
615 end
616 end
617 end
618 cur_level_entries = next_level_entries;
619 end
620 if has_fwd
621 jobPos = atClient;
622 end
623 end
624 case CallType.FWD
625 % In the host layer, forwarding targets have their own
626 % jobs competing for the processor independently.
627 % The source task's job returns to think time after
628 % completing its own activities; no routing through
629 % the forwarding target's activities is needed.
630 % curClass and jobPos remain unchanged.
631 end
632 else
633 % at this point, we have processed all calls, let us do the
634 % activities local to the task next
635 if isempty(intersect(lqn.eshift+(1:lqn.nentries), nextaidxs))
636 % if next activity is not an entry
637 jobPos = jobPosKey(aidx);
638 curClass = curClassKey{aidx};
639 else
640 if ismember(nextaidxs(find(nextaidxs==nextaidx)-1), lqn.eshift+(1:lqn.nentries))
641 curClassC = curClass;
642 end
643 jobPos = atClient;
644 curClass = curClassC;
645 end
646 if jobPos == atClient % at client node
647 if ishostlayer
648 if ~iscachelayer
649 for m=1:nreplicas
650 if isNextPrecFork(aidx)
651 % if next activity is a post-and
652 P{curClass, curClass}(clientDelay, forkNode) = 1.0;
653 f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
654 forkClassStack(end+1) = curClass.index;
655 P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
656 P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, serverStation{m}) = 1.0;
657 else
658 if isPreAndAct(aidx)
659 % before entering the job we go back to the entry class at the last fork
660 forkClass = model.classes{forkClassStack(end)};
661 forkClassStack(end) = [];
662 P{curClass, forkClass}(clientDelay,joinNode) = 1.0;
663 P{forkClass, aidxClass{nextaidx}}(joinNode,serverStation{m}) = 1.0;
664 else
665 P{curClass, aidxClass{nextaidx}}(clientDelay,serverStation{m}) = full(lqn.graph(aidx,nextaidx));
666 end
667 end
668 serverStation{m}.setService(aidxClass{nextaidx}, lqn.hostdem{nextaidx});
669 if isfunctionlayer
670 serverStation{m}.setDelayOff(aidxClass{nextaidx}, lqn.setuptime{lqn.parent(nextaidx)}, lqn.delayofftime{lqn.parent(nextaidx)});
671 end
672 end
673 jobPos = atServer;
674 curClass = aidxClass{nextaidx};
675 self.servt_classes_updmap{idx}(end+1,:) = [idx, nextaidx, 2, aidxClass{nextaidx}.index];
676 else
677 P{curClass, aidxClass{nextaidx}}(clientDelay,cacheNode) = full(lqn.graph(aidx,nextaidx));
678
679 cacheNode.setReadItemEntry(aidxClass{nextaidx},lqn.itemproc{aidx},lqn.nitems(aidx));
680 lqn.hitmissaidx = find(lqn.graph(nextaidx,:));
681 lqn.hitaidx = lqn.hitmissaidx(1);
682 lqn.missaidx = lqn.hitmissaidx(2);
683
684 cacheNode.setHitClass(aidxClass{nextaidx},aidxClass{lqn.hitaidx});
685 cacheNode.setMissClass(aidxClass{nextaidx},aidxClass{lqn.missaidx});
686
687 jobPos = atCache; % cache
688 curClass = aidxClass{nextaidx};
689 %self.route_prob_updmap{idx}(end+1,:) = [idx, nextaidx, lqn.hitaidx, 3, 3, aidxClass{nextaidx}.index, aidxClass{lqn.hitaidx}.index];
690 %self.route_prob_updmap{idx}(end+1,:) = [idx, nextaidx, lqn.missaidx, 3, 3, aidxClass{nextaidx}.index, aidxClass{lqn.missaidx}.index];
691 end
692 else % not ishostlayer
693 if isNextPrecFork(aidx)
694 % if next activity is a post-and
695 P{curClass, curClass}(clientDelay, forkNode) = 1.0;
696 f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
697 forkClassStack(end+1) = curClass.index;
698 P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
699 P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, clientDelay) = 1.0;
700 else
701 if isPreAndAct(aidx)
702 % before entering the job we go back to the entry class at the last fork
703 forkClass = model.classes{forkClassStack(end)};
704 forkClassStack(end) = [];
705 P{curClass, forkClass}(clientDelay,joinNode) = 1.0;
706 P{forkClass, aidxClass{nextaidx}}(joinNode,clientDelay) = 1.0;
707 else
708 P{curClass, aidxClass{nextaidx}}(clientDelay,clientDelay) = full(lqn.graph(aidx,nextaidx));
709 end
710 end
711 jobPos = atClient;
712 curClass = aidxClass{nextaidx};
713 clientDelay.setService(aidxClass{nextaidx}, self.servtproc{nextaidx});
714 self.thinkt_classes_updmap{idx}(end+1,:) = [idx, nextaidx, 1, aidxClass{nextaidx}.index];
715 end
716 elseif jobPos == atServer || jobPos == atCache % at server station
717 if ishostlayer
718 if iscachelayer
719 curClass = aidxClass{nextaidx};
720 for m=1:nreplicas
721 if isNextPrecFork(aidx)
722 % if next activity is a post-and
723 P{curClass, curClass}(cacheNode, forkNode) = 1.0;
724 f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
725 forkClassStack(end+1) = curClass.index;
726 P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
727 P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, serverStation{m}) = 1.0;
728 else
729 if isPreAndAct(aidx)
730 % before entering the job we go back to the entry class at the last fork
731 forkClass = model.classes{forkClassStack(end)};
732 forkClassStack(end) = [];
733
734 P{curClass, forkClass}(cacheNode,joinNode) = 1.0;
735 P{forkClass, aidxClass{nextaidx}}(joinNode,serverStation{m}) = 1.0;
736 else
737 P{curClass, aidxClass{nextaidx}}(cacheNode,serverStation{m}) = full(lqn.graph(aidx,nextaidx));
738 end
739 end
740 serverStation{m}.setService(aidxClass{nextaidx}, lqn.hostdem{nextaidx});
741 %self.route_prob_updmap{idx}(end+1,:) = [idx, nextaidx, nextaidx, 3, 2, aidxClass{nextaidx}.index, aidxClass{nextaidx}.index];
742 end
743 else
744 for m=1:nreplicas
745 if isNextPrecFork(aidx)
746 % if next activity is a post-and
747 P{curClass, curClass}(serverStation{m}, forkNode) = 1.0;
748 f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
749 forkClassStack(end+1) = curClass.index;
750 P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
751 P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, serverStation{m}) = 1.0;
752 else
753 if isPreAndAct(aidx)
754 % before entering the job we go back to the entry class at the last fork
755 forkClass = model.classes{forkClassStack(end)};
756 forkClassStack(end) = [];
757 P{curClass, forkClass}(serverStation{m},joinNode) = 1.0;
758 P{forkClass, aidxClass{nextaidx}}(joinNode,serverStation{m}) = 1.0;
759 else
760 P{curClass, aidxClass{nextaidx}}(serverStation{m},serverStation{m}) = full(lqn.graph(aidx,nextaidx));
761 end
762 end
763 serverStation{m}.setService(aidxClass{nextaidx}, lqn.hostdem{nextaidx});
764 end
765 end
766 jobPos = atServer;
767 curClass = aidxClass{nextaidx};
768 self.servt_classes_updmap{idx}(end+1,:) = [idx, nextaidx, 2, aidxClass{nextaidx}.index];
769 else
770 for m=1:nreplicas
771 if isNextPrecFork(aidx)
772 % if next activity is a post-and
773 P{curClass, curClass}(serverStation{m}, forkNode) = 1.0;
774 f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
775 forkClassStack(end+1) = curClass.index;
776 P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
777 P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, clientDelay) = 1.0;
778 else
779 if isPreAndAct(aidx)
780 % before entering the job we go back to the entry class at the last fork
781 forkClass = model.classes{forkClassStack(end)};
782 forkClassStack(end) = [];
783 P{curClass, forkClass}(serverStation{m},joinNode) = 1.0;
784 P{forkClass, aidxClass{nextaidx}}(joinNode,clientDelay) = 1.0;
785 else
786 P{curClass, aidxClass{nextaidx}}(serverStation{m},clientDelay) = full(lqn.graph(aidx,nextaidx));
787 end
788 end
789 end
790 jobPos = atClient;
791 curClass = aidxClass{nextaidx};
792 clientDelay.setService(aidxClass{nextaidx}, self.servtproc{nextaidx});
793 self.thinkt_classes_updmap{idx}(end+1,:) = [idx, nextaidx, 1, aidxClass{nextaidx}.index];
794 end
795 end
796 if aidx ~= nextaidx && ~isLoop
797 %% now recursively build the rest of the routing matrix graph
798 [P, curClass, jobPos] = recurActGraph(P, tidx_caller, nextaidx, curClass, jobPos);
799
800 % At this point curClassRec is the last class in the
801 % recursive branch, which we now close with a reply
802 if jobPos == atClient
803 P{curClass, aidxClass{tidx_caller}}(clientDelay,clientDelay) = 1;
804 if ~strcmp(curClass.name(end-3:end),'.Aux')
805 curClass.completes = true;
806 end
807 else
808 for m=1:nreplicas
809 P{curClass, aidxClass{tidx_caller}}(serverStation{m},clientDelay) = 1;
810 end
811 if ~strcmp(curClass.name(end-3:end),'.Aux')
812 curClass.completes = true;
813 end
814 end
815 end
816 end
817 end
818 end % nextaidx
819 end
820
821 function [P, jobPos, curClass] = routeSynchCall(P, jobPos, curClass)
% routeSynchCall - wire the synchronous call cidx into the routing matrix P
% of the layer submodel being built by the enclosing buildLayersRecursive.
%
% Nested function: cidx, idx, nreplicas, callmean, lqn, clientDelay,
% serverStation, cidxClass, cidxAuxClass, aidxThinkClass, callservtproc,
% model and self are all shared with the parent workspace.
%
% Inputs/outputs:
%   P        - cell matrix of class-switching routing matrices, updated in place
%   jobPos   - where the job currently resides (atClient or atServer);
%              updated to reflect where the job ends up after the call
%   curClass - current job class; updated to the class active after the call
%
% Structure: outer switch on jobPos (atClient / atServer); within each,
% the call either targets an entry of this layer's server
% (lqn.parent(lqn.callpair(cidx,2)) == idx) or an entry of another layer;
% within that, three regimes on callmean(cidx) vs nreplicas. The Aux class
% (cidxAuxClass) appears to carry the leftover/repeat visit probability so
% that a non-integer mean call count is realized in expectation — TODO confirm.
822 switch jobPos
823 case atClient
824 if lqn.parent(lqn.callpair(cidx,2)) == idx
825 % if a call to an entry of the server in this layer
% Fewer mean calls than replicas: with prob callmean the job visits one
% of the nreplicas server stations (split uniformly), else it skips the
% call via the Aux class.
826 if callmean(cidx) < nreplicas
827 P{curClass, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1 - callmean(cidx); % note that callmean(cidx) < nreplicas
% NOTE(review): 1 - callmean(cidx) can be negative when
% 1 < callmean(cidx) < nreplicas (possible only if nreplicas > 1) — verify.
828 for m=1:nreplicas
829 % if isNextPrecFork(aidx)
830 % end
831 % % if next activity is a post-and
832 % P{curClass, curClass}(serverStation{m}, forkNode) = 1.0;
833 % forkStackClass(end+1) = curClass.index;
834 % f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
835 % P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
836 % P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, clientDelay) = 1.0;
837 % else
838 P{curClass, cidxClass{cidx}}(clientDelay,serverStation{m}) = callmean(cidx) / nreplicas;
839 P{cidxClass{cidx}, cidxClass{cidx}}(serverStation{m},clientDelay) = 1.0; % not needed, just to avoid leaving the Aux class disconnected
840 end
841 P{cidxAuxClass{cidx}, cidxClass{cidx}}(clientDelay,clientDelay) = 1.0; % not needed, just to avoid leaving the Aux class disconnected
% Exactly one visit per replica on average: route uniformly to the
% replicas and straight back to the client.
842 elseif callmean(cidx) == nreplicas
843 for m=1:nreplicas
844 P{curClass, cidxClass{cidx}}(clientDelay,serverStation{m}) = 1 / nreplicas;
845 P{cidxClass{cidx}, cidxClass{cidx}}(serverStation{m},clientDelay) = 1.0;
846 end
847 else % callmean(cidx) > nreplicas
% More calls than replicas: after each visit the job returns to the
% client in the Aux class and re-enters the server with probability
% 1 - 1/(callmean/nreplicas), giving a geometric-like number of repeats
% — TODO confirm intended distribution.
848 for m=1:nreplicas
849 P{curClass, cidxClass{cidx}}(clientDelay,serverStation{m}) = 1 / nreplicas;
850 P{cidxClass{cidx}, cidxAuxClass{cidx}}(serverStation{m},clientDelay) = 1.0 ;
851 P{cidxAuxClass{cidx}, cidxClass{cidx}}(clientDelay,serverStation{m}) = 1.0 - 1.0 / (callmean(cidx) / nreplicas);
852 end
% NOTE(review): divisor here is callmean(cidx) while line above uses
% callmean(cidx)/nreplicas; with nreplicas > 1 the Aux-class row does not
% obviously sum to 1 — verify this is intentional.
853 P{cidxAuxClass{cidx}, cidxClass{cidx}}(clientDelay,clientDelay) = 1.0 / (callmean(cidx));
854 end
855 jobPos = atClient;
% Call class spends no time at the client; service happens at the server.
856 clientDelay.setService(cidxClass{cidx}, Immediate.getInstance());
857 for m=1:nreplicas
858 serverStation{m}.setService(cidxClass{cidx}, callservtproc{cidx});
% Record (layer, call, node, class) so updateThinkTimes/iterations can
% refresh this service process later.
859 self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, model.getNodeIndex(serverStation{m}), cidxClass{cidx}.index];
860 end
861 curClass = cidxClass{cidx};
862 else
863 % if it is not a call to an entry of the server
% Remote call: modeled entirely as a delay at the client station (the
% callee's response time is folded into callservtproc), so all routing
% stays on the clientDelay node.
864 if callmean(cidx) < nreplicas
865 P{curClass, cidxClass{cidx}}(clientDelay,clientDelay) = callmean(cidx)/nreplicas; % the mean number of calls is now embedded in the demand
866 P{cidxClass{cidx}, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1; % the mean number of calls is now embedded in the demand
867 P{curClass, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1 - callmean(cidx)/nreplicas; % the mean number of calls is now embedded in the demand
868 curClass = cidxAuxClass{cidx};
869 elseif callmean(cidx) == nreplicas
870 P{curClass, cidxClass{cidx}}(clientDelay,clientDelay) = 1;
871 curClass = cidxClass{cidx};
872 else % callmean(cidx) > nreplicas
873 P{curClass, cidxClass{cidx}}(clientDelay,clientDelay) = 1; % the mean number of calls is now embedded in the demand
874 P{cidxClass{cidx}, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1;% / (callmean(cidx)/nreplicas); % the mean number of calls is now embedded in the demand
875 curClass = cidxAuxClass{cidx};
876 end
877 jobPos = atClient;
878 clientDelay.setService(cidxClass{cidx}, callservtproc{cidx});
% Node index 1 = clientDelay (see serverIdx/clientIdx attributes set at build).
879 self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, 1, cidxClass{cidx}.index];
880 end
881 case atServer % job at server
882 if lqn.parent(lqn.callpair(cidx,2)) == idx
883 % if it is a call to an entry of the server
884 if callmean(cidx) < nreplicas
885 for m=1:nreplicas
% With prob callmean the job re-enters the same replica, else it
% leaves for the client.
% NOTE(review): as above, 1 - callmean(cidx) can go negative when
% callmean(cidx) > 1 — verify.
886 P{curClass, cidxClass{cidx}}(serverStation{m},clientDelay) = 1 - callmean(cidx);
887 P{curClass, cidxClass{cidx}}(serverStation{m},serverStation{m}) = callmean(cidx);
888 serverStation{m}.setService(cidxClass{cidx}, callservtproc{cidx});
889 end
890 jobPos = atClient;
891 curClass = cidxAuxClass{cidx};
892 elseif callmean(cidx) == nreplicas
893 for m=1:nreplicas
894 P{curClass, cidxClass{cidx}}(serverStation{m},serverStation{m}) = 1;
895 end
896 jobPos = atServer;
897 curClass = cidxClass{cidx};
898 else % callmean(cidx) > nreplicas
% Repeat service at the same replica with prob 1 - 1/callmean, exit
% to the client (in the Aux class) with prob 1/callmean.
899 for m=1:nreplicas
900 P{curClass, cidxClass{cidx}}(serverStation{m},serverStation{m}) = 1;
901 P{cidxClass{cidx}, cidxClass{cidx}}(serverStation{m},serverStation{m}) = 1 - 1 / (callmean(cidx));
902 P{cidxClass{cidx}, cidxAuxClass{cidx}}(serverStation{m},clientDelay) = 1 / (callmean(cidx));
903 end
904 jobPos = atClient;
905 curClass = cidxAuxClass{cidx};
906 end
907 for m=1:nreplicas
908 serverStation{m}.setService(cidxClass{cidx}, callservtproc{cidx});
909 self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, model.getNodeIndex(serverStation{m}), cidxClass{cidx}.index];
910 end
911 else
912 % if it is not a call to an entry of the server
913 % callmean not needed since we switched
914 % to ResidT to model service time at client
% Remote call issued while at the server: move the job back to the
% client, where the call's residence time is served as a delay.
915 if callmean(cidx) < nreplicas
916 for m=1:nreplicas
917 P{curClass, cidxClass{cidx}}(serverStation{m},clientDelay) = 1;
918 end
919 P{cidxClass{cidx}, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1;
920 curClass = cidxAuxClass{cidx};
921 elseif callmean(cidx) == nreplicas
922 for m=1:nreplicas
923 P{curClass, cidxClass{cidx}}(serverStation{m},clientDelay) = 1;
924 end
925 curClass = cidxClass{cidx};
926 else % callmean(cidx) > nreplicas
927 for m=1:nreplicas
928 P{curClass, cidxClass{cidx}}(serverStation{m},clientDelay) = 1;
929 end
930 P{cidxClass{cidx}, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1;
931 curClass = cidxAuxClass{cidx};
932 end
933 jobPos = atClient;
934 clientDelay.setService(cidxClass{cidx}, callservtproc{cidx});
935 self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, 1, cidxClass{cidx}.index];
936 end
937 end
938
939 % After synch call returns, route through activity think-time class if applicable
940 callingAidx = lqn.callpair(cidx, 1); % source activity of the call
941 if ~isempty(aidxThinkClass{callingAidx})
942 % Route from current class to think-time class at client
943 P{curClass, aidxThinkClass{callingAidx}}(clientDelay, clientDelay) = 1.0;
944 curClass = aidxThinkClass{callingAidx};
945 jobPos = atClient;
946 end
947 end
948end
Definition mmt.m:124