LINE Solver
MATLAB API documentation
buildLayersRecursive.m
1function buildLayersRecursive(self, idx, callers, ishostlayer)
2lqn = self.lqn;
3jobPosKey = zeros(lqn.nidx,1);
4curClassKey = cell(lqn.nidx,1);
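% jobPosKey and curClassKey memoize, per activity, the job position and the
% class reached so far; recurActGraph restores them when the depth-first
% traversal of the activity graph returns to a branch point.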
5nreplicas = lqn.repl(idx);
6%mult = lqn.mult;
7mult = lqn.maxmult; % this removes spare capacity that cannot be used
8lqn.mult = mult;
9callservtproc = self.callservtproc;
10model = Network(lqn.hashnames{idx});
11model.setChecks(false); % fast mode
12model.attribute = struct('hosts',[],'tasks',[],'entries',[],'activities',[],'calls',[],'serverIdx',0);
13if ishostlayer | any(any(lqn.issynccaller(callers, lqn.entriesof{idx}))) | any(any(lqn.isasynccaller(callers, lqn.entriesof{idx}))) %#ok<OR2>
14 clientDelay = Delay(model, 'Clients');
15 model.attribute.clientIdx = 1;
16 model.attribute.serverIdx = 2;
17 model.attribute.sourceIdx = NaN;
18else
19 model.attribute.serverIdx = 1;
20 model.attribute.clientIdx = NaN;
21 model.attribute.sourceIdx = NaN;
22end
23serverStation = cell(1,nreplicas);
24isfunctionlayer = all(lqn.isfunction(callers)) && ishostlayer;
25for m=1:nreplicas
26 if m == 1
27 serverStation{m} = Queue(model,lqn.hashnames{idx}, lqn.sched(idx));
28 else
29 serverStation{m} = Queue(model,[lqn.hashnames{idx},'.',num2str(m)], lqn.sched(idx));
30 end
31 serverStation{m}.setNumberOfServers(mult(idx));
32 serverStation{m}.attribute.ishost = ishostlayer;
33 serverStation{m}.attribute.idx = idx;
34end
35
36iscachelayer = all(lqn.iscache(callers)) && ishostlayer;
37if iscachelayer
38 cacheNode = Cache(model, lqn.hashnames{callers}, lqn.nitems(callers), lqn.itemcap{callers}, lqn.replacestrat(callers));
39end
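% For a cache layer, a single Cache node models the caller's cache; its hit
% and miss classes are wired up later in recurActGraph via setHitClass and
% setMissClass.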
40
41actsInCaller = lqn.actsof{callers};
42isPostAndAct = full(lqn.actposttype)==ActivityPrecedenceType.POST_AND;
43isPreAndAct = full(lqn.actpretype)==ActivityPrecedenceType.PRE_AND;
44hasfork = any(intersect(find(isPostAndAct),actsInCaller));
45
46maxfanout = 1; % maximum output parallelism level of fork nodes
47for aidx = actsInCaller(:)'
48 successors = find(lqn.graph(aidx,:));
49 if any(isPostAndAct(successors))
50 maxfanout = max(maxfanout, sum(isPostAndAct(successors)));
51 end
52end
53
54if hasfork
55 forkNode = Fork(model, 'Fork_PostAnd');
56 for f=1:maxfanout
57 forkOutputRouter{f} = Router(model, ['Fork_PostAnd_',num2str(f)]);
58 end
59    forkClassStack = []; % stack of the entry classes at the visited forks; the most recently visited fork is at the end of the list.
60end
61
62isPreAndAct = full(lqn.actpretype)==ActivityPrecedenceType.PRE_AND;
63hasjoin = any(isPreAndAct(actsInCaller));
64if hasjoin
65 joinNode = Join(model, 'Join_PreAnd', forkNode);
66end
67
68aidxClass = cell(1,lqn.nentries+lqn.nacts);
69cidxClass = cell(1,0);
70cidxAuxClass = cell(1,0);
71
72self.servt_classes_updmap{idx} = zeros(0,4); % [modelidx, actidx, node, class] % server classes to update
73self.thinkt_classes_updmap{idx} = zeros(0,4); % [modelidx, actidx, node, class] % client classes to update
74self.arvproc_classes_updmap{idx} = zeros(0,4); % [modelidx, actidx, node, class] % classes to update in the next iteration for asynch calls
75self.call_classes_updmap{idx} = zeros(0,4); % [modelidx, callidx, node, class] % calls classes to update in the next iteration (includes calls in client classes)
76self.route_prob_updmap{idx} = zeros(0,7); % [modelidx, actidxfrom, actidxto, nodefrom, nodeto, classfrom, classto] % routing probabilities to update in the next iteration
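% Example of the row layout above: a row such as
%   [idx, nextaidx, 2, aidxClass{nextaidx}.index]
% in servt_classes_updmap records that, at the next solver iteration, the
% service process of activity nextaidx must be refreshed at node 2 (the
% server station) for the listed class of submodel idx.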
77
78if ishostlayer
79 model.attribute.hosts(end+1,:) = [NaN, model.attribute.serverIdx ];
80else
81 model.attribute.tasks(end+1,:) = [NaN, model.attribute.serverIdx ];
82end
83
84hasSource = false; % flag whether a source is needed
85openClasses = [];
86% first pass: create the classes
87for tidx_caller = callers
88 if ishostlayer | any(any(lqn.issynccaller(tidx_caller, lqn.entriesof{idx}))) %#ok<OR2> % if it is only an asynch caller the closed classes are not needed
89 if self.njobs(tidx_caller,idx) == 0
90 % for each entry of the calling task
91 % determine job population
92 % this block matches the corresponding calculations in
93 % updateThinkTimes
94 njobs = mult(tidx_caller)*lqn.repl(tidx_caller);
95 if isinf(njobs)
96 callers_of_tidx_caller = find(lqn.taskgraph(:,tidx_caller));
97 njobs = sum(mult(callers_of_tidx_caller)); %#ok<FNDSB>
98 if isinf(njobs)
99                    % if the callers of tidx_caller are also infinite servers, then use
100                    % a heuristic
101 njobs = min(sum(mult(isfinite(mult)) .* lqn.repl(isfinite(mult))),1e6);
102 end
103 end
104 self.njobs(tidx_caller,idx) = njobs;
105 else
106 njobs = self.njobs(tidx_caller,idx);
107 end
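            % Example: with mult(tidx_caller)=2 and repl(tidx_caller)=3 the closed class
            % receives njobs=6; if the caller is an infinite server, the population is
            % instead taken from the multiplicities of its own callers, falling back to
            % a heuristic capped at 1e6 when those are also infinite.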
108 caller_name = lqn.hashnames{tidx_caller};
109 aidxClass{tidx_caller} = ClosedClass(model, caller_name, njobs, clientDelay);
110 clientDelay.setService(aidxClass{tidx_caller}, Disabled.getInstance());
111 for m=1:nreplicas
112 serverStation{m}.setService(aidxClass{tidx_caller}, Disabled.getInstance());
113 end
114 aidxClass{tidx_caller}.completes = false;
115 aidxClass{tidx_caller}.setReferenceClass(true); % renormalize residence times using the visits to the task
116 aidxClass{tidx_caller}.attribute = [LayeredNetworkElement.TASK, tidx_caller];
117 model.attribute.tasks(end+1,:) = [aidxClass{tidx_caller}.index, tidx_caller];
118 %self.thinkproc
119 clientDelay.setService(aidxClass{tidx_caller}, self.thinkproc{tidx_caller});
120 if ~lqn.isref(tidx_caller)
121 self.thinkt_classes_updmap{idx}(end+1,:) = [idx, tidx_caller, 1, aidxClass{tidx_caller}.index];
122 end
123 for eidx = lqn.entriesof{tidx_caller}
124 % create a class
125 aidxClass{eidx} = ClosedClass(model, lqn.hashnames{eidx}, 0, clientDelay);
126 clientDelay.setService(aidxClass{eidx}, Disabled.getInstance());
127 for m=1:nreplicas
128 serverStation{m}.setService(aidxClass{eidx}, Disabled.getInstance());
129 end
130 aidxClass{eidx}.completes = false;
131 aidxClass{eidx}.attribute = [LayeredNetworkElement.ENTRY, eidx];
132 model.attribute.entries(end+1,:) = [aidxClass{eidx}.index, eidx];
133 [singleton, javasingleton] = Immediate.getInstance();
134 if isempty(model.obj)
135 clientDelay.setService(aidxClass{eidx}, singleton);
136 else
137 clientDelay.setService(aidxClass{eidx}, javasingleton);
138 end
139 end
140 end
141
142 % for each activity of the calling task
143 for aidx = lqn.actsof{tidx_caller}
144 if ishostlayer | any(any(lqn.issynccaller(tidx_caller, lqn.entriesof{idx}))) %#ok<OR2>
145 % create a class
146 aidxClass{aidx} = ClosedClass(model, lqn.hashnames{aidx}, 0, clientDelay);
147 clientDelay.setService(aidxClass{aidx}, Disabled.getInstance());
148 for m=1:nreplicas
149 serverStation{m}.setService(aidxClass{aidx}, Disabled.getInstance());
150 end
151 aidxClass{aidx}.completes = false;
152 aidxClass{aidx}.attribute = [LayeredNetworkElement.ACTIVITY, aidx];
153 model.attribute.activities(end+1,:) = [aidxClass{aidx}.index, aidx];
154 hidx = lqn.parent(lqn.parent(aidx)); % index of host processor
155 if ~(ishostlayer && (hidx == idx))
156 % set the host demand for the activity
157 clientDelay.setService(aidxClass{aidx}, self.servtproc{aidx});
158 end
159 if lqn.sched(tidx_caller)~=SchedStrategy.REF % in 'ref' case the service activity is constant
160 % updmap(end+1,:) = [idx, aidx, 1, idxClass{aidx}.index];
161 end
162 if iscachelayer && full(lqn.graph(eidx,aidx))
163 clientDelay.setService(aidxClass{aidx}, self.servtproc{aidx});
164 end
165 end
166 % add a class for each outgoing call from this activity
167 for cidx = lqn.callsof{aidx}
168 callmean(cidx) = lqn.callproc{cidx}.getMean;
169 switch lqn.calltype(cidx)
170 case CallType.ASYNC
171                        if lqn.parent(lqn.callpair(cidx,2)) == idx % add only if the called entry belongs to the server task of this layer
172 if ~hasSource % we need to add source and sink to the model
173 hasSource = true;
174 model.attribute.sourceIdx = length(model.nodes)+1;
175 sourceStation = Source(model,'Source');
176 sinkStation = Sink(model,'Sink');
177 end
178 cidxClass{cidx} = OpenClass(model, lqn.callhashnames{cidx}, 0);
179 sourceStation.setArrival(cidxClass{cidx}, Immediate.getInstance());
180 clientDelay.setService(cidxClass{cidx}, Disabled.getInstance());
181 for m=1:nreplicas
182 serverStation{m}.setService(cidxClass{cidx}, Immediate.getInstance());
183 end
184                            openClasses(end+1,:) = [cidxClass{cidx}.index, callmean(cidx), cidx]; % [open class index, mean calls per invocation, call index]
185 model.attribute.calls(end+1,:) = [cidxClass{cidx}.index, cidx, lqn.callpair(cidx,1), lqn.callpair(cidx,2)];
186 cidxClass{cidx}.completes = false;
187 cidxClass{cidx}.attribute = [LayeredNetworkElement.CALL, cidx];
188 minRespT = 0;
189 for tidx_act = lqn.actsof{idx}
190                                minRespT = minRespT + lqn.hostdem{tidx_act}.getMean; % upper bound: uses all activities, not just those reachable from this entry
191 end
192 for m=1:nreplicas
193 serverStation{m}.setService(cidxClass{cidx}, Exp.fitMean(minRespT));
194 end
195 end
196 case CallType.SYNC
197 cidxClass{cidx} = ClosedClass(model, lqn.callhashnames{cidx}, 0, clientDelay);
198 clientDelay.setService(cidxClass{cidx}, Disabled.getInstance());
199 for m=1:nreplicas
200 serverStation{m}.setService(cidxClass{cidx}, Disabled.getInstance());
201 end
202 model.attribute.calls(end+1,:) = [cidxClass{cidx}.index, cidx, lqn.callpair(cidx,1), lqn.callpair(cidx,2)];
203 cidxClass{cidx}.completes = false;
204 cidxClass{cidx}.attribute = [LayeredNetworkElement.CALL, cidx];
205 minRespT = 0;
206 for tidx_act = lqn.actsof{idx}
207                            minRespT = minRespT + lqn.hostdem{tidx_act}.getMean; % upper bound: uses all activities, not just those reachable from this entry
208 end
209 for m=1:nreplicas
210 serverStation{m}.setService(cidxClass{cidx}, Exp.fitMean(minRespT));
211 end
212 end
213
214 if callmean(cidx) ~= nreplicas
215 switch lqn.calltype(cidx)
216 case CallType.SYNC
217 cidxAuxClass{cidx} = ClosedClass(model, [lqn.callhashnames{cidx},'.Aux'], 0, clientDelay);
218 cidxAuxClass{cidx}.completes = false;
219 cidxAuxClass{cidx}.attribute = [LayeredNetworkElement.CALL, cidx];
220 clientDelay.setService(cidxAuxClass{cidx}, Immediate.getInstance());
221 for m=1:nreplicas
222 serverStation{m}.setService(cidxAuxClass{cidx}, Disabled.getInstance());
223 end
224 end
225 end
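                % The auxiliary class lets routeSynchCall reproduce the mean number of
                % calls per invocation probabilistically when it differs from the number
                % of replicas: repeated visits cycle through cidxClass, while the
                % remaining probability mass is absorbed by cidxAuxClass.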
226 end
227 end
228end
229
230P = model.initRoutingMatrix;
231if hasSource
232 for o = 1:size(openClasses,1)
233 oidx = openClasses(o,1);
234        p = 1 / openClasses(o,2); % reciprocal of the mean number of calls; each call is routed to a replica at random
235 for m=1:nreplicas
236 P{model.classes{oidx}, model.classes{oidx}}(sourceStation,serverStation{m}) = 1/nreplicas;
237 for n=1:nreplicas
238 P{model.classes{oidx}, model.classes{oidx}}(serverStation{m},serverStation{n}) = (1-p)/nreplicas;
239 end
240 P{model.classes{oidx}, model.classes{oidx}}(serverStation{m},sinkStation) = p;
241 end
242        cidx = openClasses(o,3); % column 3 = call index
243 self.arvproc_classes_updmap{idx}(end+1,:) = [idx, cidx, model.getNodeIndex(sourceStation), oidx];
244 for m=1:nreplicas
245 self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, model.getNodeIndex(serverStation{m}), oidx];
246 end
247 end
248end
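% With p = 1/callmean, a job arriving from the Source makes a geometrically
% distributed number of visits to the replica stations (mean 1/p = callmean)
% before leaving through the Sink, which matches the mean number of
% asynchronous calls per invocation.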
249
250%% job positions are encoded as follows: 1=client, 2=any of the nreplicas server stations, 3=cache node, 4=fork node, 5=join node
251atClient = 1;
252atServer = 2;
253atCache = 3;
254
255jobPos = atClient; % start at client
256% second pass: set up the routing out of the entries
257for tidx_caller = callers
258 if lqn.issynccaller(tidx_caller, idx) | ishostlayer % if it is only an asynch caller the closed classes are not needed
259 % for each entry of the calling task
260 ncaller_entries = length(lqn.entriesof{tidx_caller});
261 for eidx = lqn.entriesof{tidx_caller}
262 aidxClass_eidx = aidxClass{eidx};
263 aidxClass_tidx_caller = aidxClass{tidx_caller};
264            % initialize the entry-selection probabilities to be uniform across the caller's entries
265 P{aidxClass_tidx_caller, aidxClass_eidx}(clientDelay, clientDelay) = 1 / ncaller_entries;
266 if ncaller_entries > 1
267                % at successive iterations this probability is replaced with the entry throughput ratio
268 self.route_prob_updmap{idx}(end+1,:) = [idx, tidx_caller, eidx, 1, 1, aidxClass_tidx_caller.index, aidxClass_eidx.index];
269 end
270 P = recurActGraph(P, tidx_caller, eidx, aidxClass_eidx, jobPos);
271 end
272 end
273end
274model.link(P);
275self.ensemble{idx} = model;
276
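    % recurActGraph performs a depth-first traversal of the activity graph rooted at
    % entry/activity aidx, filling in the routing matrix P for the classes of caller
    % task tidx_caller. jobPos tracks whether the job currently sits at the client
    % delay, at a server replica, or at the cache node; each completed branch is
    % closed with a reply back to the caller's reference class.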
277 function [P, curClass, jobPos] = recurActGraph(P, tidx_caller, aidx, curClass, jobPos)
278 jobPosKey(aidx) = jobPos;
279 curClassKey{aidx} = curClass;
280 nextaidxs = find(lqn.graph(aidx,:)); % these include the called entries
281 if ~isempty(nextaidxs)
282 isNextPrecFork(aidx) = any(isPostAndAct(nextaidxs)); % indexed on aidx to avoid losing it during the recursion
283 end
284
285 for nextaidx = nextaidxs % for all successor activities
286 if ~isempty(nextaidx)
287 isLoop = false;
288 % in the activity graph, the following if is entered only
289 % by an edge that is the return from a LOOP activity
290 if (lqn.graph(aidx,nextaidx) ~= lqn.dag(aidx,nextaidx))
291 isLoop = true;
292 end
293 if ~(lqn.parent(aidx) == lqn.parent(nextaidx)) % if different parent task
294 % if the successor activity is an entry of another task, this is a call
295 cidx = matchrow(lqn.callpair,[aidx,nextaidx]); % find the call index
296 switch lqn.calltype(cidx)
297 case CallType.ASYNC
298 % Async calls don't modify caller routing - caller continues immediately without blocking.
299 % Arrival rate at destination is handled via arvproc_classes_updmap (lines 170-195, 230-248).
300 case CallType.SYNC
301 [P, jobPos, curClass] = routeSynchCall(P, jobPos, curClass);
302 end
303 else
304                    % at this point all calls have been processed; next handle the
305                    % activities local to the task
306 if isempty(intersect(lqn.eshift+(1:lqn.nentries), nextaidxs))
307 % if next activity is not an entry
308 jobPos = jobPosKey(aidx);
309 curClass = curClassKey{aidx};
310 else
311 if ismember(nextaidxs(find(nextaidxs==nextaidx)-1), lqn.eshift+(1:lqn.nentries))
312 curClassC = curClass;
313 end
314 jobPos = atClient;
315 curClass = curClassC;
316 end
317 if jobPos == atClient % at client node
318 if ishostlayer
319 if ~iscachelayer
320 for m=1:nreplicas
321 if isNextPrecFork(aidx)
322 % if next activity is a post-and
323 P{curClass, curClass}(clientDelay, forkNode) = 1.0;
324 f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
325 forkClassStack(end+1) = curClass.index;
326 P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
327 P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, serverStation{m}) = 1.0;
328 else
329 if isPreAndAct(aidx)
330                                    % before entering the join we go back to the entry class at the last fork
331 forkClass = model.classes{forkClassStack(end)};
332 forkClassStack(end) = [];
333 P{curClass, forkClass}(clientDelay,joinNode) = 1.0;
334 P{forkClass, aidxClass{nextaidx}}(joinNode,serverStation{m}) = 1.0;
335 else
336 P{curClass, aidxClass{nextaidx}}(clientDelay,serverStation{m}) = full(lqn.graph(aidx,nextaidx));
337 end
338 end
339 serverStation{m}.setService(aidxClass{nextaidx}, lqn.hostdem{nextaidx});
340 if isfunctionlayer
341 serverStation{m}.setDelayOff(aidxClass{nextaidx}, lqn.setuptime{lqn.parent(nextaidx)}, lqn.delayofftime{lqn.parent(nextaidx)});
342 end
343 end
344 jobPos = atServer;
345 curClass = aidxClass{nextaidx};
346 self.servt_classes_updmap{idx}(end+1,:) = [idx, nextaidx, 2, aidxClass{nextaidx}.index];
347 else
348 P{curClass, aidxClass{nextaidx}}(clientDelay,cacheNode) = full(lqn.graph(aidx,nextaidx));
349
350 cacheNode.setReadItemEntry(aidxClass{nextaidx},lqn.itemproc{aidx},lqn.nitems(aidx));
351 lqn.hitmissaidx = find(lqn.graph(nextaidx,:));
352 lqn.hitaidx = lqn.hitmissaidx(1);
353 lqn.missaidx = lqn.hitmissaidx(2);
354
355 cacheNode.setHitClass(aidxClass{nextaidx},aidxClass{lqn.hitaidx});
356 cacheNode.setMissClass(aidxClass{nextaidx},aidxClass{lqn.missaidx});
357
358 jobPos = atCache; % cache
359 curClass = aidxClass{nextaidx};
360 %self.route_prob_updmap{idx}(end+1,:) = [idx, nextaidx, lqn.hitaidx, 3, 3, aidxClass{nextaidx}.index, aidxClass{lqn.hitaidx}.index];
361 %self.route_prob_updmap{idx}(end+1,:) = [idx, nextaidx, lqn.missaidx, 3, 3, aidxClass{nextaidx}.index, aidxClass{lqn.missaidx}.index];
362 end
363 else % not ishostlayer
364 if isNextPrecFork(aidx)
365 % if next activity is a post-and
366 P{curClass, curClass}(clientDelay, forkNode) = 1.0;
367 f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
368 forkClassStack(end+1) = curClass.index;
369 P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
370 P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, clientDelay) = 1.0;
371 else
372 if isPreAndAct(aidx)
373                            % before entering the join we go back to the entry class at the last fork
374 forkClass = model.classes{forkClassStack(end)};
375 forkClassStack(end) = [];
376 P{curClass, forkClass}(clientDelay,joinNode) = 1.0;
377 P{forkClass, aidxClass{nextaidx}}(joinNode,clientDelay) = 1.0;
378 else
379 P{curClass, aidxClass{nextaidx}}(clientDelay,clientDelay) = full(lqn.graph(aidx,nextaidx));
380 end
381 end
382 jobPos = atClient;
383 curClass = aidxClass{nextaidx};
384 clientDelay.setService(aidxClass{nextaidx}, self.servtproc{nextaidx});
385 self.thinkt_classes_updmap{idx}(end+1,:) = [idx, nextaidx, 1, aidxClass{nextaidx}.index];
386 end
387 elseif jobPos == atServer || jobPos == atCache % at server station
388 if ishostlayer
389 if iscachelayer
390 curClass = aidxClass{nextaidx};
391 for m=1:nreplicas
392 if isNextPrecFork(aidx)
393 % if next activity is a post-and
394 P{curClass, curClass}(cacheNode, forkNode) = 1.0;
395 f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
396 forkClassStack(end+1) = curClass.index;
397 P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
398 P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, serverStation{m}) = 1.0;
399 else
400 if isPreAndAct(aidx)
401                                    % before entering the join we go back to the entry class at the last fork
402 forkClass = model.classes{forkClassStack(end)};
403 forkClassStack(end) = [];
404
405 P{curClass, forkClass}(cacheNode,joinNode) = 1.0;
406 P{forkClass, aidxClass{nextaidx}}(joinNode,serverStation{m}) = 1.0;
407 else
408 P{curClass, aidxClass{nextaidx}}(cacheNode,serverStation{m}) = full(lqn.graph(aidx,nextaidx));
409 end
410 end
411 serverStation{m}.setService(aidxClass{nextaidx}, lqn.hostdem{nextaidx});
412 %self.route_prob_updmap{idx}(end+1,:) = [idx, nextaidx, nextaidx, 3, 2, aidxClass{nextaidx}.index, aidxClass{nextaidx}.index];
413 end
414 else
415 for m=1:nreplicas
416 if isNextPrecFork(aidx)
417 % if next activity is a post-and
418 P{curClass, curClass}(serverStation{m}, forkNode) = 1.0;
419 f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
420 forkClassStack(end+1) = curClass.index;
421 P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
422 P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, serverStation{m}) = 1.0;
423 else
424 if isPreAndAct(aidx)
425                                    % before entering the join we go back to the entry class at the last fork
426 forkClass = model.classes{forkClassStack(end)};
427 forkClassStack(end) = [];
428 P{curClass, forkClass}(serverStation{m},joinNode) = 1.0;
429 P{forkClass, aidxClass{nextaidx}}(joinNode,serverStation{m}) = 1.0;
430 else
431 P{curClass, aidxClass{nextaidx}}(serverStation{m},serverStation{m}) = full(lqn.graph(aidx,nextaidx));
432 end
433 end
434 serverStation{m}.setService(aidxClass{nextaidx}, lqn.hostdem{nextaidx});
435 end
436 end
437 jobPos = atServer;
438 curClass = aidxClass{nextaidx};
439 self.servt_classes_updmap{idx}(end+1,:) = [idx, nextaidx, 2, aidxClass{nextaidx}.index];
440 else
441 for m=1:nreplicas
442 if isNextPrecFork(aidx)
443 % if next activity is a post-and
444 P{curClass, curClass}(serverStation{m}, forkNode) = 1.0;
445 f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
446 forkClassStack(end+1) = curClass.index;
447 P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
448 P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, clientDelay) = 1.0;
449 else
450 if isPreAndAct(aidx)
451                                % before entering the join we go back to the entry class at the last fork
452 forkClass = model.classes{forkClassStack(end)};
453 forkClassStack(end) = [];
454 P{curClass, forkClass}(serverStation{m},joinNode) = 1.0;
455 P{forkClass, aidxClass{nextaidx}}(joinNode,clientDelay) = 1.0;
456 else
457 P{curClass, aidxClass{nextaidx}}(serverStation{m},clientDelay) = full(lqn.graph(aidx,nextaidx));
458 end
459 end
460 end
461 jobPos = atClient;
462 curClass = aidxClass{nextaidx};
463 clientDelay.setService(aidxClass{nextaidx}, self.servtproc{nextaidx});
464 self.thinkt_classes_updmap{idx}(end+1,:) = [idx, nextaidx, 1, aidxClass{nextaidx}.index];
465 end
466 end
467 if aidx ~= nextaidx && ~isLoop
468 %% now recursively build the rest of the routing matrix graph
469 [P, curClass, jobPos] = recurActGraph(P, tidx_caller, nextaidx, curClass, jobPos);
470
471                    % At this point curClass is the last class in the
472                    % recursive branch, which we now close with a reply
473 if jobPos == atClient
474 P{curClass, aidxClass{tidx_caller}}(clientDelay,clientDelay) = 1;
475 if ~strcmp(curClass.name(end-3:end),'.Aux')
476 curClass.completes = true;
477 end
478 else
479 for m=1:nreplicas
480 P{curClass, aidxClass{tidx_caller}}(serverStation{m},clientDelay) = 1;
481 end
482 if ~strcmp(curClass.name(end-3:end),'.Aux')
483 curClass.completes = true;
484 end
485 end
486 end
487 end
488 end
489 end % nextaidx
490 end
491
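    % routeSynchCall routes the synchronous call cidx out of curClass. It distinguishes
    % whether the called entry belongs to the server task of this layer or to another
    % layer, and whether the job currently sits at the client delay or at a server
    % replica; cidxAuxClass is used to match the mean number of calls per invocation
    % when callmean(cidx) differs from nreplicas.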
492 function [P, jobPos, curClass] = routeSynchCall(P, jobPos, curClass)
493 switch jobPos
494 case atClient
495 if lqn.parent(lqn.callpair(cidx,2)) == idx
496 % if a call to an entry of the server in this layer
497 if callmean(cidx) < nreplicas
498 P{curClass, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1 - callmean(cidx); % note that callmean(cidx) < nreplicas
499 for m=1:nreplicas
500 % if isNextPrecFork(aidx)
501 % end
502 % % if next activity is a post-and
503 % P{curClass, curClass}(serverStation{m}, forkNode) = 1.0;
504 % forkStackClass(end+1) = curClass.index;
505 % f = find(nextaidx == nextaidxs(isPostAndAct(nextaidxs)));
506 % P{curClass, curClass}(forkNode, forkOutputRouter{f}) = 1.0;
507 % P{curClass, aidxClass{nextaidx}}(forkOutputRouter{f}, clientDelay) = 1.0;
508 % else
509 P{curClass, cidxClass{cidx}}(clientDelay,serverStation{m}) = callmean(cidx) / nreplicas;
510 P{cidxClass{cidx}, cidxClass{cidx}}(serverStation{m},clientDelay) = 1.0; % not needed, just to avoid leaving the Aux class disconnected
511 end
512 P{cidxAuxClass{cidx}, cidxClass{cidx}}(clientDelay,clientDelay) = 1.0; % not needed, just to avoid leaving the Aux class disconnected
513 elseif callmean(cidx) == nreplicas
514 for m=1:nreplicas
515 P{curClass, cidxClass{cidx}}(clientDelay,serverStation{m}) = 1 / nreplicas;
516 P{cidxClass{cidx}, cidxClass{cidx}}(serverStation{m},clientDelay) = 1.0;
517 end
518 else % callmean(cidx) > nreplicas
519 for m=1:nreplicas
520 P{curClass, cidxClass{cidx}}(clientDelay,serverStation{m}) = 1 / nreplicas;
521 P{cidxClass{cidx}, cidxAuxClass{cidx}}(serverStation{m},clientDelay) = 1.0 ;
522 P{cidxAuxClass{cidx}, cidxClass{cidx}}(clientDelay,serverStation{m}) = 1.0 - 1.0 / (callmean(cidx) / nreplicas);
523 end
524 P{cidxAuxClass{cidx}, cidxClass{cidx}}(clientDelay,clientDelay) = 1.0 / (callmean(cidx));
525 end
526 jobPos = atClient;
527 clientDelay.setService(cidxClass{cidx}, Immediate.getInstance());
528 for m=1:nreplicas
529 serverStation{m}.setService(cidxClass{cidx}, callservtproc{cidx});
530 self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, model.getNodeIndex(serverStation{m}), cidxClass{cidx}.index];
531 end
532 curClass = cidxClass{cidx};
533 else
534 % if it is not a call to an entry of the server
535 if callmean(cidx) < nreplicas
536 P{curClass, cidxClass{cidx}}(clientDelay,clientDelay) = callmean(cidx)/nreplicas; % the mean number of calls is now embedded in the demand
537 P{cidxClass{cidx}, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1; % the mean number of calls is now embedded in the demand
538 P{curClass, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1 - callmean(cidx)/nreplicas; % the mean number of calls is now embedded in the demand
539 curClass = cidxAuxClass{cidx};
540 elseif callmean(cidx) == nreplicas
541 P{curClass, cidxClass{cidx}}(clientDelay,clientDelay) = 1;
542 curClass = cidxClass{cidx};
543 else % callmean(cidx) > nreplicas
544 P{curClass, cidxClass{cidx}}(clientDelay,clientDelay) = 1; % the mean number of calls is now embedded in the demand
545 P{cidxClass{cidx}, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1;% / (callmean(cidx)/nreplicas); % the mean number of calls is now embedded in the demand
546 curClass = cidxAuxClass{cidx};
547 end
548 jobPos = atClient;
549 clientDelay.setService(cidxClass{cidx}, callservtproc{cidx});
550 self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, 1, cidxClass{cidx}.index];
551 end
552 case atServer % job at server
553 if lqn.parent(lqn.callpair(cidx,2)) == idx
554 % if it is a call to an entry of the server
555 if callmean(cidx) < nreplicas
556 for m=1:nreplicas
557 P{curClass, cidxClass{cidx}}(serverStation{m},clientDelay) = 1 - callmean(cidx);
558 P{curClass, cidxClass{cidx}}(serverStation{m},serverStation{m}) = callmean(cidx);
559 serverStation{m}.setService(cidxClass{cidx}, callservtproc{cidx});
560 end
561 jobPos = atClient;
562 curClass = cidxAuxClass{cidx};
563 elseif callmean(cidx) == nreplicas
564 for m=1:nreplicas
565 P{curClass, cidxClass{cidx}}(serverStation{m},serverStation{m}) = 1;
566 end
567 jobPos = atServer;
568 curClass = cidxClass{cidx};
569 else % callmean(cidx) > nreplicas
570 for m=1:nreplicas
571 P{curClass, cidxClass{cidx}}(serverStation{m},serverStation{m}) = 1;
572 P{cidxClass{cidx}, cidxClass{cidx}}(serverStation{m},serverStation{m}) = 1 - 1 / (callmean(cidx));
573 P{cidxClass{cidx}, cidxAuxClass{cidx}}(serverStation{m},clientDelay) = 1 / (callmean(cidx));
574 end
575 jobPos = atClient;
576 curClass = cidxAuxClass{cidx};
577 end
578 for m=1:nreplicas
579 serverStation{m}.setService(cidxClass{cidx}, callservtproc{cidx});
580 self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, model.getNodeIndex(serverStation{m}), cidxClass{cidx}.index];
581 end
582 else
583 % if it is not a call to an entry of the server
584                    % callmean is not needed here since we switched
585                    % to ResidT to model the service time at the client
586 if callmean(cidx) < nreplicas
587 for m=1:nreplicas
588 P{curClass, cidxClass{cidx}}(serverStation{m},clientDelay) = 1;
589 end
590 P{cidxClass{cidx}, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1;
591 curClass = cidxAuxClass{cidx};
592 elseif callmean(cidx) == nreplicas
593 for m=1:nreplicas
594 P{curClass, cidxClass{cidx}}(serverStation{m},clientDelay) = 1;
595 end
596 curClass = cidxClass{cidx};
597 else % callmean(cidx) > nreplicas
598 for m=1:nreplicas
599 P{curClass, cidxClass{cidx}}(serverStation{m},clientDelay) = 1;
600 end
601 P{cidxClass{cidx}, cidxAuxClass{cidx}}(clientDelay,clientDelay) = 1;
602 curClass = cidxAuxClass{cidx};
603 end
604 jobPos = atClient;
605 clientDelay.setService(cidxClass{cidx}, callservtproc{cidx});
606 self.call_classes_updmap{idx}(end+1,:) = [idx, cidx, 1, cidxClass{cidx}.index];
607 end
608 end
609 end
610end
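The routing assignments above use LINE's cell-indexed routing matrix, where P{r,s}(i,j) is the probability that a class-r job leaving node i continues as a class-s job at node j. The following minimal sketch of this notation is not part of buildLayersRecursive.m; the model, class, and node names are illustrative only:

model = Network('example');
clientDelay = Delay(model, 'Clients');
serverStation = Queue(model, 'Server', SchedStrategy.PS);
jobclass = ClosedClass(model, 'Class1', 2, clientDelay); % 2 jobs, reference station is the delay
clientDelay.setService(jobclass, Exp.fitMean(1.0));      % think time
serverStation.setService(jobclass, Exp.fitMean(0.5));    % service demand
P = model.initRoutingMatrix;
P{jobclass, jobclass}(clientDelay, serverStation) = 1.0; % delay -> queue, same class
P{jobclass, jobclass}(serverStation, clientDelay) = 1.0; % queue -> delay, same class
model.link(P);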